plano/cli/test/test_config_generator.py
Adil Hafeez c4634d0034 compact and deduplicate test suite
- Extract generate_storage_tests! macro for shared CRUD tests across memory/postgresql backends
- Move merge tests to mod.rs (testing default trait method once)
- Consolidate signal analyzer tests into table-driven tests
- Extract shared fixtures in router test files
- Parametrize Python CLI tests
- Remove dead tests (test_skip_version_check_env_var, test_arch_agent_config_default)
- Extract SSE event test helpers in streaming_response
2026-03-15 08:34:29 +00:00


import json
import pytest
from unittest import mock

from planoai.config_generator import validate_and_render_schema


@pytest.fixture(autouse=True)
def cleanup_env(monkeypatch):
    # Clean up environment variables and mocks after each test
    yield
    monkeypatch.undo()
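

# Note: pytest's monkeypatch fixture reverts setenv() changes on its own at
# teardown, so the explicit undo() above is belt-and-braces rather than a
# requirement.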
@pytest.mark.parametrize("plano_config", [
# Case 1: LLM provider config
"""
version: v0.1.0
listeners:
egress_traffic:
address: 0.0.0.0
port: 12000
message_format: openai
timeout: 30s
llm_providers:
- model: openai/gpt-4o-mini
access_key: $OPENAI_API_KEY
default: true
- model: openai/gpt-4o
access_key: $OPENAI_API_KEY
routing_preferences:
- name: code understanding
description: understand and explain existing code snippets, functions, or libraries
- model: openai/gpt-4.1
access_key: $OPENAI_API_KEY
routing_preferences:
- name: code generation
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
tracing:
random_sampling: 100
""",
# Case 2: Agent config
"""
version: v0.3.0
agents:
- id: query_rewriter
url: http://localhost:10500
- id: context_builder
url: http://localhost:10501
- id: response_generator
url: http://localhost:10502
- id: research_agent
url: http://localhost:10500
- id: input_guard_rails
url: http://localhost:10503
listeners:
- name: tmobile
type: agent
router: plano_orchestrator_v1
agents:
- id: simple_tmobile_rag_agent
description: t-mobile virtual assistant for device contracts.
filter_chain:
- query_rewriter
- context_builder
- response_generator
- id: research_agent
description: agent to research and gather information from various sources.
filter_chain:
- research_agent
- response_generator
port: 8000
- name: llm_provider
type: model
port: 12000
model_providers:
- access_key: ${OPENAI_API_KEY}
model: openai/gpt-4o
""",
], ids=["llm_provider_config", "agent_config"])
def test_validate_and_render_happy_path(monkeypatch, plano_config):
monkeypatch.setenv("PLANO_CONFIG_FILE", "fake_plano_config.yaml")
monkeypatch.setenv("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml")
monkeypatch.setenv("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml")
monkeypatch.setenv("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml")
monkeypatch.setenv("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml")
monkeypatch.setenv("TEMPLATE_ROOT", "../")
plano_config_schema = ""
with open("../config/plano_config_schema.yaml", "r") as file:
plano_config_schema = file.read()
m_open = mock.mock_open()
m_open.side_effect = [
mock.mock_open(read_data=plano_config).return_value,
mock.mock_open(read_data=plano_config_schema).return_value,
mock.mock_open(read_data=plano_config).return_value,
mock.mock_open(read_data=plano_config_schema).return_value,
mock.mock_open().return_value,
mock.mock_open().return_value,
]
with mock.patch("builtins.open", m_open):
with mock.patch("planoai.config_generator.Environment"):
validate_and_render_schema()
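

# The happy-path test above and the table-driven test below hand-build the
# same six-handle side_effect for mock_open. A helper along these lines (a
# hypothetical sketch, not wired into the tests) could deduplicate that
# plumbing; it assumes, as the comments in the second test spell out, that
# validate_and_render_schema() opens the config and schema twice for reading
# and the two rendered files for writing.
def _config_open_mock(plano_config, plano_config_schema):
    m_open = mock.mock_open()
    m_open.side_effect = [
        mock.mock_open(read_data=plano_config).return_value,
        mock.mock_open(read_data=plano_config_schema).return_value,
        mock.mock_open(read_data=plano_config).return_value,
        mock.mock_open(read_data=plano_config_schema).return_value,
        mock.mock_open().return_value,  # rendered Envoy config (write)
        mock.mock_open().return_value,  # rendered Plano config (write)
    ]
    return m_open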


plano_config_test_cases = [
    {
        "id": "duplicate_provider_name",
        "expected_error": "Duplicate model_provider name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - name: test1
    model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
  - name: test1
    model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
""",
    },
    {
        "id": "provider_interface_with_model_id",
        "expected_error": "Please provide provider interface as part of model name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    provider_interface: openai
""",
    },
    {
        "id": "duplicate_model_id",
        "expected_error": "Duplicate model_id",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
  - model: mistral/gpt-4o
""",
    },
    {
        "id": "custom_provider_base_url",
        "expected_error": "Must provide base_url and provider_interface",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: custom/gpt-4o
""",
    },
    {
        "id": "base_url_with_path_prefix",
        "expected_error": None,
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: custom/gpt-4o
    base_url: "http://custom.com/api/v2"
    provider_interface: openai
""",
    },
    {
        "id": "duplicate_routing_preference_name",
        "expected_error": "Duplicate routing preference name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o-mini
    access_key: $OPENAI_API_KEY
    default: true
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code understanding
        description: understand and explain existing code snippets, functions, or libraries
  - model: openai/gpt-4.1
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code understanding
        description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
tracing:
  random_sampling: 100
""",
    },
]


@pytest.mark.parametrize(
    "plano_config_test_case",
    plano_config_test_cases,
    ids=[case["id"] for case in plano_config_test_cases],
)
def test_validate_and_render_schema_tests(monkeypatch, plano_config_test_case):
    monkeypatch.setenv("PLANO_CONFIG_FILE", "fake_plano_config.yaml")
    monkeypatch.setenv("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml")
    monkeypatch.setenv("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml")
    monkeypatch.setenv("TEMPLATE_ROOT", "../")

    plano_config = plano_config_test_case["plano_config"]
    expected_error = plano_config_test_case.get("expected_error")

    with open("../config/plano_config_schema.yaml", "r") as file:
        plano_config_schema = file.read()

    m_open = mock.mock_open()
    # Provide enough file handles for all open() calls in validate_and_render_schema
    m_open.side_effect = [
        mock.mock_open(read_data=plano_config).return_value,  # validate_prompt_config: PLANO_CONFIG_FILE
        mock.mock_open(read_data=plano_config_schema).return_value,  # validate_prompt_config: PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open(read_data=plano_config).return_value,  # validate_and_render_schema: PLANO_CONFIG_FILE
        mock.mock_open(read_data=plano_config_schema).return_value,  # validate_and_render_schema: PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open().return_value,  # ENVOY_CONFIG_FILE_RENDERED (write)
        mock.mock_open().return_value,  # PLANO_CONFIG_FILE_RENDERED (write)
    ]

    with mock.patch("builtins.open", m_open):
        with mock.patch("planoai.config_generator.Environment"):
            if expected_error:
                # Test expects an error
                with pytest.raises(Exception) as excinfo:
                    validate_and_render_schema()
                assert expected_error in str(excinfo.value)
            else:
                # Test expects success - no exception should be raised
                validate_and_render_schema()
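

# A single table entry can be run by matching its generated test id with
# pytest's -k expression, e.g.:
#
#   pytest test_config_generator.py -k duplicate_model_id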
@pytest.mark.parametrize("listeners,expected_providers,expected_llm_gateway,expected_prompt_gateway", [
# Case 1: With prompt gateway (ingress + egress)
(
{
"ingress_traffic": {"address": "0.0.0.0", "port": 10000, "timeout": "30s"},
"egress_traffic": {"address": "0.0.0.0", "port": 12000, "timeout": "30s"},
},
[
{
"name": "egress_traffic", "type": "model_listener", "port": 12000,
"address": "0.0.0.0", "timeout": "30s",
"model_providers": [{"model": "openai/gpt-4o", "access_key": "test_key"}],
},
{
"name": "ingress_traffic", "type": "prompt_listener", "port": 10000,
"address": "0.0.0.0", "timeout": "30s",
},
],
{
"address": "0.0.0.0", "model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
"name": "egress_traffic", "type": "model_listener", "port": 12000, "timeout": "30s",
},
{"address": "0.0.0.0", "name": "ingress_traffic", "port": 10000, "timeout": "30s", "type": "prompt_listener"},
),
# Case 2: Without prompt gateway (egress only)
(
{"egress_traffic": {"address": "0.0.0.0", "port": 12000, "timeout": "30s"}},
[
{
"address": "0.0.0.0",
"model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
"name": "egress_traffic", "port": 12000, "timeout": "30s", "type": "model_listener",
}
],
{
"address": "0.0.0.0", "model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
"name": "egress_traffic", "type": "model_listener", "port": 12000, "timeout": "30s",
},
None,
),
], ids=["with_prompt_gateway", "without_prompt_gateway"])
def test_convert_legacy_llm_providers(listeners, expected_providers, expected_llm_gateway, expected_prompt_gateway):
    from planoai.utils import convert_legacy_listeners

    llm_providers = [{"model": "openai/gpt-4o", "access_key": "test_key"}]
    updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
        listeners, llm_providers
    )

    assert isinstance(updated_providers, list)
    assert llm_gateway is not None
    assert updated_providers == expected_providers
    assert llm_gateway == expected_llm_gateway
    # The egress-only case pins no prompt gateway value in the table, so only
    # check prompt_gateway when an expectation is given.
    if expected_prompt_gateway is not None:
        assert prompt_gateway == expected_prompt_gateway
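

# The six setenv() calls are duplicated verbatim in both schema tests above. A
# shared fixture along these lines (a sketch only; the tests above keep their
# inline copies) would hold the two in sync:
@pytest.fixture
def config_env(monkeypatch):
    monkeypatch.setenv("PLANO_CONFIG_FILE", "fake_plano_config.yaml")
    monkeypatch.setenv("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml")
    monkeypatch.setenv("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml")
    monkeypatch.setenv("TEMPLATE_ROOT", "../")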