plano/cli/test/test_config_generator.py
Adil Hafeez ba651aaf71
Rename all arch references to plano (#745)
* Rename all arch references to plano across the codebase

Complete rebrand from "Arch"/"archgw" to "Plano" including:
- Config files: arch_config_schema.yaml, workflow, demo configs
- Environment variables: ARCH_CONFIG_* → PLANO_CONFIG_*
- Python CLI: variables, functions, file paths, docker mounts
- Rust crates: config paths, log messages, metadata keys
- Docker/build: Dockerfile, supervisord, .dockerignore, .gitignore
- Docker Compose: volume mounts and env vars across all demos/tests
- GitHub workflows: job/step names
- Shell scripts: log messages
- Demos: Python code, READMEs, VS Code configs, Grafana dashboard
- Docs: RST includes, code comments, config references
- Package metadata: package.json, pyproject.toml, uv.lock

External URLs (docs.archgw.com, github.com/katanemo/archgw) left as-is.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Update remaining arch references in docs

- Rename RST cross-reference labels: arch_access_logging, arch_overview_tracing, arch_overview_threading → plano_*
- Update label references in request_lifecycle.rst
- Rename arch_config_state_storage_example.yaml → plano_config_state_storage_example.yaml
- Update config YAML comments: "Arch creates/uses" → "Plano creates/uses"
- Update "the Arch gateway" → "the Plano gateway" in configuration_reference.rst
- Update arch_config_schema.yaml reference in provider_models.py
- Rename arch_agent_router → plano_agent_router in config example

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix remaining arch references found in second pass

- config/docker-compose.dev.yaml: ARCH_CONFIG_FILE → PLANO_CONFIG_FILE,
  arch_config.yaml → plano_config.yaml, archgw_logs → plano_logs
- config/test_passthrough.yaml: container mount path
- tests/e2e/docker-compose.yaml: source file path (was still arch_config.yaml)
- cli/planoai/core.py: comment and log message
- crates/brightstaff/src/tracing/constants.rs: doc comment
- tests/{e2e,archgw}/common.py: get_arch_messages → get_plano_messages,
  arch_state/arch_messages variables renamed
- tests/{e2e,archgw}/test_prompt_gateway.py: updated imports and usages
- demos/shared/test_runner/{common,test_demos}.py: same renames
- tests/e2e/test_model_alias_routing.py: docstring
- .dockerignore: archgw_modelserver → plano_modelserver
- demos/use_cases/claude_code_router/pretty_model_resolution.sh: container name

Note: x-arch-* HTTP header values and Rust constant names intentionally
preserved for backwards compatibility with existing deployments.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-13 15:16:56 -08:00

467 lines
13 KiB
Python

import json
import pytest
from unittest import mock
from planoai.config_generator import validate_and_render_schema
@pytest.fixture(autouse=True)
def cleanup_env(monkeypatch):
    """Automatically roll back all monkeypatch changes (env vars etc.) after each test."""
    yield
    monkeypatch.undo()
def test_validate_and_render_happy_path(monkeypatch):
    """A valid legacy-style (v0.1.0) config renders through validate_and_render_schema without raising."""
    for env_name, env_value in (
        ("PLANO_CONFIG_FILE", "fake_plano_config.yaml"),
        ("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml"),
        ("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml"),
        ("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml"),
        ("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml"),
        ("TEMPLATE_ROOT", "../"),
    ):
        monkeypatch.setenv(env_name, env_value)

    config_text = """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o-mini
    access_key: $OPENAI_API_KEY
    default: true
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code understanding
        description: understand and explain existing code snippets, functions, or libraries
  - model: openai/gpt-4.1
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code generation
        description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
tracing:
  random_sampling: 100
"""

    # The schema itself is read from disk so the test validates against the real schema.
    with open("../config/plano_config_schema.yaml", "r") as schema_file:
        schema_text = schema_file.read()

    # One pre-built file handle per open() call inside validate_and_render_schema,
    # consumed in call order.
    handles = [
        mock.mock_open(read_data=config_text).return_value,  # PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open(read_data=config_text).return_value,  # PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open().return_value,  # ENVOY_CONFIG_FILE_RENDERED (write)
        mock.mock_open().return_value,  # PLANO_CONFIG_FILE_RENDERED (write)
    ]
    patched_open = mock.mock_open()
    patched_open.side_effect = handles

    with mock.patch("builtins.open", patched_open):
        with mock.patch("planoai.config_generator.Environment"):
            validate_and_render_schema()
def test_validate_and_render_happy_path_agent_config(monkeypatch):
    """A valid agent-style (v0.3.0) config renders through validate_and_render_schema without raising."""
    for env_name, env_value in (
        ("PLANO_CONFIG_FILE", "fake_plano_config.yaml"),
        ("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml"),
        ("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml"),
        ("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml"),
        ("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml"),
        ("TEMPLATE_ROOT", "../"),
    ):
        monkeypatch.setenv(env_name, env_value)

    config_text = """
version: v0.3.0
agents:
  - id: query_rewriter
    url: http://localhost:10500
  - id: context_builder
    url: http://localhost:10501
  - id: response_generator
    url: http://localhost:10502
  - id: research_agent
    url: http://localhost:10500
  - id: input_guard_rails
    url: http://localhost:10503
listeners:
  - name: tmobile
    type: agent
    router: plano_orchestrator_v1
    agents:
      - id: simple_tmobile_rag_agent
        description: t-mobile virtual assistant for device contracts.
        filter_chain:
          - query_rewriter
          - context_builder
          - response_generator
      - id: research_agent
        description: agent to research and gather information from various sources.
        filter_chain:
          - research_agent
          - response_generator
    port: 8000
  - name: llm_provider
    type: model
    port: 12000
    model_providers:
      - access_key: ${OPENAI_API_KEY}
        model: openai/gpt-4o
"""

    # The schema itself is read from disk so the test validates against the real schema.
    with open("../config/plano_config_schema.yaml", "r") as schema_file:
        schema_text = schema_file.read()

    # One pre-built file handle per open() call inside validate_and_render_schema,
    # consumed in call order.
    handles = [
        mock.mock_open(read_data=config_text).return_value,  # PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open(read_data=config_text).return_value,  # PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open().return_value,  # ENVOY_CONFIG_FILE_RENDERED (write)
        mock.mock_open().return_value,  # PLANO_CONFIG_FILE_RENDERED (write)
    ]
    patched_open = mock.mock_open()
    patched_open.side_effect = handles

    with mock.patch("builtins.open", patched_open):
        with mock.patch("planoai.config_generator.Environment"):
            validate_and_render_schema()
# Parametrized validation cases for test_validate_and_render_schema_tests.
# Each case carries:
#   id             -- pytest test id
#   expected_error -- substring expected in the raised exception, or None for success
#   plano_config   -- the config YAML fed to validate_and_render_schema
# Fix: test id "duplicate_routeing_preference_name" had a spelling typo ("routeing").
plano_config_test_cases = [
    {
        "id": "duplicate_provider_name",
        "expected_error": "Duplicate model_provider name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - name: test1
    model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
  - name: test1
    model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
""",
    },
    {
        "id": "provider_interface_with_model_id",
        "expected_error": "Please provide provider interface as part of model name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    provider_interface: openai
""",
    },
    {
        "id": "duplicate_model_id",
        "expected_error": "Duplicate model_id",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
  - model: mistral/gpt-4o
""",
    },
    {
        "id": "custom_provider_base_url",
        "expected_error": "Must provide base_url and provider_interface",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: custom/gpt-4o
""",
    },
    {
        "id": "base_url_with_path_prefix",
        "expected_error": None,
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: custom/gpt-4o
    base_url: "http://custom.com/api/v2"
    provider_interface: openai
""",
    },
    {
        "id": "duplicate_routing_preference_name",
        "expected_error": "Duplicate routing preference name",
        "plano_config": """
version: v0.1.0
listeners:
  egress_traffic:
    address: 0.0.0.0
    port: 12000
    message_format: openai
    timeout: 30s
llm_providers:
  - model: openai/gpt-4o-mini
    access_key: $OPENAI_API_KEY
    default: true
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code understanding
        description: understand and explain existing code snippets, functions, or libraries
  - model: openai/gpt-4.1
    access_key: $OPENAI_API_KEY
    routing_preferences:
      - name: code understanding
        description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
tracing:
  random_sampling: 100
""",
    },
]
@pytest.mark.parametrize(
    "plano_config_test_case",
    plano_config_test_cases,
    ids=[case["id"] for case in plano_config_test_cases],
)
def test_validate_and_render_schema_tests(monkeypatch, plano_config_test_case):
    """Each parametrized config either raises the expected error or renders cleanly."""
    for env_name, env_value in (
        ("PLANO_CONFIG_FILE", "fake_plano_config.yaml"),
        ("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml"),
        ("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml"),
        ("PLANO_CONFIG_FILE_RENDERED", "fake_plano_config_rendered.yaml"),
        ("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml"),
        ("TEMPLATE_ROOT", "../"),
    ):
        monkeypatch.setenv(env_name, env_value)

    config_text = plano_config_test_case["plano_config"]
    expected_error = plano_config_test_case.get("expected_error")

    # The schema itself is read from disk so the test validates against the real schema.
    with open("../config/plano_config_schema.yaml", "r") as schema_file:
        schema_text = schema_file.read()

    # One pre-built file handle per open() call inside validate_and_render_schema,
    # consumed in call order.
    handles = [
        mock.mock_open(read_data=config_text).return_value,  # validate_prompt_config: PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # validate_prompt_config: PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open(read_data=config_text).return_value,  # validate_and_render_schema: PLANO_CONFIG_FILE
        mock.mock_open(read_data=schema_text).return_value,  # validate_and_render_schema: PLANO_CONFIG_SCHEMA_FILE
        mock.mock_open().return_value,  # ENVOY_CONFIG_FILE_RENDERED (write)
        mock.mock_open().return_value,  # PLANO_CONFIG_FILE_RENDERED (write)
    ]
    patched_open = mock.mock_open()
    patched_open.side_effect = handles

    with mock.patch("builtins.open", patched_open):
        with mock.patch("planoai.config_generator.Environment"):
            if expected_error:
                # Failure case: the validator must raise with the expected message.
                with pytest.raises(Exception) as excinfo:
                    validate_and_render_schema()
                assert expected_error in str(excinfo.value)
            else:
                # Success case: rendering must complete without raising.
                validate_and_render_schema()
def test_convert_legacy_llm_providers():
    """Legacy ingress/egress listeners plus llm_providers convert to new-style listeners.

    Verifies that convert_legacy_listeners returns:
    - the full list of converted listener dicts (egress first),
    - the egress model-listener dict (llm_gateway),
    - the ingress prompt-listener dict (prompt_gateway).

    Fix: removed a leftover debug print(json.dumps(...)) and the redundant
    `is not None` pre-asserts (the equality assertions below subsume them).
    """
    from planoai.utils import convert_legacy_listeners

    listeners = {
        "ingress_traffic": {
            "address": "0.0.0.0",
            "port": 10000,
            "timeout": "30s",
        },
        "egress_traffic": {
            "address": "0.0.0.0",
            "port": 12000,
            "timeout": "30s",
        },
    }
    llm_providers = [
        {
            "model": "openai/gpt-4o",
            "access_key": "test_key",
        }
    ]

    updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
        listeners, llm_providers
    )

    assert isinstance(updated_providers, list)
    assert updated_providers == [
        {
            "name": "egress_traffic",
            "type": "model_listener",
            "port": 12000,
            "address": "0.0.0.0",
            "timeout": "30s",
            "model_providers": [{"model": "openai/gpt-4o", "access_key": "test_key"}],
        },
        {
            "name": "ingress_traffic",
            "type": "prompt_listener",
            "port": 10000,
            "address": "0.0.0.0",
            "timeout": "30s",
        },
    ]
    assert llm_gateway == {
        "address": "0.0.0.0",
        "model_providers": [
            {
                "access_key": "test_key",
                "model": "openai/gpt-4o",
            },
        ],
        "name": "egress_traffic",
        "type": "model_listener",
        "port": 12000,
        "timeout": "30s",
    }
    assert prompt_gateway == {
        "address": "0.0.0.0",
        "name": "ingress_traffic",
        "port": 10000,
        "timeout": "30s",
        "type": "prompt_listener",
    }
def test_convert_legacy_llm_providers_no_prompt_gateway():
    """With only an egress listener configured, conversion still yields a model listener."""
    from planoai.utils import convert_legacy_listeners

    legacy_listeners = {
        "egress_traffic": {
            "address": "0.0.0.0",
            "port": 12000,
            "timeout": "30s",
        }
    }
    providers = [
        {
            "model": "openai/gpt-4o",
            "access_key": "test_key",
        }
    ]

    updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
        legacy_listeners, providers
    )

    assert isinstance(updated_providers, list)
    assert llm_gateway is not None
    assert prompt_gateway is not None

    # The single converted egress listener appears both in the list and as llm_gateway.
    expected_model_listener = {
        "address": "0.0.0.0",
        "model_providers": [
            {
                "access_key": "test_key",
                "model": "openai/gpt-4o",
            },
        ],
        "name": "egress_traffic",
        "port": 12000,
        "timeout": "30s",
        "type": "model_listener",
    }
    assert updated_providers == [expected_model_listener]
    assert llm_gateway == expected_model_listener