plano/tests/archgw/test_prompt_gateway.py
Adil Hafeez ba651aaf71
Rename all arch references to plano (#745)
* Rename all arch references to plano across the codebase

Complete rebrand from "Arch"/"archgw" to "Plano" including:
- Config files: arch_config_schema.yaml, workflow, demo configs
- Environment variables: ARCH_CONFIG_* → PLANO_CONFIG_*
- Python CLI: variables, functions, file paths, docker mounts
- Rust crates: config paths, log messages, metadata keys
- Docker/build: Dockerfile, supervisord, .dockerignore, .gitignore
- Docker Compose: volume mounts and env vars across all demos/tests
- GitHub workflows: job/step names
- Shell scripts: log messages
- Demos: Python code, READMEs, VS Code configs, Grafana dashboard
- Docs: RST includes, code comments, config references
- Package metadata: package.json, pyproject.toml, uv.lock

External URLs (docs.archgw.com, github.com/katanemo/archgw) left as-is.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Update remaining arch references in docs

- Rename RST cross-reference labels: arch_access_logging, arch_overview_tracing, arch_overview_threading → plano_*
- Update label references in request_lifecycle.rst
- Rename arch_config_state_storage_example.yaml → plano_config_state_storage_example.yaml
- Update config YAML comments: "Arch creates/uses" → "Plano creates/uses"
- Update "the Arch gateway" → "the Plano gateway" in configuration_reference.rst
- Update arch_config_schema.yaml reference in provider_models.py
- Rename arch_agent_router → plano_agent_router in config example

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Fix remaining arch references found in second pass

- config/docker-compose.dev.yaml: ARCH_CONFIG_FILE → PLANO_CONFIG_FILE,
  arch_config.yaml → plano_config.yaml, archgw_logs → plano_logs
- config/test_passthrough.yaml: container mount path
- tests/e2e/docker-compose.yaml: source file path (was still arch_config.yaml)
- cli/planoai/core.py: comment and log message
- crates/brightstaff/src/tracing/constants.rs: doc comment
- tests/{e2e,archgw}/common.py: get_arch_messages → get_plano_messages,
  arch_state/arch_messages variables renamed
- tests/{e2e,archgw}/test_prompt_gateway.py: updated imports and usages
- demos/shared/test_runner/{common,test_demos}.py: same renames
- tests/e2e/test_model_alias_routing.py: docstring
- .dockerignore: archgw_modelserver → plano_modelserver
- demos/use_cases/claude_code_router/pretty_model_resolution.sh: container name

Note: x-arch-* HTTP header values and Rust constant names intentionally
preserved for backwards compatibility with existing deployments.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-13 15:16:56 -08:00

141 lines
4.6 KiB
Python

"""End-to-end tests for the Plano prompt gateway's function-calling flow.

pytest-httpserver stands in for both the model server (``/function_calling``)
and the downstream weather API (``/weather``) so the gateway can be exercised
without real backends.
"""

import json
import logging

import pytest
import requests
from deepdiff import DeepDiff
from pytest_httpserver import HTTPServer, RequestMatcher

from common import (
    PROMPT_GATEWAY_ENDPOINT,
    TEST_CASE_FIXTURES,
    get_plano_messages,
)

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


@pytest.fixture(scope="session")
def httpserver_listen_address():
    # Bind the mock upstream on all interfaces so the gateway (possibly in a
    # container) can reach it. Port 51001 presumably matches the upstream
    # configured for the gateway under test — confirm against the test config.
    return ("0.0.0.0", 51001)
def normalize_tool_call_arguments(tool_call):
    """Return *tool_call* with its ``arguments`` field decoded into a dict.

    Per the OpenAI API spec the ``arguments`` field of a tool call is a JSON
    string; decoding it here lets tests compare arguments structurally with
    DeepDiff. The dict is mutated in place when decoding succeeds.

    Args:
        tool_call: A tool-call dict whose ``arguments`` may be a JSON string
            or already a dict.

    Returns:
        The same dict, with ``arguments`` guaranteed to be a dict when the
        original value was valid JSON; otherwise unchanged.
    """
    raw = tool_call.get("arguments")
    if isinstance(raw, str):
        try:
            tool_call["arguments"] = json.loads(raw)
        except (json.JSONDecodeError, TypeError):
            # Malformed JSON: leave the string as-is so a later assertion
            # failure shows the raw value.
            pass
    return tool_call
def test_prompt_gateway(httpserver: HTTPServer):
    """Happy path: the gateway asks the model server for a tool call, invokes
    the weather API with it, and returns an assistant message whose metadata
    (plano_messages) carries the tool call and the API response.
    """
    simple_fixture = TEST_CASE_FIXTURES["SIMPLE"]
    # `payload` (not `input`) to avoid shadowing the builtin.
    payload = simple_fixture["input"]
    model_server_response = simple_fixture["model_server_response"]
    api_server_response = simple_fixture["api_server_response"]
    expected_tool_call = {
        "name": "get_current_weather",
        "arguments": {"location": "seattle, wa", "days": "2"},
    }

    # setup mock response from model_server
    httpserver.expect_request("/function_calling").respond_with_data(
        json.dumps(model_server_response)
    )
    # setup mock response from api_server
    httpserver.expect_request("/weather").respond_with_data(
        json.dumps(api_server_response)
    )

    response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=payload)
    assert response.status_code == 200

    # The gateway must have hit both upstreams exactly as mocked.
    httpserver.assert_request_made(
        RequestMatcher(uri="/function_calling", method="POST")
    )
    httpserver.assert_request_made(RequestMatcher(uri="/weather", method="POST"))

    response_json = response.json()
    assert response_json.get("model").startswith("gpt-4o-mini")
    choices = response_json.get("choices", [])
    assert len(choices) > 0
    assert "message" in choices[0]
    assistant_message = choices[0]["message"]
    assert "role" in assistant_message
    assert assistant_message["role"] == "assistant"
    assert "content" in assistant_message
    assert "weather" in assistant_message["content"]

    # now verify plano_messages (tool call and api response) that are sent as response metadata
    plano_messages = get_plano_messages(response_json)
    assert len(plano_messages) == 2
    tool_calls_message = plano_messages[0]
    tool_calls = tool_calls_message.get("tool_calls", [])
    assert len(tool_calls) > 0
    tool_call = normalize_tool_call_arguments(tool_calls[0]["function"])
    # ignore_string_case: argument values may differ only in casing.
    diff = DeepDiff(tool_call, expected_tool_call, ignore_string_case=True)
    assert not diff
def test_prompt_gateway_api_server_404(httpserver: HTTPServer):
    """When the downstream API returns 404, the gateway propagates the status
    and an upstream-error body naming the failing host and path.
    """
    simple_fixture = TEST_CASE_FIXTURES["SIMPLE"]
    # `payload` (not `input`) to avoid shadowing the builtin.
    payload = simple_fixture["input"]
    model_server_response = simple_fixture["model_server_response"]

    # setup mock response from model_server
    httpserver.expect_request("/function_calling").respond_with_data(
        json.dumps(model_server_response)
    )
    # setup mock response from api_server (fixed: comment previously said
    # model_server) — simulate the weather endpoint failing with 404
    httpserver.expect_request("/weather").respond_with_data(status=404)

    response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=payload)
    assert response.status_code == 404

    httpserver.assert_request_made(
        RequestMatcher(uri="/function_calling", method="POST")
    )
    httpserver.assert_request_made(RequestMatcher(uri="/weather", method="POST"))

    assert (
        response.text
        == "upstream application error host=weather_forecast_service, path=/weather, status=404, body="
    )
def test_prompt_gateway_model_server_500(httpserver: HTTPServer):
    """When the model server returns 500, the gateway surfaces the status and
    an upstream-error body; the weather API is never called.
    """
    simple_fixture = TEST_CASE_FIXTURES["SIMPLE"]
    # `payload` (not `input`) to avoid shadowing the builtin.
    payload = simple_fixture["input"]

    # setup mock response from model_server: simulate an internal failure
    httpserver.expect_request("/function_calling").respond_with_data(status=500)

    response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=payload)
    assert response.status_code == 500

    httpserver.assert_request_made(
        RequestMatcher(uri="/function_calling", method="POST")
    )

    # NOTE(review): host is still "arch_internal" — presumably the gateway's
    # internal cluster name kept for backwards compatibility during the
    # arch→plano rename; confirm against the gateway config before changing.
    assert (
        response.text
        == "upstream application error host=arch_internal, path=/function_calling, status=500, body="
    )