# bench.py
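#
# Usage: python bench.py
# Assumes a Plano gateway listening on http://localhost:12000 and an
# evals_summarize.yaml fixture file next to this script.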

import json
import statistics as stats
import time

import yaml
from openai import OpenAI
from pydantic import BaseModel, ValidationError

# Plano endpoint (keys are handled by Plano)
client = OpenAI(base_url="http://localhost:12000/v1", api_key="n/a")

MODELS = ["arch.summarize.v1", "arch.reason.v1"]
FIXTURES = "evals_summarize.yaml"
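
# Fixture layout this script expects (illustrative sketch; the field values
# here are invented, adjust to the real evals_summarize.yaml):
#
# fixtures:
#   - input: "Summarize the incident report: ..."
#     must_include: ["rollback", "postmortem"]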


# Expected output shape
class SummarizeOut(BaseModel):
    title: str
    bullets: list[str]
    next_actions: list[str]


def load_fixtures(path):
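    """Load the list of eval fixtures from the YAML file."""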
    with open(path, "r") as f:
        return yaml.safe_load(f)["fixtures"]


def must_contain(text: str, anchors: list[str]) -> bool:
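    """Case-insensitive check that every anchor string appears in the text."""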
    t = text.lower()
    return all(a.lower() in t for a in anchors)


def schema_fmt(model: type[BaseModel]):
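    # json_object mode only guarantees syntactically valid JSON; conformance
    # to the model's schema is checked separately in run_case.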
return {"type": "json_object"} # Simplified for broad compatibility
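
# Backends with strict structured-output support could instead build a
# json_schema response_format from the pydantic model (sketch; provider
# support varies):
#
#   {"type": "json_schema",
#    "json_schema": {"name": model.__name__,
#                    "schema": model.model_json_schema(),
#                    "strict": True}}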


def run_case(model, fx):
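    """Run one fixture against one model; return pass/fail, latency, reason."""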
    t0 = time.perf_counter()
    schema = SummarizeOut.model_json_schema()
    resp = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": f"Be concise. Output valid JSON matching this schema:\n{json.dumps(schema)}",
            },
            {"role": "user", "content": fx["input"]},
        ],
        response_format=schema_fmt(SummarizeOut),
    )
    dt = time.perf_counter() - t0
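
    # A missing/empty completion falls back to "{}", which parses as JSON but
    # then fails schema validation below.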
    content = resp.choices[0].message.content or "{}"
    passed, reasons = True, []

    try:
        data = json.loads(content)
    except json.JSONDecodeError:
        return {"ok": False, "lat": dt, "why": "json decode"}

    try:
        # model_validate rejects non-dict payloads (e.g. a bare JSON list)
        # with a ValidationError instead of raising TypeError.
        SummarizeOut.model_validate(data)
    except ValidationError:
        passed = False
        reasons.append("schema")
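    # Grounding check: every required anchor string must appear somewhere in
    # the serialized output.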
    if not must_contain(json.dumps(data), fx.get("must_include", [])):
        passed = False
        reasons.append("anchors")

    return {"ok": passed, "lat": dt, "why": ";".join(reasons)}


def main():
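    """Run every fixture against every model and print a short report."""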
    fixtures = load_fixtures(FIXTURES)
    for model in MODELS:
        results = [run_case(model, fx) for fx in fixtures]
        ok = sum(r["ok"] for r in results)
        total = len(results)
        latencies = [r["lat"] for r in results]

        print(f"\n››› {model}")
        print(f" Success: {ok}/{total} ({ok/total:.0%})")
        if latencies:
            avg_lat = stats.mean(latencies)
            # quantiles() needs at least two samples; fall back to the lone value.
            p95_lat = (
                stats.quantiles(latencies, n=100)[94]
                if len(latencies) > 1
                else latencies[0]
            )
            print(f" Latency (ms): avg={avg_lat*1000:.0f}, p95={p95_lat*1000:.0f}")


if __name__ == "__main__":
    main()