This commit is contained in:
Adil Hafeez 2026-04-20 19:48:27 +00:00 committed by GitHub
commit e46bdc9abf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 1829 additions and 39 deletions

View file

@ -4,6 +4,8 @@ on:
push:
branches: [main]
pull_request:
schedule:
- cron: '0 6 * * *' # daily at 6am UTC
permissions:
contents: read
@ -213,10 +215,60 @@ jobs:
sarif_file: trivy-results.sarif
# ──────────────────────────────────────────────
# E2E: prompt_gateway tests
# Mock-based E2E tests (zero secrets required)
# ──────────────────────────────────────────────
  # Mock-based E2E job: the gateway talks to a pytest-httpserver mock, so it
  # needs zero provider secrets and can run on every push/PR.
  mock-e2e-tests:
    needs: docker-build
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./tests/archgw
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.14"
      - name: Download plano image
        uses: actions/download-artifact@v4
        with:
          name: plano-image
          path: /tmp
      - name: Load plano image
        run: docker load -i /tmp/plano-image.tar
      - name: Start plano with mock config
        run: |
          docker compose -f docker-compose.mock.yaml up -d
      - name: Wait for plano to be healthy
        run: |
          source common.sh && wait_for_healthz http://localhost:12000/healthz
      - name: Install uv
        run: curl -LsSf https://astral.sh/uv/install.sh | sh
      - name: Install test dependencies
        run: uv sync
      - name: Run mock-based E2E tests
        run: |
          # Dump container logs on failure so CI shows why the gateway misbehaved.
          uv run pytest test_model_alias_routing.py test_responses_api.py test_streaming.py || (docker compose -f docker-compose.mock.yaml logs && false)
      - name: Stop plano
        if: always()
        run: docker compose -f docker-compose.mock.yaml down
# ──────────────────────────────────────────────
# E2E: prompt_gateway tests (live — main + nightly only)
# ──────────────────────────────────────────────
test-prompt-gateway:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest
steps:
- name: Checkout code
@ -253,20 +305,17 @@ jobs:
- name: Run prompt_gateway tests
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
AWS_BEARER_TOKEN_BEDROCK: ${{ secrets.AWS_BEARER_TOKEN_BEDROCK }}
GROK_API_KEY: ${{ secrets.GROK_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
run: |
cd tests/e2e && bash run_prompt_gateway_tests.sh
# ──────────────────────────────────────────────
# E2E: model_alias_routing tests
# E2E: model_alias_routing tests (live — main + nightly only)
# ──────────────────────────────────────────────
test-model-alias-routing:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest
steps:
- name: Checkout code
@ -313,10 +362,11 @@ jobs:
cd tests/e2e && bash run_model_alias_tests.sh
# ──────────────────────────────────────────────
# E2E: responses API with state tests
# E2E: responses API with state tests (live — main + nightly only)
# ──────────────────────────────────────────────
test-responses-api-with-state:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest
steps:
- name: Checkout code
@ -353,20 +403,16 @@ jobs:
- name: Run responses API with state tests
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
AWS_BEARER_TOKEN_BEDROCK: ${{ secrets.AWS_BEARER_TOKEN_BEDROCK }}
GROK_API_KEY: ${{ secrets.GROK_API_KEY }}
run: |
cd tests/e2e && bash run_responses_state_tests.sh
# ──────────────────────────────────────────────
# E2E: plano tests (multi-Python matrix)
# E2E: plano tests (multi-Python matrix, live — main + nightly only)
# ──────────────────────────────────────────────
e2e-plano-tests:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest-m
strategy:
fail-fast: false
@ -397,10 +443,6 @@ jobs:
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
AWS_BEARER_TOKEN_BEDROCK: ${{ secrets.AWS_BEARER_TOKEN_BEDROCK }}
run: |
docker compose up | tee &> plano.logs &
@ -416,22 +458,21 @@ jobs:
- name: Run plano tests
run: |
uv run pytest || tail -100 plano.logs
uv run pytest test_prompt_gateway.py test_llm_gateway.py || tail -100 plano.logs
- name: Stop plano docker container
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
run: |
docker compose down
# ──────────────────────────────────────────────
# E2E: demo — preference based routing
# E2E: demo — preference based routing (live — main + nightly only)
# ──────────────────────────────────────────────
e2e-demo-preference:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest-m
steps:
- name: Checkout code
@ -473,17 +514,17 @@ jobs:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
ARCH_API_KEY: ${{ secrets.ARCH_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
run: |
source venv/bin/activate
cd demos/shared/test_runner && bash run_demo_tests.sh llm_routing/preference_based_routing
# ──────────────────────────────────────────────
# E2E: demo — currency conversion
# E2E: demo — currency conversion (live — main + nightly only)
# ──────────────────────────────────────────────
e2e-demo-currency:
needs: docker-build
if: github.event_name == 'push' && github.ref == 'refs/heads/main' || github.event_name == 'schedule'
runs-on: ubuntu-latest-m
steps:
- name: Checkout code

View file

@ -0,0 +1,42 @@
# Gateway config for the mock-based E2E suite: every provider's base_url
# points at the pytest-httpserver mock on host.docker.internal:51001, so no
# real provider keys are exercised (the env-var keys are dummies).
version: v0.3.0

listeners:
  - type: model
    name: model_listener
    port: 12000

model_providers:
  # OpenAI Models - all point to mock server
  - model: openai/gpt-5-mini-2025-08-07
    access_key: $OPENAI_API_KEY
    default: true
    base_url: http://host.docker.internal:51001
  - model: openai/gpt-4o-mini
    access_key: $OPENAI_API_KEY
    base_url: http://host.docker.internal:51001
  - model: openai/o3
    access_key: $OPENAI_API_KEY
    base_url: http://host.docker.internal:51001
  - model: openai/gpt-4o
    access_key: $OPENAI_API_KEY
    base_url: http://host.docker.internal:51001
  # Anthropic Models - point to mock server
  - model: anthropic/claude-sonnet-4-20250514
    access_key: $ANTHROPIC_API_KEY
    base_url: http://host.docker.internal:51001

# Model aliases (resolved by the gateway before the request reaches upstream)
model_aliases:
  arch.summarize.v1:
    target: gpt-5-mini-2025-08-07
  arch.v1:
    target: o3

# State storage for v1/responses API multi-turn tests
state_storage:
  type: memory

448
tests/archgw/conftest.py Normal file
View file

@ -0,0 +1,448 @@
"""Shared fixtures for mock-based tests.
Provides mock HTTP server handlers that simulate OpenAI and Anthropic API responses.
The gateway container routes to host.docker.internal:51001 where the mock server runs.
"""
import json
import pytest
from pytest_httpserver import HTTPServer
from pytest_httpserver.httpserver import HandlerType
from werkzeug.wrappers import Response
@pytest.fixture(scope="session")
def httpserver_listen_address():
    # Bind the pytest-httpserver mock on all interfaces at port 51001 so the
    # gateway container can reach it via host.docker.internal:51001.
    return ("0.0.0.0", 51001)
# ---------------------------------------------------------------------------
# OpenAI Chat Completions helpers
# ---------------------------------------------------------------------------
def make_openai_chat_response(
    content="Hello from mock!", model="gpt-5-mini-2025-08-07", tool_calls=None
):
    """Build a non-streaming OpenAI chat-completion payload.

    When ``tool_calls`` is given, the assistant message's content is null
    and the finish reason is ``tool_calls``, mirroring the real API.
    """
    if tool_calls:
        message = {"role": "assistant", "content": None, "tool_calls": tool_calls}
        finish_reason = "tool_calls"
    else:
        message = {"role": "assistant", "content": content}
        finish_reason = "stop"
    return {
        "id": "chatcmpl-mock-123",
        "object": "chat.completion",
        "created": 1234567890,
        "model": model,
        "choices": [{"index": 0, "message": message, "finish_reason": finish_reason}],
        "usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
    }
def make_openai_chat_stream(content="Hello from mock!", model="gpt-5-mini-2025-08-07"):
    """Render a mock OpenAI SSE stream for /v1/chat/completions.

    Emits a role chunk, one content chunk per whitespace-separated word,
    a stop chunk, then the ``[DONE]`` sentinel — returned as one string.
    """

    def sse(delta, finish_reason=None):
        # Compact separators reproduce the compact wire format exactly.
        payload = {
            "id": "chatcmpl-mock-123",
            "object": "chat.completion.chunk",
            "created": 1234567890,
            "model": model,
            "choices": [{"index": 0, "delta": delta, "finish_reason": finish_reason}],
        }
        return f"data: {json.dumps(payload, separators=(',', ':'))}\n\n"

    parts = [sse({"role": "assistant", "content": ""})]
    for i, word in enumerate(content.split(" ")):
        # Words after the first carry a leading space so the pieces rejoin cleanly.
        parts.append(sse({"content": ("" if i == 0 else " ") + word}))
    parts.append(sse({}, "stop"))
    parts.append("data: [DONE]\n\n")
    return "".join(parts)
def make_openai_tool_call_stream(
    model="gpt-5-mini-2025-08-07", tool_name="echo_tool", tool_args='{"text":"hello"}'
):
    """Render a mock OpenAI SSE stream whose assistant turn is one tool call.

    Chunk order mirrors the real API: role chunk, tool-call header chunk
    (id + function name, empty arguments), an arguments chunk, a
    ``tool_calls`` finish chunk, then ``[DONE]``.
    """

    def sse(delta, finish_reason=None):
        payload = {
            "id": "chatcmpl-mock-tool",
            "object": "chat.completion.chunk",
            "created": 1234567890,
            "model": model,
            "choices": [{"index": 0, "delta": delta, "finish_reason": finish_reason}],
        }
        return f"data: {json.dumps(payload, separators=(',', ':'))}\n\n"

    header_delta = {
        "tool_calls": [
            {
                "index": 0,
                "id": "call_mock_123",
                "type": "function",
                "function": {"name": tool_name, "arguments": ""},
            }
        ]
    }
    args_delta = {"tool_calls": [{"index": 0, "function": {"arguments": tool_args}}]}
    return "".join(
        [
            sse({"role": "assistant", "content": None}),
            sse(header_delta),
            sse(args_delta),
            sse({}, "tool_calls"),
            "data: [DONE]\n\n",
        ]
    )
# ---------------------------------------------------------------------------
# Anthropic Messages helpers
# ---------------------------------------------------------------------------
def make_anthropic_response(content="Hello from mock!", model="claude-sonnet-4-20250514"):
    """Build a non-streaming Anthropic Messages payload with one text block."""
    response = dict(
        id="msg-mock-123",
        type="message",
        role="assistant",
        model=model,
        content=[{"type": "text", "text": content}],
        stop_reason="end_turn",
        stop_sequence=None,
    )
    response["usage"] = {"input_tokens": 10, "output_tokens": 5}
    return response
def make_anthropic_stream(content="Hello from mock!", model="claude-sonnet-4-20250514"):
    """Render a mock Anthropic Messages SSE stream.

    Event order: message_start, content_block_start, one text_delta per
    word, content_block_stop, message_delta (end_turn + usage), message_stop.
    """

    def event(name, data):
        return f"event: {name}\ndata: {data}\n\n"

    start_msg = {
        "id": "msg-mock-123",
        "type": "message",
        "role": "assistant",
        "model": model,
        "content": [],
        "stop_reason": None,
        "stop_sequence": None,
        "usage": {"input_tokens": 10, "output_tokens": 0},
    }
    out = [event("message_start", json.dumps({"type": "message_start", "message": start_msg}))]
    out.append(
        event(
            "content_block_start",
            '{"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}',
        )
    )
    for i, word in enumerate(content.split(" ")):
        # JSON-escape the piece; words after the first carry a leading space.
        piece = json.dumps(("" if i == 0 else " ") + word)[1:-1]
        out.append(
            event(
                "content_block_delta",
                f'{{"type":"content_block_delta","index":0,"delta":{{"type":"text_delta","text":"{piece}"}}}}',
            )
        )
    out.append(event("content_block_stop", '{"type":"content_block_stop","index":0}'))
    out.append(
        event(
            "message_delta",
            '{"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":5}}',
        )
    )
    out.append(event("message_stop", '{"type":"message_stop"}'))
    return "".join(out)
def make_anthropic_thinking_stream(
    content="The answer is 4.",
    thinking="Let me think... 2+2=4",
    model="claude-sonnet-4-20250514",
):
    """Render a mock Anthropic SSE stream with extended thinking.

    A thinking block (index 0) streams word-by-word, then a text block
    (index 1), then message_delta/message_stop. Note: each thinking_delta
    carries a trailing space after its word, matching the original mock.
    """

    def event(name, data):
        return f"event: {name}\ndata: {data}\n\n"

    start_msg = {
        "id": "msg-mock-think",
        "type": "message",
        "role": "assistant",
        "model": model,
        "content": [],
        "stop_reason": None,
        "stop_sequence": None,
        "usage": {"input_tokens": 10, "output_tokens": 0},
    }
    out = [event("message_start", json.dumps({"type": "message_start", "message": start_msg}))]

    # Thinking block at index 0.
    out.append(
        event(
            "content_block_start",
            '{"type":"content_block_start","index":0,"content_block":{"type":"thinking","thinking":""}}',
        )
    )
    for word in thinking.split(" "):
        piece = json.dumps(word)[1:-1]
        out.append(
            event(
                "content_block_delta",
                f'{{"type":"content_block_delta","index":0,"delta":{{"type":"thinking_delta","thinking":"{piece} "}}}}',
            )
        )
    out.append(event("content_block_stop", '{"type":"content_block_stop","index":0}'))

    # Text block at index 1.
    out.append(
        event(
            "content_block_start",
            '{"type":"content_block_start","index":1,"content_block":{"type":"text","text":""}}',
        )
    )
    for i, word in enumerate(content.split(" ")):
        piece = json.dumps(("" if i == 0 else " ") + word)[1:-1]
        out.append(
            event(
                "content_block_delta",
                f'{{"type":"content_block_delta","index":1,"delta":{{"type":"text_delta","text":"{piece}"}}}}',
            )
        )
    out.append(event("content_block_stop", '{"type":"content_block_stop","index":1}'))
    out.append(
        event(
            "message_delta",
            '{"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"output_tokens":20}}',
        )
    )
    out.append(event("message_stop", '{"type":"message_stop"}'))
    return "".join(out)
# ---------------------------------------------------------------------------
# OpenAI Responses API helpers
# ---------------------------------------------------------------------------
def make_responses_api_response(
    content="Hello from mock!",
    model="gpt-5-mini-2025-08-07",
    response_id="resp-mock-123",
):
    """Build a completed, non-streaming OpenAI Responses API payload
    containing one assistant message with a single output_text part."""
    text_part = {"type": "output_text", "text": content, "annotations": []}
    message_item = {
        "type": "message",
        "id": "msg_mock_123",
        "status": "completed",
        "role": "assistant",
        "content": [text_part],
    }
    return {
        "id": response_id,
        "object": "response",
        "created_at": 1234567890,
        "model": model,
        "output": [message_item],
        "status": "completed",
        "usage": {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    }
def make_responses_api_stream(
    content="Hello from mock!",
    model="gpt-5-mini-2025-08-07",
    response_id="resp-mock-123",
):
    """Render a mock Responses API SSE stream.

    Event order: response.created → output_item.added → content_part.added
    → one output_text.delta per word → output_text.done → output_item.done
    → response.completed (carrying usage).
    """

    def event(name, data):
        return f"event: {name}\ndata: {data}\n\n"

    in_progress = {
        "id": response_id,
        "object": "response",
        "created_at": 1234567890,
        "model": model,
        "output": [],
        "status": "in_progress",
    }
    out = [event("response.created", json.dumps({"type": "response.created", "response": in_progress}))]
    out.append(
        event(
            "response.output_item.added",
            '{"type":"response.output_item.added","output_index":0,'
            '"item":{"type":"message","id":"msg_mock_123","status":"in_progress","role":"assistant","content":[]}}',
        )
    )
    out.append(
        event(
            "response.content_part.added",
            '{"type":"response.content_part.added","output_index":0,'
            '"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}',
        )
    )
    for i, word in enumerate(content.split(" ")):
        piece = json.dumps(("" if i == 0 else " ") + word)[1:-1]
        out.append(
            event(
                "response.output_text.delta",
                f'{{"type":"response.output_text.delta","output_index":0,'
                f'"content_index":0,"delta":"{piece}"}}',
            )
        )
    out.append(
        event(
            "response.output_text.done",
            f'{{"type":"response.output_text.done","output_index":0,'
            f'"content_index":0,"text":"{json.dumps(content)[1:-1]}"}}',
        )
    )
    final_item = {
        "type": "message",
        "id": "msg_mock_123",
        "status": "completed",
        "role": "assistant",
        "content": [{"type": "output_text", "text": content, "annotations": []}],
    }
    out.append(
        event(
            "response.output_item.done",
            json.dumps({"type": "response.output_item.done", "output_index": 0, "item": final_item}),
        )
    )
    completed = dict(
        in_progress,
        output=[final_item],
        status="completed",
        usage={"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
    )
    out.append(event("response.completed", json.dumps({"type": "response.completed", "response": completed})))
    return "".join(out)
# ---------------------------------------------------------------------------
# Mock server setup helpers
# ---------------------------------------------------------------------------
def setup_openai_chat_mock(
    httpserver: HTTPServer, content="Hello from mock!", tool_calls=None
):
    """Register a permanent POST handler for /v1/chat/completions.

    Streaming requests get an SSE content stream; non-streaming requests
    get a JSON completion (carrying ``tool_calls`` instead of text when
    supplied). NOTE(review): streaming + tool_calls falls through to the
    plain content stream, as in the original handler.

    Returns a list that accumulates every decoded request body so tests
    can assert on what reached the "upstream".
    """
    captured = []

    def handler(request):
        body = json.loads(request.data)
        captured.append(body)
        model = body.get("model", "gpt-5-mini-2025-08-07")
        if body.get("stream", False):
            return Response(
                make_openai_chat_stream(content=content, model=model),
                status=200,
                content_type="text/event-stream",
            )
        if tool_calls:
            payload = make_openai_chat_response(model=model, tool_calls=tool_calls)
        else:
            payload = make_openai_chat_response(content=content, model=model)
        return Response(
            json.dumps(payload), status=200, content_type="application/json"
        )

    httpserver.expect_request(
        "/v1/chat/completions", method="POST", handler_type=HandlerType.PERMANENT
    ).respond_with_handler(handler)
    return captured
def setup_anthropic_mock(
    httpserver: HTTPServer, content="Hello from mock!", thinking=False
):
    """Register a permanent POST handler for /v1/messages on the mock.

    Streaming requests get an Anthropic SSE stream (a thinking+text stream
    when ``thinking`` is true); non-streaming requests get a JSON message.

    Returns a list that accumulates every decoded request body.
    """
    captured = []

    def handler(request):
        body = json.loads(request.data)
        captured.append(body)
        model = body.get("model", "claude-sonnet-4-20250514")
        if body.get("stream", False):
            stream_body = (
                make_anthropic_thinking_stream(model=model)
                if thinking
                else make_anthropic_stream(content=content, model=model)
            )
            return Response(
                stream_body, status=200, content_type="text/event-stream"
            )
        return Response(
            json.dumps(make_anthropic_response(content=content, model=model)),
            status=200,
            content_type="application/json",
        )

    httpserver.expect_request(
        "/v1/messages", method="POST", handler_type=HandlerType.PERMANENT
    ).respond_with_handler(handler)
    return captured
def setup_responses_api_mock(httpserver: HTTPServer, content="Hello from mock!"):
    """Register a permanent POST handler for /v1/responses on the mock.

    Each call is answered with a fresh response id (resp-mock-1,
    resp-mock-2, ...), which multi-turn state tests can distinguish.

    Returns a list that accumulates every decoded request body.
    """
    captured = []
    call_count = 0

    def handler(request):
        nonlocal call_count
        body = json.loads(request.data)
        captured.append(body)
        call_count += 1
        model = body.get("model", "gpt-5-mini-2025-08-07")
        response_id = f"resp-mock-{call_count}"
        if body.get("stream", False):
            return Response(
                make_responses_api_stream(
                    content=content, model=model, response_id=response_id
                ),
                status=200,
                content_type="text/event-stream",
            )
        return Response(
            json.dumps(
                make_responses_api_response(
                    content=content, model=model, response_id=response_id
                )
            ),
            status=200,
            content_type="application/json",
        )

    httpserver.expect_request(
        "/v1/responses", method="POST", handler_type=HandlerType.PERMANENT
    ).respond_with_handler(handler)
    return captured
def setup_error_mock(
    httpserver: HTTPServer, path="/v1/chat/completions", status=400, body=None
):
    """Register a handler that answers POST ``path`` with an error response.

    When ``body`` is falsy, a generic ``invalid_request_error`` JSON payload
    is used; otherwise ``body`` is returned verbatim at the given ``status``.
    """
    default_error = json.dumps(
        {
            "error": {
                "message": "Bad Request",
                "type": "invalid_request_error",
                "code": "bad_request",
            }
        }
    )
    httpserver.expect_request(path, method="POST").respond_with_data(
        body or default_error,
        status=status,
        content_type="application/json",
    )

View file

@ -0,0 +1,13 @@
# Compose file for the mock-based E2E suite: runs the gateway with the mock
# config and dummy provider keys; host.docker.internal is mapped so the
# container can reach the pytest-httpserver mock running on the host.
services:
  plano:
    image: katanemo/plano:latest
    ports:
      - "12000:12000"  # gateway listener (matches config_mock_llm.yaml)
    volumes:
      - ./config_mock_llm.yaml:/app/plano_config.yaml
      - /etc/ssl/cert.pem:/etc/ssl/cert.pem
    extra_hosts:
      - "host.docker.internal:host-gateway"  # needed on Linux runners
    environment:
      # Dummy keys — the mock upstream never validates them.
      - OPENAI_API_KEY=mock-key
      - ANTHROPIC_API_KEY=mock-key

View file

@ -13,6 +13,8 @@ dependencies = [
"deepdiff>=8.0.1",
"pytest-retry>=1.6.3",
"pytest-httpserver>=1.1.0",
"openai>=1.0.0",
"anthropic>=0.66.0",
]
[project.optional-dependencies]

View file

@ -0,0 +1,365 @@
"""Mock-based tests for model alias routing.
Tests alias resolution, protocol transformation (OpenAI client Anthropic upstream
and vice versa), error handling, and multi-turn conversations with tool calls.
These tests require the gateway to be running with config_mock_llm.yaml
(started via docker-compose.mock.yaml).
"""
import json
import openai
import anthropic
import pytest
import logging
from pytest_httpserver import HTTPServer
from conftest import (
setup_openai_chat_mock,
setup_anthropic_mock,
setup_error_mock,
make_openai_chat_response,
)
logger = logging.getLogger(__name__)
LLM_GATEWAY_BASE = "http://localhost:12000"
# =============================================================================
# ALIAS RESOLUTION TESTS — OpenAI client
# =============================================================================
# All tests in this module hit a live gateway at LLM_GATEWAY_BASE whose
# upstream is the pytest-httpserver mock registered via conftest helpers.


def test_openai_client_with_alias_arch_summarize_v1(httpserver: HTTPServer):
    """arch.summarize.v1 should resolve to gpt-5-mini-2025-08-07 (OpenAI)"""
    captured = setup_openai_chat_mock(httpserver, content="Hello from mock OpenAI!")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    completion = client.chat.completions.create(
        model="arch.summarize.v1",
        max_completion_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    assert completion.choices[0].message.content == "Hello from mock OpenAI!"
    # Verify alias was resolved before reaching upstream
    assert len(captured) == 1
    assert captured[0]["model"] == "gpt-5-mini-2025-08-07"


def test_openai_client_with_alias_arch_v1(httpserver: HTTPServer):
    """arch.v1 should resolve to o3 (OpenAI)"""
    captured = setup_openai_chat_mock(httpserver, content="Hello from mock o3!")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    completion = client.chat.completions.create(
        model="arch.v1",
        max_completion_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    assert completion.choices[0].message.content == "Hello from mock o3!"
    assert len(captured) == 1
    assert captured[0]["model"] == "o3"


def test_openai_client_with_alias_streaming(httpserver: HTTPServer):
    """Streaming with alias should resolve and return streamed content"""
    setup_openai_chat_mock(httpserver, content="Hello from streaming mock!")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.chat.completions.create(
        model="arch.summarize.v1",
        max_completion_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )

    # Reassemble streamed deltas and compare against the mock's full content.
    chunks = []
    for chunk in stream:
        if chunk.choices[0].delta.content:
            chunks.append(chunk.choices[0].delta.content)
    assert "".join(chunks) == "Hello from streaming mock!"
# =============================================================================
# ALIAS RESOLUTION TESTS — Anthropic client
# =============================================================================


def test_anthropic_client_with_alias_arch_summarize_v1(httpserver: HTTPServer):
    """Anthropic client with alias should route to OpenAI upstream, response transformed to Anthropic format"""
    captured = setup_openai_chat_mock(httpserver, content="Hello via Anthropic client!")

    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    message = client.messages.create(
        model="arch.summarize.v1",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    response_text = "".join(b.text for b in message.content if b.type == "text")
    assert response_text == "Hello via Anthropic client!"
    # Verify upstream received OpenAI-format request with resolved model
    assert len(captured) == 1
    assert captured[0]["model"] == "gpt-5-mini-2025-08-07"


def test_anthropic_client_with_alias_streaming(httpserver: HTTPServer):
    """Anthropic client streaming with alias → OpenAI upstream → transformed back to Anthropic SSE"""
    setup_openai_chat_mock(httpserver, content="Streaming via Anthropic!")

    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    with client.messages.stream(
        model="arch.summarize.v1",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    ) as stream:
        pieces = [t for t in stream.text_stream]
        full_text = "".join(pieces)

    assert full_text == "Streaming via Anthropic!"
# =============================================================================
# PROTOCOL TRANSFORMATION TESTS
# =============================================================================


def test_openai_client_with_claude_model(httpserver: HTTPServer):
    """OpenAI client → Claude model → gateway proxies via /v1/chat/completions → transforms response"""
    # Gateway routes OpenAI-format requests to /v1/chat/completions on upstream
    # even for Anthropic models, so we need the OpenAI chat mock
    captured = setup_openai_chat_mock(
        httpserver, content="Hello from Claude via OpenAI client!"
    )

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    completion = client.chat.completions.create(
        model="claude-sonnet-4-20250514",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    assert (
        completion.choices[0].message.content == "Hello from Claude via OpenAI client!"
    )
    assert len(captured) == 1
    assert captured[0]["model"] == "claude-sonnet-4-20250514"


def test_openai_client_with_claude_model_streaming(httpserver: HTTPServer):
    """OpenAI client streaming → Claude model → proxied via /v1/chat/completions"""
    # Gateway routes OpenAI-format requests to /v1/chat/completions on upstream
    setup_openai_chat_mock(httpserver, content="Streaming from Claude!")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.chat.completions.create(
        model="claude-sonnet-4-20250514",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )

    chunks = []
    for chunk in stream:
        if chunk.choices[0].delta.content:
            chunks.append(chunk.choices[0].delta.content)
    assert "".join(chunks) == "Streaming from Claude!"


def test_anthropic_client_with_openai_model(httpserver: HTTPServer):
    """Anthropic client → OpenAI model (gpt-4o-mini) → OpenAI upstream → transforms response to Anthropic format"""
    captured = setup_openai_chat_mock(
        httpserver, content="Hello from GPT via Anthropic!"
    )

    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    message = client.messages.create(
        model="gpt-4o-mini",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    response_text = "".join(b.text for b in message.content if b.type == "text")
    assert response_text == "Hello from GPT via Anthropic!"
    assert len(captured) == 1
    assert captured[0]["model"] == "gpt-4o-mini"


def test_anthropic_client_with_openai_model_streaming(httpserver: HTTPServer):
    """Anthropic client streaming → OpenAI model → OpenAI SSE → transformed to Anthropic SSE"""
    setup_openai_chat_mock(httpserver, content="Streaming from GPT!")

    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    with client.messages.stream(
        model="gpt-4o-mini",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    ) as stream:
        pieces = [t for t in stream.text_stream]
        full_text = "".join(pieces)

    assert full_text == "Streaming from GPT!"
# =============================================================================
# DIRECT MODEL TESTS
# =============================================================================


def test_direct_model_gpt4o_mini_openai(httpserver: HTTPServer):
    """Direct model name (no alias) via OpenAI client"""
    captured = setup_openai_chat_mock(httpserver, content="Direct GPT response!")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        max_completion_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    assert completion.choices[0].message.content == "Direct GPT response!"
    # Model name passes through unchanged (no alias resolution involved).
    assert captured[0]["model"] == "gpt-4o-mini"


def test_direct_model_claude_anthropic(httpserver: HTTPServer):
    """Direct Claude model via Anthropic client"""
    captured = setup_anthropic_mock(httpserver, content="Direct Claude response!")

    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    message = client.messages.create(
        model="claude-sonnet-4-20250514",
        max_tokens=500,
        messages=[{"role": "user", "content": "Hello"}],
    )

    response_text = "".join(b.text for b in message.content if b.type == "text")
    assert response_text == "Direct Claude response!"
    assert captured[0]["model"] == "claude-sonnet-4-20250514"


# =============================================================================
# MULTI-TURN WITH TOOL CALLS
# =============================================================================


def test_assistant_message_with_null_content_and_tool_calls(httpserver: HTTPServer):
    """Gateway should handle assistant messages with null content + tool_calls in history"""
    setup_openai_chat_mock(httpserver, content="The weather is sunny in Seattle.")

    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    # History contains an assistant turn with content=None plus a tool result,
    # which some gateways mishandle during protocol transformation.
    completion = client.chat.completions.create(
        model="gpt-4o",
        max_tokens=500,
        messages=[
            {"role": "system", "content": "You are a weather assistant."},
            {"role": "user", "content": "What's the weather in Seattle?"},
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_test123",
                        "type": "function",
                        "function": {
                            "name": "get_weather",
                            "arguments": '{"city": "Seattle"}',
                        },
                    }
                ],
            },
            {
                "role": "tool",
                "tool_call_id": "call_test123",
                "content": '{"temperature": "10C", "condition": "Partly cloudy"}',
            },
        ],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a city",
                    "parameters": {
                        "type": "object",
                        "properties": {"city": {"type": "string"}},
                        "required": ["city"],
                    },
                },
            }
        ],
    )

    assert completion.choices[0].message.content == "The weather is sunny in Seattle."
# =============================================================================
# ERROR HANDLING
# =============================================================================
def test_nonexistent_alias(httpserver: HTTPServer):
"""Non-existent alias should be treated as direct model name and likely fail"""
client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
try:
client.chat.completions.create(
model="nonexistent.alias",
max_completion_tokens=50,
messages=[{"role": "user", "content": "Hello"}],
)
# If it succeeds, the alias was passed through as a direct model name
except Exception:
# Error is also acceptable - non-existent model should fail
pass
# =============================================================================
# THINKING MODE
# =============================================================================
def test_anthropic_thinking_mode_streaming(httpserver: HTTPServer):
    """Anthropic thinking mode should stream thinking + text blocks correctly"""
    setup_anthropic_mock(httpserver, thinking=True)
    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    # Track which stream event categories were observed.
    seen = {"thinking_start": False, "thinking_delta": False, "text_delta": False}
    with client.messages.stream(
        model="claude-sonnet-4-20250514",
        max_tokens=2048,
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "What is 2+2?"}],
    ) as stream:
        for event in stream:
            if event.type == "content_block_start":
                block = getattr(event, "content_block", None)
                if block is not None and getattr(block, "type", None) == "thinking":
                    seen["thinking_start"] = True
            elif event.type == "content_block_delta":
                delta = getattr(event, "delta", None)
                if delta is not None:
                    if delta.type == "text_delta":
                        seen["text_delta"] = True
                    elif delta.type == "thinking_delta":
                        seen["thinking_delta"] = True
        final = stream.get_final_message()
    assert final is not None
    assert final.content and len(final.content) > 0
    assert seen["text_delta"], "Expected text deltas in stream"
    assert seen["thinking_start"], "No thinking block started"
    assert seen["thinking_delta"], "No thinking deltas observed"
    block_types = [blk.type for blk in final.content]
    assert "text" in block_types
    assert "thinking" in block_types

View file

@ -0,0 +1,287 @@
"""Mock-based tests for the OpenAI Responses API (/v1/responses).
Tests translation to chat completions via the gateway, tool calling,
streaming, mixed content types, and multi-turn state management.
Note: The gateway translates all Responses API requests to /v1/chat/completions
on the upstream when using base_url-configured providers. Direct /v1/responses
passthrough is tested by the live e2e tests on main/nightly.
These tests require the gateway to be running with config_mock_llm.yaml
(started via docker-compose.mock.yaml).
"""
import openai
import logging
from pytest_httpserver import HTTPServer
from conftest import (
setup_openai_chat_mock,
)
logger = logging.getLogger(__name__)
LLM_GATEWAY_BASE = "http://localhost:12000"
# =============================================================================
# NON-STREAMING TESTS
# =============================================================================
def test_responses_api_non_streaming(httpserver: HTTPServer):
    """Responses API non-streaming → translated to /v1/chat/completions"""
    # Fix: the request-capture list returned by the mock helper was bound to
    # an unused `captured` local; this test only validates the response
    # surface, so the return value is discarded.
    setup_openai_chat_mock(httpserver, content="Hello from Responses API!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    resp = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Hello via responses API",
    )
    assert resp is not None
    assert resp.id is not None
    assert len(resp.output_text) > 0
# Note: Responses API with OpenAI models passes through to /v1/responses on the
# upstream, which doesn't work correctly with mock servers (response format issues).
# Those tests are covered by the live e2e tests on main/nightly.
# =============================================================================
# STREAMING TESTS
# =============================================================================
def test_responses_api_streaming(httpserver: HTTPServer):
    """Responses API streaming → translated to /v1/chat/completions"""
    setup_openai_chat_mock(httpserver, content="Streaming from Responses API!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Write a haiku",
        stream=True,
    )
    # Collect only incremental text-delta events from the event stream.
    deltas = [
        event.delta
        for event in stream
        if getattr(event, "type", None) == "response.output_text.delta"
        and getattr(event, "delta", None)
    ]
    assert len(deltas) > 0, "Should have received streaming text deltas"
# =============================================================================
# TOOL CALLING TESTS
# =============================================================================
def test_responses_api_with_tools(httpserver: HTTPServer):
    """Responses API with tools → translated to /v1/chat/completions"""
    setup_openai_chat_mock(httpserver, content="Tool response via Claude")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    # Responses API tool schema is flat (name/parameters at the top level),
    # unlike the chat completions nested {"function": {...}} form.
    echo_tool = {
        "type": "function",
        "name": "echo_tool",
        "description": "Echo back the provided input: hello_world",
        "parameters": {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        },
    }
    resp = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Call the echo tool",
        tools=[echo_tool],
    )
    assert resp.id is not None
def test_responses_api_streaming_with_tools(httpserver: HTTPServer):
    """Responses API streaming with tools → translated to /v1/chat/completions"""
    setup_openai_chat_mock(httpserver, content="Streamed tool via Claude")
    client = openai.OpenAI(
        api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1", max_retries=0
    )
    echo_tool = {
        "type": "function",
        "name": "echo_tool",
        "description": "Echo back the provided input: hello_world",
        "parameters": {
            "type": "object",
            "properties": {"text": {"type": "string"}},
            "required": ["text"],
        },
    }
    stream = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Call the echo tool with hello_world",
        tools=[echo_tool],
        stream=True,
    )
    text_deltas = []
    tool_arg_deltas = []
    for event in stream:
        kind = getattr(event, "type", None)
        delta = getattr(event, "delta", None)
        if not delta:
            continue
        if kind == "response.output_text.delta":
            text_deltas.append(delta)
        elif kind == "response.function_call_arguments.delta":
            tool_arg_deltas.append(delta)
    assert text_deltas or tool_arg_deltas, "Expected streamed text or tool call deltas"
# =============================================================================
# MIXED CONTENT TYPES
# =============================================================================
def test_responses_api_mixed_content_types(httpserver: HTTPServer):
    """Responses API with mixed content types (string and array) in input"""
    setup_openai_chat_mock(httpserver, content="Weather Seattle")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    # Mix a plain-string developer message with a structured
    # (array-of-parts) user message in the same input list.
    developer_turn = {
        "role": "developer",
        "content": "Generate a short chat title based on the user's message.",
    }
    user_turn = {
        "role": "user",
        "content": [{"type": "input_text", "text": "What is the weather in Seattle"}],
    }
    resp = client.responses.create(
        model="claude-sonnet-4-20250514",
        input=[developer_turn, user_turn],
    )
    assert resp is not None
    assert resp.id is not None
    assert len(resp.output_text) > 0
# =============================================================================
# STATE MANAGEMENT (multi-turn via previous_response_id)
# =============================================================================
def test_conversation_state_management_two_turn(httpserver: HTTPServer):
    """Two-turn conversation using previous_response_id for state management.
    Turn 1: Send initial message → get response_id
    Turn 2: Send with previous_response_id → verify state was combined
    """
    captured = setup_openai_chat_mock(
        httpserver, content="I remember your name is Alice!"
    )
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    # Turn 1: establish the conversation.
    first = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="My name is Alice and I like pizza.",
    )
    assert first.id is not None
    assert len(first.output_text) > 0
    # Turn 2: continue from the first response's id.
    second = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="What is my name?",
        previous_response_id=first.id,
    )
    assert second.id is not None
    assert second.id != first.id
    # The upstream mock should have seen two requests, and the second one
    # must carry the combined history: turn-1 user + assistant, plus the
    # turn-2 user message.
    assert len(captured) == 2
    replayed = captured[1].get("messages", [])
    assert (
        len(replayed) >= 3
    ), f"Expected >= 3 messages in second turn, got {len(replayed)}: {replayed}"
def test_conversation_state_management_two_turn_streaming(httpserver: HTTPServer):
    """Two-turn streaming conversation using previous_response_id."""
    captured = setup_openai_chat_mock(httpserver, content="Alice likes pizza!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")

    def consume(stream):
        # Drain a Responses event stream; return (text deltas, response id).
        deltas = []
        response_id = None
        for event in stream:
            etype = getattr(event, "type", None)
            if etype == "response.output_text.delta" and getattr(event, "delta", None):
                deltas.append(event.delta)
            if etype == "response.completed" and getattr(event, "response", None):
                response_id = event.response.id
        return deltas, response_id

    # Turn 1: streaming.
    chunks_1, id_1 = consume(
        client.responses.create(
            model="claude-sonnet-4-20250514",
            input="My name is Alice and I like pizza.",
            stream=True,
        )
    )
    assert id_1 is not None
    assert len(chunks_1) > 0
    # Turn 2: streaming, chained to turn 1 via previous_response_id.
    chunks_2, id_2 = consume(
        client.responses.create(
            model="claude-sonnet-4-20250514",
            input="What do I like?",
            previous_response_id=id_1,
            stream=True,
        )
    )
    assert id_2 is not None
    assert id_2 != id_1
    assert len(chunks_2) > 0
    # The second upstream request must include the first turn's context.
    assert len(captured) == 2
    replayed = captured[1].get("messages", [])
    assert (
        len(replayed) >= 3
    ), f"Expected >= 3 messages in second turn, got {len(replayed)}"

View file

@ -0,0 +1,289 @@
"""Mock-based streaming tests for all three API shapes.
Tests streaming for:
- OpenAI Chat Completions (both OpenAI and Anthropic clients)
- Anthropic Messages API (both native and cross-provider)
- OpenAI Responses API (translated to chat completions; passthrough is covered by the live e2e tests)
- Tool call streaming
- Thinking mode streaming
These tests require the gateway to be running with config_mock_llm.yaml
(started via docker-compose.mock.yaml).
"""
import json
import openai
import anthropic
import logging
from pytest_httpserver import HTTPServer
from pytest_httpserver.httpserver import HandlerType
from werkzeug.wrappers import Response
from conftest import (
setup_openai_chat_mock,
setup_anthropic_mock,
make_openai_tool_call_stream,
)
logger = logging.getLogger(__name__)
LLM_GATEWAY_BASE = "http://localhost:12000"
# =============================================================================
# OPENAI CHAT COMPLETIONS STREAMING
# =============================================================================
def test_openai_chat_streaming_basic(httpserver: HTTPServer):
    """Basic OpenAI streaming: verify chunks arrive in order and reassemble correctly"""
    setup_openai_chat_mock(
        httpserver, content="The quick brown fox jumps over the lazy dog"
    )
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        max_tokens=100,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    # Gather the non-empty content deltas in arrival order.
    pieces = [
        chunk.choices[0].delta.content
        for chunk in stream
        if chunk.choices[0].delta.content
    ]
    assert "".join(pieces) == "The quick brown fox jumps over the lazy dog"
    assert len(pieces) > 1, "Should have received multiple chunks"
def test_openai_chat_streaming_tool_calls(httpserver: HTTPServer):
    """OpenAI streaming with tool calls: verify tool call chunks are properly assembled"""

    def handler(request):
        # Echo the requested model back so the mocked stream is consistent
        # with whatever model the gateway forwarded.
        body = json.loads(request.data)
        model = body.get("model", "gpt-5-mini-2025-08-07")
        return Response(
            make_openai_tool_call_stream(
                model=model, tool_name="echo_tool", tool_args='{"text":"hello"}'
            ),
            status=200,
            content_type="text/event-stream",
        )

    httpserver.expect_request(
        "/v1/chat/completions",
        method="POST",
        handler_type=HandlerType.PERMANENT,
    ).respond_with_handler(handler)
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        max_tokens=100,
        messages=[{"role": "user", "content": "Call the echo tool"}],
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "echo_tool",
                    "description": "Echo input",
                    "parameters": {
                        "type": "object",
                        "properties": {"text": {"type": "string"}},
                        "required": ["text"],
                    },
                },
            }
        ],
        stream=True,
    )
    # Reassemble tool calls from streamed deltas, keyed by tc.index.
    tool_calls = []
    for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.tool_calls:
            for tc in chunk.choices[0].delta.tool_calls:
                while len(tool_calls) <= tc.index:
                    tool_calls.append(
                        {"id": "", "function": {"name": "", "arguments": ""}}
                    )
                entry = tool_calls[tc.index]
                if tc.id:
                    entry["id"] = tc.id
                if tc.function:
                    if tc.function.name:
                        entry["function"]["name"] = tc.function.name
                    if tc.function.arguments:
                        entry["function"]["arguments"] += tc.function.arguments
    assert len(tool_calls) > 0, "Should have received tool calls"
    assert tool_calls[0]["function"]["name"] == "echo_tool"
    assert tool_calls[0]["id"] == "call_mock_123"
    # Fix: the accumulated arguments were never checked, yet argument
    # assembly is the point of this test. The concatenated fragments must
    # parse back into the JSON the mock emitted.
    assert json.loads(tool_calls[0]["function"]["arguments"]) == {"text": "hello"}
# =============================================================================
# ANTHROPIC MESSAGES STREAMING
# =============================================================================
def test_anthropic_messages_streaming_basic(httpserver: HTTPServer):
    """Basic Anthropic streaming: verify text_stream yields chunks and final message is complete"""
    expected = "Hello from streaming Claude!"
    setup_anthropic_mock(httpserver, content=expected)
    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    with client.messages.stream(
        model="claude-sonnet-4-20250514",
        max_tokens=100,
        messages=[{"role": "user", "content": "Hello"}],
    ) as stream:
        fragments = list(stream.text_stream)
        final = stream.get_final_message()
    assert "".join(fragments) == expected
    assert len(fragments) > 1, "Should have received multiple text chunks"
    assert final is not None
    assert final.content[0].text == expected
def test_anthropic_messages_streaming_thinking(httpserver: HTTPServer):
    """Anthropic thinking mode streaming: verify thinking + text blocks"""
    setup_anthropic_mock(httpserver, thinking=True)
    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    saw_thinking_start = False
    saw_thinking_delta = False
    saw_text_delta = False
    with client.messages.stream(
        model="claude-sonnet-4-20250514",
        max_tokens=2048,
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "What is 2+2?"}],
    ) as stream:
        for event in stream:
            block = getattr(event, "content_block", None)
            delta = getattr(event, "delta", None)
            if event.type == "content_block_start" and block:
                if getattr(block, "type", None) == "thinking":
                    saw_thinking_start = True
            if event.type == "content_block_delta" and delta:
                if delta.type == "thinking_delta":
                    saw_thinking_delta = True
                elif delta.type == "text_delta":
                    saw_text_delta = True
        final = stream.get_final_message()
    assert saw_thinking_start, "No thinking block started"
    assert saw_thinking_delta, "No thinking deltas"
    assert saw_text_delta, "No text deltas"
    kinds = [blk.type for blk in final.content]
    assert "thinking" in kinds
    assert "text" in kinds
# =============================================================================
# CROSS-PROVIDER STREAMING
# =============================================================================
def test_openai_client_streaming_anthropic_upstream(httpserver: HTTPServer):
    """OpenAI client streaming → Anthropic model → proxied via /v1/chat/completions"""
    # The gateway routes OpenAI-format requests to /v1/chat/completions
    # on the upstream, so the OpenAI chat mock serves this Anthropic model.
    setup_openai_chat_mock(httpserver, content="Cross-provider streaming works!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.chat.completions.create(
        model="claude-sonnet-4-20250514",
        max_tokens=100,
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
    )
    received = [
        chunk.choices[0].delta.content
        for chunk in stream
        if chunk.choices[0].delta.content
    ]
    assert "".join(received) == "Cross-provider streaming works!"
def test_anthropic_client_streaming_openai_upstream(httpserver: HTTPServer):
    """Anthropic client streaming → OpenAI model → OpenAI SSE → transformed to Anthropic SSE"""
    setup_openai_chat_mock(httpserver, content="Reverse cross-provider streaming!")
    client = anthropic.Anthropic(api_key="test-key", base_url=LLM_GATEWAY_BASE)
    collected = []
    with client.messages.stream(
        model="gpt-4o-mini",
        max_tokens=100,
        messages=[{"role": "user", "content": "Hello"}],
    ) as stream:
        for piece in stream.text_stream:
            collected.append(piece)
    assert "".join(collected) == "Reverse cross-provider streaming!"
# =============================================================================
# RESPONSES API STREAMING
# =============================================================================
def test_responses_api_streaming_basic(httpserver: HTTPServer):
    """Responses API streaming: verify event types and content assembly"""
    # Gateway translates Responses API to /v1/chat/completions on upstream
    # for non-OpenAI models (OpenAI models pass through to /v1/responses which
    # doesn't work with mocks)
    setup_openai_chat_mock(httpserver, content="Responses API streaming works!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Hello",
        stream=True,
    )
    text_chunks = []
    completed = False
    for event in stream:
        etype = getattr(event, "type", None)
        if etype == "response.output_text.delta" and getattr(event, "delta", None):
            text_chunks.append(event.delta)
        if etype == "response.completed":
            completed = True
    full_content = "".join(text_chunks)
    assert len(text_chunks) > 0, "Should have received text delta events"
    assert len(full_content) > 0
    # Fix: `completed` was tracked but never asserted, leaving the
    # terminal-event check dead. The stream must end with a
    # response.completed event (the state-management tests in this suite
    # already rely on the gateway emitting it).
    assert completed, "Stream should emit a response.completed event"
def test_responses_api_streaming_translated_upstream(httpserver: HTTPServer):
    """Responses API streaming with non-OpenAI model → translated to chat completions upstream"""
    setup_openai_chat_mock(httpserver, content="Translated streaming response!")
    client = openai.OpenAI(api_key="test-key", base_url=f"{LLM_GATEWAY_BASE}/v1")
    stream = client.responses.create(
        model="claude-sonnet-4-20250514",
        input="Hello",
        stream=True,
    )
    deltas = [
        event.delta
        for event in stream
        if getattr(event, "type", None) == "response.output_text.delta"
        and getattr(event, "delta", None)
    ]
    assert (
        len(deltas) > 0
    ), "Should have received text delta events from translated stream"

288
tests/archgw/uv.lock generated
View file

@ -2,6 +2,47 @@ version = 1
revision = 3
requires-python = ">=3.12"
[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
]
[[package]]
name = "anthropic"
version = "0.81.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "docstring-parser" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f3/c2/d2bb9b3c82c386abf3b2c32ae0452a8dcb89ed2809d875e1420bea22e318/anthropic-0.81.0.tar.gz", hash = "sha256:bab2d4e45c2e81a0668fdc2da2f7fd665ed8a0295ba3c86450f9dcc3a7804524", size = 532935, upload-time = "2026-02-18T04:00:54.658Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/86/27/a18e1613da66b3c9c7565c92457a60de15e824a6dd2ed9bce0fbfe615ded/anthropic-0.81.0-py3-none-any.whl", hash = "sha256:ac54407e9a1f9b35e6e6c86f75bf403f0e54d60944f99f15f685a38d6829f20b", size = 455627, upload-time = "2026-02-18T04:00:53.207Z" },
]
[[package]]
name = "anyio"
version = "4.12.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
[[package]]
name = "attrs"
version = "26.1.0"
@ -221,6 +262,24 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/c4/da7089cd7aa4ab554f56e18a7fb08dcfed8fd2ae91fa528f5b1be207a148/deepdiff-9.0.0-py3-none-any.whl", hash = "sha256:b1ae0dd86290d86a03de5fbee728fde43095c1472ae4974bdab23ab4656305bd", size = 170540, upload-time = "2026-03-30T05:52:22.008Z" },
]
[[package]]
name = "distro"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
]
[[package]]
name = "docstring-parser"
version = "0.17.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b2/9d/c3b43da9515bd270df0f80548d9944e389870713cc1fe2b8fb35fe2bcefd/docstring_parser-0.17.0.tar.gz", hash = "sha256:583de4a309722b3315439bb31d64ba3eebada841f2e2cee23b99df001434c912", size = 27442, upload-time = "2025-07-21T07:35:01.868Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/55/e2/2537ebcff11c1ee1ff17d8d0b6f4db75873e3b0fb32c2d4a2ee31ecb310a/docstring_parser-0.17.0-py3-none-any.whl", hash = "sha256:cf2569abd23dce8099b300f9b4fa8191e9582dda731fd533daf54c4551658708", size = 36896, upload-time = "2025-07-21T07:35:00.684Z" },
]
[[package]]
name = "h11"
version = "0.16.0"
@ -230,6 +289,34 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
]
[[package]]
name = "httpcore"
version = "1.0.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
]
[[package]]
name = "httpx"
version = "0.28.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "certifi" },
{ name = "httpcore" },
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]
[[package]]
name = "idna"
version = "3.11"
@ -248,6 +335,74 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]
[[package]]
name = "jiter"
version = "0.13.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2e/30/7687e4f87086829955013ca12a9233523349767f69653ebc27036313def9/jiter-0.13.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0a2bd69fc1d902e89925fc34d1da51b2128019423d7b339a45d9e99c894e0663", size = 307958, upload-time = "2026-02-02T12:35:57.165Z" },
{ url = "https://files.pythonhosted.org/packages/c3/27/e57f9a783246ed95481e6749cc5002a8a767a73177a83c63ea71f0528b90/jiter-0.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f917a04240ef31898182f76a332f508f2cc4b57d2b4d7ad2dbfebbfe167eb505", size = 318597, upload-time = "2026-02-02T12:35:58.591Z" },
{ url = "https://files.pythonhosted.org/packages/cf/52/e5719a60ac5d4d7c5995461a94ad5ef962a37c8bf5b088390e6fad59b2ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1e2b199f446d3e82246b4fd9236d7cb502dc2222b18698ba0d986d2fecc6152", size = 348821, upload-time = "2026-02-02T12:36:00.093Z" },
{ url = "https://files.pythonhosted.org/packages/61/db/c1efc32b8ba4c740ab3fc2d037d8753f67685f475e26b9d6536a4322bcdd/jiter-0.13.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04670992b576fa65bd056dbac0c39fe8bd67681c380cb2b48efa885711d9d726", size = 364163, upload-time = "2026-02-02T12:36:01.937Z" },
{ url = "https://files.pythonhosted.org/packages/55/8a/fb75556236047c8806995671a18e4a0ad646ed255276f51a20f32dceaeec/jiter-0.13.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a1aff1fbdb803a376d4d22a8f63f8e7ccbce0b4890c26cc7af9e501ab339ef0", size = 483709, upload-time = "2026-02-02T12:36:03.41Z" },
{ url = "https://files.pythonhosted.org/packages/7e/16/43512e6ee863875693a8e6f6d532e19d650779d6ba9a81593ae40a9088ff/jiter-0.13.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b3fb8c2053acaef8580809ac1d1f7481a0a0bdc012fd7f5d8b18fb696a5a089", size = 370480, upload-time = "2026-02-02T12:36:04.791Z" },
{ url = "https://files.pythonhosted.org/packages/f8/4c/09b93e30e984a187bc8aaa3510e1ec8dcbdcd71ca05d2f56aac0492453aa/jiter-0.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdaba7d87e66f26a2c45d8cbadcbfc4bf7884182317907baf39cfe9775bb4d93", size = 360735, upload-time = "2026-02-02T12:36:06.994Z" },
{ url = "https://files.pythonhosted.org/packages/1a/1b/46c5e349019874ec5dfa508c14c37e29864ea108d376ae26d90bee238cd7/jiter-0.13.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b88d649135aca526da172e48083da915ec086b54e8e73a425ba50999468cc08", size = 391814, upload-time = "2026-02-02T12:36:08.368Z" },
{ url = "https://files.pythonhosted.org/packages/15/9e/26184760e85baee7162ad37b7912797d2077718476bf91517641c92b3639/jiter-0.13.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e404ea551d35438013c64b4f357b0474c7abf9f781c06d44fcaf7a14c69ff9e2", size = 513990, upload-time = "2026-02-02T12:36:09.993Z" },
{ url = "https://files.pythonhosted.org/packages/e9/34/2c9355247d6debad57a0a15e76ab1566ab799388042743656e566b3b7de1/jiter-0.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1f4748aad1b4a93c8bdd70f604d0f748cdc0e8744c5547798acfa52f10e79228", size = 548021, upload-time = "2026-02-02T12:36:11.376Z" },
{ url = "https://files.pythonhosted.org/packages/ac/4a/9f2c23255d04a834398b9c2e0e665382116911dc4d06b795710503cdad25/jiter-0.13.0-cp312-cp312-win32.whl", hash = "sha256:0bf670e3b1445fc4d31612199f1744f67f889ee1bbae703c4b54dc097e5dd394", size = 203024, upload-time = "2026-02-02T12:36:12.682Z" },
{ url = "https://files.pythonhosted.org/packages/09/ee/f0ae675a957ae5a8f160be3e87acea6b11dc7b89f6b7ab057e77b2d2b13a/jiter-0.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:15db60e121e11fe186c0b15236bd5d18381b9ddacdcf4e659feb96fc6c969c92", size = 205424, upload-time = "2026-02-02T12:36:13.93Z" },
{ url = "https://files.pythonhosted.org/packages/1b/02/ae611edf913d3cbf02c97cdb90374af2082c48d7190d74c1111dde08bcdd/jiter-0.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:41f92313d17989102f3cb5dd533a02787cdb99454d494344b0361355da52fcb9", size = 186818, upload-time = "2026-02-02T12:36:15.308Z" },
{ url = "https://files.pythonhosted.org/packages/91/9c/7ee5a6ff4b9991e1a45263bfc46731634c4a2bde27dfda6c8251df2d958c/jiter-0.13.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1f8a55b848cbabf97d861495cd65f1e5c590246fabca8b48e1747c4dfc8f85bf", size = 306897, upload-time = "2026-02-02T12:36:16.748Z" },
{ url = "https://files.pythonhosted.org/packages/7c/02/be5b870d1d2be5dd6a91bdfb90f248fbb7dcbd21338f092c6b89817c3dbf/jiter-0.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f556aa591c00f2c45eb1b89f68f52441a016034d18b65da60e2d2875bbbf344a", size = 317507, upload-time = "2026-02-02T12:36:18.351Z" },
{ url = "https://files.pythonhosted.org/packages/da/92/b25d2ec333615f5f284f3a4024f7ce68cfa0604c322c6808b2344c7f5d2b/jiter-0.13.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7e1d61da332ec412350463891923f960c3073cf1aae93b538f0bb4c8cd46efb", size = 350560, upload-time = "2026-02-02T12:36:19.746Z" },
{ url = "https://files.pythonhosted.org/packages/be/ec/74dcb99fef0aca9fbe56b303bf79f6bd839010cb18ad41000bf6cc71eec0/jiter-0.13.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3097d665a27bc96fd9bbf7f86178037db139f319f785e4757ce7ccbf390db6c2", size = 363232, upload-time = "2026-02-02T12:36:21.243Z" },
{ url = "https://files.pythonhosted.org/packages/1b/37/f17375e0bb2f6a812d4dd92d7616e41917f740f3e71343627da9db2824ce/jiter-0.13.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d01ecc3a8cbdb6f25a37bd500510550b64ddf9f7d64a107d92f3ccb25035d0f", size = 483727, upload-time = "2026-02-02T12:36:22.688Z" },
{ url = "https://files.pythonhosted.org/packages/77/d2/a71160a5ae1a1e66c1395b37ef77da67513b0adba73b993a27fbe47eb048/jiter-0.13.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed9bbc30f5d60a3bdf63ae76beb3f9db280d7f195dfcfa61af792d6ce912d159", size = 370799, upload-time = "2026-02-02T12:36:24.106Z" },
{ url = "https://files.pythonhosted.org/packages/01/99/ed5e478ff0eb4e8aa5fd998f9d69603c9fd3f32de3bd16c2b1194f68361c/jiter-0.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98fbafb6e88256f4454de33c1f40203d09fc33ed19162a68b3b257b29ca7f663", size = 359120, upload-time = "2026-02-02T12:36:25.519Z" },
{ url = "https://files.pythonhosted.org/packages/16/be/7ffd08203277a813f732ba897352797fa9493faf8dc7995b31f3d9cb9488/jiter-0.13.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5467696f6b827f1116556cb0db620440380434591e93ecee7fd14d1a491b6daa", size = 390664, upload-time = "2026-02-02T12:36:26.866Z" },
{ url = "https://files.pythonhosted.org/packages/d1/84/e0787856196d6d346264d6dcccb01f741e5f0bd014c1d9a2ebe149caf4f3/jiter-0.13.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2d08c9475d48b92892583df9da592a0e2ac49bcd41fae1fec4f39ba6cf107820", size = 513543, upload-time = "2026-02-02T12:36:28.217Z" },
{ url = "https://files.pythonhosted.org/packages/65/50/ecbd258181c4313cf79bca6c88fb63207d04d5bf5e4f65174114d072aa55/jiter-0.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:aed40e099404721d7fcaf5b89bd3b4568a4666358bcac7b6b15c09fb6252ab68", size = 547262, upload-time = "2026-02-02T12:36:29.678Z" },
{ url = "https://files.pythonhosted.org/packages/27/da/68f38d12e7111d2016cd198161b36e1f042bd115c169255bcb7ec823a3bf/jiter-0.13.0-cp313-cp313-win32.whl", hash = "sha256:36ebfbcffafb146d0e6ffb3e74d51e03d9c35ce7c625c8066cdbfc7b953bdc72", size = 200630, upload-time = "2026-02-02T12:36:31.808Z" },
{ url = "https://files.pythonhosted.org/packages/25/65/3bd1a972c9a08ecd22eb3b08a95d1941ebe6938aea620c246cf426ae09c2/jiter-0.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:8d76029f077379374cf0dbc78dbe45b38dec4a2eb78b08b5194ce836b2517afc", size = 202602, upload-time = "2026-02-02T12:36:33.679Z" },
{ url = "https://files.pythonhosted.org/packages/15/fe/13bd3678a311aa67686bb303654792c48206a112068f8b0b21426eb6851e/jiter-0.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:bb7613e1a427cfcb6ea4544f9ac566b93d5bf67e0d48c787eca673ff9c9dff2b", size = 185939, upload-time = "2026-02-02T12:36:35.065Z" },
{ url = "https://files.pythonhosted.org/packages/49/19/a929ec002ad3228bc97ca01dbb14f7632fffdc84a95ec92ceaf4145688ae/jiter-0.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fa476ab5dd49f3bf3a168e05f89358c75a17608dbabb080ef65f96b27c19ab10", size = 316616, upload-time = "2026-02-02T12:36:36.579Z" },
{ url = "https://files.pythonhosted.org/packages/52/56/d19a9a194afa37c1728831e5fb81b7722c3de18a3109e8f282bfc23e587a/jiter-0.13.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade8cb6ff5632a62b7dbd4757d8c5573f7a2e9ae285d6b5b841707d8363205ef", size = 346850, upload-time = "2026-02-02T12:36:38.058Z" },
{ url = "https://files.pythonhosted.org/packages/36/4a/94e831c6bf287754a8a019cb966ed39ff8be6ab78cadecf08df3bb02d505/jiter-0.13.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9950290340acc1adaded363edd94baebcee7dabdfa8bee4790794cd5cfad2af6", size = 358551, upload-time = "2026-02-02T12:36:39.417Z" },
{ url = "https://files.pythonhosted.org/packages/a2/ec/a4c72c822695fa80e55d2b4142b73f0012035d9fcf90eccc56bc060db37c/jiter-0.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2b4972c6df33731aac0742b64fd0d18e0a69bc7d6e03108ce7d40c85fd9e3e6d", size = 201950, upload-time = "2026-02-02T12:36:40.791Z" },
{ url = "https://files.pythonhosted.org/packages/b6/00/393553ec27b824fbc29047e9c7cd4a3951d7fbe4a76743f17e44034fa4e4/jiter-0.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:701a1e77d1e593c1b435315ff625fd071f0998c5f02792038a5ca98899261b7d", size = 185852, upload-time = "2026-02-02T12:36:42.077Z" },
{ url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" },
{ url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" },
{ url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" },
{ url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" },
{ url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" },
{ url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" },
{ url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" },
{ url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" },
{ url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" },
{ url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" },
{ url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" },
{ url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" },
{ url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" },
{ url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" },
{ url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" },
{ url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" },
{ url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" },
{ url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" },
{ url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" },
{ url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" },
{ url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" },
{ url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" },
{ url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" },
{ url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" },
{ url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" },
{ url = "https://files.pythonhosted.org/packages/80/60/e50fa45dd7e2eae049f0ce964663849e897300433921198aef94b6ffa23a/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:3d744a6061afba08dd7ae375dcde870cffb14429b7477e10f67e9e6d68772a0a", size = 305169, upload-time = "2026-02-02T12:37:50.376Z" },
{ url = "https://files.pythonhosted.org/packages/d2/73/a009f41c5eed71c49bec53036c4b33555afcdee70682a18c6f66e396c039/jiter-0.13.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:ff732bd0a0e778f43d5009840f20b935e79087b4dc65bd36f1cd0f9b04b8ff7f", size = 303808, upload-time = "2026-02-02T12:37:52.092Z" },
{ url = "https://files.pythonhosted.org/packages/c4/10/528b439290763bff3d939268085d03382471b442f212dca4ff5f12802d43/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab44b178f7981fcaea7e0a5df20e773c663d06ffda0198f1a524e91b2fde7e59", size = 337384, upload-time = "2026-02-02T12:37:53.582Z" },
{ url = "https://files.pythonhosted.org/packages/67/8a/a342b2f0251f3dac4ca17618265d93bf244a2a4d089126e81e4c1056ac50/jiter-0.13.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb00b6d26db67a05fe3e12c76edc75f32077fb51deed13822dc648fa373bc19", size = 343768, upload-time = "2026-02-02T12:37:55.055Z" },
]
[[package]]
name = "markupsafe"
version = "3.0.3"
@ -311,6 +466,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" },
]
[[package]]
name = "openai"
version = "2.21.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/92/e5/3d197a0947a166649f566706d7a4c8f7fe38f1fa7b24c9bcffe4c7591d44/openai-2.21.0.tar.gz", hash = "sha256:81b48ce4b8bbb2cc3af02047ceb19561f7b1dc0d4e52d1de7f02abfd15aa59b7", size = 644374, upload-time = "2026-02-14T00:12:01.577Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cc/56/0a89092a453bb2c676d66abee44f863e742b2110d4dbb1dbcca3f7e5fc33/openai-2.21.0-py3-none-any.whl", hash = "sha256:0bc1c775e5b1536c294eded39ee08f8407656537ccc71b1004104fe1602e267c", size = 1103065, upload-time = "2026-02-14T00:11:59.603Z" },
]
[[package]]
name = "orderly-set"
version = "5.5.0"
@ -346,7 +520,9 @@ name = "plano-mock-tests"
version = "0.0.1"
source = { editable = "." }
dependencies = [
{ name = "anthropic" },
{ name = "deepdiff" },
{ name = "openai" },
{ name = "pytest" },
{ name = "pytest-httpserver" },
{ name = "pytest-retry" },
@ -362,7 +538,9 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "anthropic", specifier = ">=0.66.0" },
{ name = "deepdiff", specifier = ">=8.0.1" },
{ name = "openai", specifier = ">=1.0.0" },
{ name = "pytest", specifier = ">=8.3.3" },
{ name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.1.0" },
{ name = "pytest-httpserver", specifier = ">=1.1.0" },
@ -391,6 +569,92 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" },
]
[[package]]
name = "pydantic"
version = "2.12.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
{ name = "pydantic-core" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" },
]
[[package]]
name = "pydantic-core"
version = "2.41.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" },
{ url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" },
{ url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" },
{ url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" },
{ url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" },
{ url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" },
{ url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" },
{ url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" },
{ url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" },
{ url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" },
{ url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" },
{ url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" },
{ url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" },
{ url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" },
{ url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" },
{ url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" },
{ url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" },
{ url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" },
{ url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" },
{ url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" },
{ url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" },
{ url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" },
{ url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" },
{ url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" },
{ url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" },
{ url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" },
{ url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" },
{ url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" },
{ url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" },
{ url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" },
{ url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" },
{ url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" },
{ url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" },
{ url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" },
{ url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" },
{ url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" },
{ url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" },
{ url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" },
{ url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" },
{ url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" },
{ url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" },
{ url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" },
{ url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" },
{ url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" },
{ url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" },
{ url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" },
{ url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" },
{ url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" },
{ url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" },
{ url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" },
{ url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" },
{ url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" },
{ url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" },
{ url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
{ url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
{ url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" },
{ url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" },
{ url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" },
{ url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" },
]
[[package]]
name = "pygments"
version = "2.20.0"
@ -535,6 +799,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/33/d1/8bb87d21e9aeb323cc03034f5eaf2c8f69841e40e4853c2627edf8111ed3/termcolor-3.3.0-py3-none-any.whl", hash = "sha256:cf642efadaf0a8ebbbf4bc7a31cec2f9b5f21a9f726f4ccbb08192c9c26f43a5", size = 7734, upload-time = "2025-12-29T12:55:20.718Z" },
]
[[package]]
name = "tqdm"
version = "4.67.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
]
[[package]]
name = "trio"
version = "0.33.0"
@ -575,6 +851,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]]
name = "typing-inspection"
version = "0.4.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
]
[[package]]
name = "urllib3"
version = "2.6.3"

View file

@ -120,7 +120,8 @@ def test_openai_client_with_alias_arch_summarize_v1():
response_content = completion.choices[0].message.content
logger.info(f"Response from arch.summarize.v1 alias: {response_content}")
assert response_content == "Hello from alias arch.summarize.v1!"
assert response_content is not None
assert len(response_content) > 0
def test_openai_client_with_alias_arch_v1():
@ -146,7 +147,8 @@ def test_openai_client_with_alias_arch_v1():
response_content = completion.choices[0].message.content
logger.info(f"Response from arch.v1 alias: {response_content}")
assert response_content == "Hello from alias arch.v1!"
assert response_content is not None
assert len(response_content) > 0
def test_anthropic_client_with_alias_arch_summarize_v1():
@ -171,7 +173,8 @@ def test_anthropic_client_with_alias_arch_summarize_v1():
logger.info(
f"Response from arch.summarize.v1 alias via Anthropic: {response_content}"
)
assert response_content == "Hello from alias arch.summarize.v1 via Anthropic!"
assert response_content is not None
assert len(response_content) > 0
def test_anthropic_client_with_alias_arch_v1():
@ -194,7 +197,8 @@ def test_anthropic_client_with_alias_arch_v1():
response_content = "".join(b.text for b in message.content if b.type == "text")
logger.info(f"Response from arch.v1 alias via Anthropic: {response_content}")
assert response_content == "Hello from alias arch.v1 via Anthropic!"
assert response_content is not None
assert len(response_content) > 0
def test_openai_client_with_alias_streaming():
@ -228,7 +232,8 @@ def test_openai_client_with_alias_streaming():
full_content = "".join(content_chunks)
logger.info(f"Streaming response from arch.summarize.v1 alias: {full_content}")
assert full_content == "Hello from streaming alias!"
assert full_content is not None
assert len(full_content) > 0
def test_anthropic_client_with_alias_streaming():
@ -256,7 +261,8 @@ def test_anthropic_client_with_alias_streaming():
logger.info(
f"Streaming response from arch.summarize.v1 alias via Anthropic: {full_text}"
)
assert full_text == "Hello from streaming alias via Anthropic!"
assert full_text is not None
assert len(full_text) > 0
def test_400_error_handling_with_alias():
@ -400,7 +406,8 @@ def test_direct_model_4o_mini_openai():
response_content = completion.choices[0].message.content
logger.info(f"Response from direct 4o-mini: {response_content}")
assert response_content == "Hello from direct 4o-mini!"
assert response_content is not None
assert len(response_content) > 0
def test_direct_model_4o_mini_anthropic():
@ -423,7 +430,8 @@ def test_direct_model_4o_mini_anthropic():
response_content = "".join(b.text for b in message.content if b.type == "text")
logger.info(f"Response from direct 4o-mini via Anthropic: {response_content}")
assert response_content == "Hello from direct 4o-mini via Anthropic!"
assert response_content is not None
assert len(response_content) > 0
def test_anthropic_thinking_mode_streaming():

View file

@ -405,7 +405,8 @@ def test_claude_v1_messages_api():
],
)
assert message.content[0].text == "Hello from Claude!"
assert message.content[0].text is not None
assert len(message.content[0].text) > 0
def test_claude_v1_messages_api_streaming():
@ -432,8 +433,10 @@ def test_claude_v1_messages_api_streaming():
# A safe way to reassemble text from the content blocks:
final_text = "".join(b.text for b in final.content if b.type == "text")
assert full_text == "Hello from Claude!"
assert final_text == "Hello from Claude!"
assert full_text is not None
assert len(full_text) > 0
assert final_text is not None
assert len(final_text) > 0
def test_anthropic_client_with_openai_model_streaming():
@ -463,8 +466,10 @@ def test_anthropic_client_with_openai_model_streaming():
# A safe way to reassemble text from the content blocks:
final_text = "".join(b.text for b in final.content if b.type == "text")
assert full_text == "Hello from ChatGPT!"
assert final_text == "Hello from ChatGPT!"
assert full_text is not None
assert len(full_text) > 0
assert final_text is not None
assert len(final_text) > 0
def test_openai_gpt4o_mini_v1_messages_api():
@ -488,7 +493,8 @@ def test_openai_gpt4o_mini_v1_messages_api():
],
)
assert completion.choices[0].message.content == "Hello from GPT-4o-mini!"
assert completion.choices[0].message.content is not None
assert len(completion.choices[0].message.content) > 0
def test_openai_gpt4o_mini_v1_messages_api_streaming():
@ -521,7 +527,8 @@ def test_openai_gpt4o_mini_v1_messages_api_streaming():
# Reconstruct the full message
full_content = "".join(content_chunks)
assert full_content == "Hello from GPT-4o-mini!"
assert full_content is not None
assert len(full_content) > 0
def test_openai_client_with_claude_model_streaming():