Add mock-based E2E tests and gate live tests to main/nightly

Introduce a new mock-based E2E test suite that uses pytest_httpserver to
simulate LLM provider responses, eliminating the need for real API keys
on PR builds. The mock tests cover model alias routing, protocol
transformation (OpenAI↔Anthropic), Responses API passthrough/translation,
streaming, tool calls, thinking mode, and multi-turn state management.

CI changes:
- Add mock-e2e-tests job (zero secrets, runs on every PR)
- Gate all live E2E jobs to main pushes + nightly schedule
- Scope secrets to only the keys each job actually needs
- Add daily cron schedule for full live test coverage

Also relaxes exact-match assertions in live e2e tests to structural
checks (non-null, non-empty) since LLM output is non-deterministic.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Adil Hafeez 2026-02-18 19:33:48 +00:00
parent baeee56f6b
commit 3a6a672c9d
11 changed files with 1758 additions and 43 deletions

View file

@@ -405,7 +405,8 @@ def test_claude_v1_messages_api():
],
)
assert message.content[0].text == "Hello from Claude!"
assert message.content[0].text is not None
assert len(message.content[0].text) > 0
def test_claude_v1_messages_api_streaming():
@@ -432,8 +433,10 @@ def test_claude_v1_messages_api_streaming():
# A safe way to reassemble text from the content blocks:
final_text = "".join(b.text for b in final.content if b.type == "text")
assert full_text == "Hello from Claude!"
assert final_text == "Hello from Claude!"
assert full_text is not None
assert len(full_text) > 0
assert final_text is not None
assert len(final_text) > 0
def test_anthropic_client_with_openai_model_streaming():
@@ -463,8 +466,10 @@ def test_anthropic_client_with_openai_model_streaming():
# A safe way to reassemble text from the content blocks:
final_text = "".join(b.text for b in final.content if b.type == "text")
assert full_text == "Hello from ChatGPT!"
assert final_text == "Hello from ChatGPT!"
assert full_text is not None
assert len(full_text) > 0
assert final_text is not None
assert len(final_text) > 0
def test_openai_gpt4o_mini_v1_messages_api():
@@ -488,7 +493,8 @@ def test_openai_gpt4o_mini_v1_messages_api():
],
)
assert completion.choices[0].message.content == "Hello from GPT-4o-mini!"
assert completion.choices[0].message.content is not None
assert len(completion.choices[0].message.content) > 0
def test_openai_gpt4o_mini_v1_messages_api_streaming():
@@ -521,7 +527,8 @@ def test_openai_gpt4o_mini_v1_messages_api_streaming():
# Reconstruct the full message
full_content = "".join(content_chunks)
assert full_content == "Hello from GPT-4o-mini!"
assert full_content is not None
assert len(full_content) > 0
def test_openai_client_with_claude_model_streaming():