add support for v1/messages and transformations (#558)

* pushing draft PR

* transformations are working; now need to add tests

* updated tests and added the necessary response transformations for Anthropic's Messages response object

* fixed bugs for integration tests

* fixed doc tests

* fixed serialization issues with enums on response

* adding some debug logs to help with debugging

* fixed issues with non-streaming responses

* updated the stream_context to update response bytes

* the serialized bytes length must be set on the response side

* fixed the debug statement that was causing the integration tests for wasm to fail

* fixing JSON parsing errors

* intentionally removing the headers

* making sure that we convert the raw bytes to the correct provider type upstream

* fixing non-streaming responses to transform correctly

* /v1/messages works with transformations to and from /v1/chat/completions (see the sketch after this list)

* updating the CLI and demos to support anthropic vs. claude

* adding the Anthropic key to the preference-based routing tests

* fixed test cases and added more structured logs

* fixed integration tests and cleaned up logs

* added Python client tests for Anthropic and OpenAI

* cleaned up logs and fixed a connectivity issue with the LLM gateway in the weather forecast demo

* fixing the tests; the Python dependency order was broken

* updated the OpenAI client to fix demos

* removed the raw response debug statement

* fixed the duplicate-cloning issue and cleaned up the ProviderRequestType enum and traits

* fixing logs

* moved away from string literals to consts

* fixed streaming from the Anthropic client to OpenAI

* removed debug statement that would likely trip up integration tests

* fixed integration tests for llm_gateway

* cleaned up test cases and removed unnecessary crates

* fixing comments from PR

* fixed a bug whereby we were sending an OpenAIChatCompletions request object to llm_gateway even though the request may have been AnthropicMessages
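
* in essence, the change maps between the two API shapes in both directions; a minimal sketch of that mapping, in Python for illustration only (the gateway implements this in Rust, and the function names below are hypothetical, not the actual implementation):

      def anthropic_to_openai_request(body: dict) -> dict:
          """Anthropic /v1/messages -> OpenAI /v1/chat/completions request.
          Anthropic carries the system prompt as a top-level `system` field;
          OpenAI expects it as the first message with role "system"."""
          messages = []
          if body.get("system"):
              messages.append({"role": "system", "content": body["system"]})
          messages.extend(body["messages"])
          return {
              "model": body["model"],
              "messages": messages,
              "max_tokens": body.get("max_tokens"),
              "stream": body.get("stream", False),
          }

      def openai_to_anthropic_response(body: dict) -> dict:
          """Non-streaming OpenAI chat completion -> Anthropic Messages response."""
          choice = body["choices"][0]
          # OpenAI finish reasons map onto Anthropic stop reasons
          stop_map = {"stop": "end_turn", "length": "max_tokens"}
          return {
              "id": body["id"],
              "type": "message",
              "role": "assistant",
              "model": body["model"],
              "content": [{"type": "text", "text": choice["message"]["content"]}],
              "stop_reason": stop_map.get(choice.get("finish_reason"), "end_turn"),
              "usage": {
                  "input_tokens": body["usage"]["prompt_tokens"],
                  "output_tokens": body["usage"]["completion_tokens"],
              },
          }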

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-4.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-9.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-10.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-41.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-136.local>
Salman Paracha committed 2025-09-10 07:40:30 -07:00 (via GitHub)
parent bb71d041a0 · commit fb0581fd39
38 changed files with 2842 additions and 919 deletions

@@ -3,9 +3,12 @@ import pytest
import requests
from deepdiff import DeepDiff
import re
import anthropic
import openai
from common import (
    PROMPT_GATEWAY_ENDPOINT,
    LLM_GATEWAY_ENDPOINT,
    PREFILL_LIST,
    get_arch_messages,
    get_data_chunks,
@@ -352,3 +355,178 @@ def test_prompt_gateway_prompt_guard_jailbreak(stream):
        response_json.get("choices")[0]["message"]["content"]
        == "Looks like you're curious about my abilities, but I can only provide assistance for weather forecasting."
    )


def test_claude_v1_messages_api():
    """Test Claude client using /v1/messages API through llm_gateway (port 12000)"""
    # Get the base URL from the LLM gateway endpoint
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = anthropic.Anthropic(
        api_key="test-key", base_url=base_url  # Dummy key for testing
    )

    message = client.messages.create(
        model="claude-sonnet-4-20250514",  # Use working model from smoke test
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Hello, please respond with exactly: Hello from Claude!",
            }
        ],
    )

    assert message.content[0].text == "Hello from Claude!"


def test_claude_v1_messages_api_streaming():
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = anthropic.Anthropic(api_key="test-key", base_url=base_url)

    with client.messages.stream(
        model="claude-sonnet-4-20250514",
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Hello, please respond with exactly: Hello from Claude!",
            }
        ],
    ) as stream:
        # This yields only text deltas in order
        pieces = [t for t in stream.text_stream]
        full_text = "".join(pieces)

        # You can also get the fully-assembled Message object
        final = stream.get_final_message()
        # A safe way to reassemble text from the content blocks:
        final_text = "".join(b.text for b in final.content if b.type == "text")

        assert full_text == "Hello from Claude!"
        assert final_text == "Hello from Claude!"


def test_anthropic_client_with_openai_model_streaming():
    """Test Anthropic client using /v1/messages API with OpenAI model (gpt-4o-mini)

    This tests the transformation: OpenAI upstream -> Anthropic client format with proper event lines
    """
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = anthropic.Anthropic(api_key="test-key", base_url=base_url)

    with client.messages.stream(
        model="gpt-4o-mini",  # OpenAI model via Anthropic client
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Hello, please respond with exactly: Hello from GPT-4o-mini via Anthropic!",
            }
        ],
    ) as stream:
        # This yields only text deltas in order
        pieces = [t for t in stream.text_stream]
        full_text = "".join(pieces)

        # You can also get the fully-assembled Message object
        final = stream.get_final_message()
        # A safe way to reassemble text from the content blocks:
        final_text = "".join(b.text for b in final.content if b.type == "text")

        assert full_text == "Hello from GPT-4o-mini via Anthropic!"
        assert final_text == "Hello from GPT-4o-mini via Anthropic!"


def test_openai_gpt4o_mini_v1_messages_api():
    """Test OpenAI GPT-4o-mini using /v1/chat/completions API through llm_gateway (port 12000)"""
    # Get the base URL from the LLM gateway endpoint
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = openai.OpenAI(
        api_key="test-key",  # Dummy key for testing
        base_url=f"{base_url}/v1",  # OpenAI needs /v1 suffix in base_url
    )

    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Hello, please respond with exactly: Hello from GPT-4o-mini!",
            }
        ],
    )

    assert completion.choices[0].message.content == "Hello from GPT-4o-mini!"


def test_openai_gpt4o_mini_v1_messages_api_streaming():
    """Test OpenAI GPT-4o-mini using /v1/chat/completions API with streaming through llm_gateway (port 12000)"""
    # Get the base URL from the LLM gateway endpoint
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = openai.OpenAI(
        api_key="test-key",  # Dummy key for testing
        base_url=f"{base_url}/v1",  # OpenAI needs /v1 suffix in base_url
    )

    stream = client.chat.completions.create(
        model="gpt-4o-mini",
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Hello, please respond with exactly: Hello from GPT-4o-mini!",
            }
        ],
        stream=True,
    )

    # Collect all the streaming chunks
    content_chunks = []
    for chunk in stream:
        if chunk.choices[0].delta.content:
            content_chunks.append(chunk.choices[0].delta.content)

    # Reconstruct the full message
    full_content = "".join(content_chunks)
    assert full_content == "Hello from GPT-4o-mini!"


def test_openai_client_with_claude_model_streaming():
    """Test OpenAI client using /v1/chat/completions API with Claude model (claude-sonnet-4-20250514)

    This tests the transformation: Anthropic upstream -> OpenAI client format with proper chunk handling
    """
    # Get the base URL from the LLM gateway endpoint
    base_url = LLM_GATEWAY_ENDPOINT.replace("/v1/chat/completions", "")
    client = openai.OpenAI(
        api_key="test-key",  # Dummy key for testing
        base_url=f"{base_url}/v1",  # OpenAI needs /v1 suffix in base_url
    )

    stream = client.chat.completions.create(
        model="claude-sonnet-4-20250514",  # Claude model via OpenAI client
        max_tokens=50,
        messages=[
            {
                "role": "user",
                "content": "Who are you? ALWAYS RESPOND WITH: I appreciate the request, but I should clarify that I'm Claude, made by Anthropic, not OpenAI. I don't want to create confusion about my origins.",
            }
        ],
        stream=True,
        temperature=0.1,
    )

    # Collect all the streaming chunks
    content_chunks = []
    for chunk in stream:
        if chunk.choices[0].delta.content:
            content_chunks.append(chunk.choices[0].delta.content)

    # Reconstruct the full message; the model may not echo the requested
    # text verbatim, so only assert that some content was streamed
    full_content = "".join(content_chunks)
    assert len(full_content) > 0