add support for v1/messages and transformations (#558)

* pushing draft PR

* transformations are working. Now need to add some tests next

* updated tests and added necessary response transformations for Anthropic's message response object

* fixed bugs for integration tests

* fixed doc tests

* fixed serialization issues with enums on response

* adding some debug logs to help

* fixed issues with non-streaming responses

* updated the stream_context to update response bytes

* the serialized bytes length must be set in the response side

* fixed the debug statement that was causing the integration tests for wasm to fail

* fixing json parsing errors

* intentionally removing the headers

* making sure that we convert the raw bytes to the correct provider type upstream

* fixing non-streaming responses to transform correctly

* /v1/messages works with transformations to and from /v1/chat/completions

* updating the CLI and demos to support anthropic vs. claude

* adding the anthropic key to the preference based routing tests

* fixed test cases and added more structured logs

* fixed integration tests and cleaned up logs

* added python client tests for anthropic and openai

* cleaned up logs and fixed issue with connectivity for llm gateway in weather forecast demo

* fixing the tests. python dependency order was broken

* updated the OpenAI client to fix demos

* removed the raw response debug statement

* fixed the dup cloning issue and cleaned up the ProviderRequestType enum and traits

* fixing logs

* moved away from string literals to consts

* fixed streaming from Anthropic Client to OpenAI

* removed debug statement that would likely trip up integration tests

* fixed integration tests for llm_gateway

* cleaned up test cases and removed unnecessary crates

* fixing comments from PR

* fixed bug whereby we were sending an OpenAIChatCompletions request object to llm_gateway even though the request may have been AnthropicMessages

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-4.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-9.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-10.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-41.local>
Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-136.local>
This commit is contained in:
Salman Paracha 2025-09-10 07:40:30 -07:00 committed by GitHub
parent bb71d041a0
commit fb0581fd39
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
38 changed files with 2842 additions and 919 deletions

View file

@ -1,110 +1,9 @@
pub mod anthropic;
pub mod openai;
// Re-export all types for convenience
pub use anthropic::*;
pub use openai::*;
/// Common trait that all API definitions must implement
///
/// This trait ensures consistency across different AI provider API definitions
/// and makes it easy to add new providers like Gemini, Claude, etc.
///
/// Note: This is different from the `ApiProvider` enum in `clients::endpoints`
/// which represents provider identification, while this trait defines API capabilities.
///
/// # Benefits
///
/// - **Consistency**: All API providers implement the same interface
/// - **Extensibility**: Easy to add new providers without breaking existing code
/// - **Type Safety**: Compile-time guarantees that all providers implement required methods
/// - **Discoverability**: Clear documentation of what capabilities each API supports
///
/// # Example implementation for a new provider:
///
/// ```rust,ignore
/// use serde::{Deserialize, Serialize};
/// use super::ApiDefinition;
///
/// #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
/// pub enum GeminiApi {
/// GenerateContent,
/// ChatCompletions,
/// }
///
/// impl GeminiApi {
/// pub fn endpoint(&self) -> &'static str {
/// match self {
/// GeminiApi::GenerateContent => "/v1/models/gemini-pro:generateContent",
/// GeminiApi::ChatCompletions => "/v1/models/gemini-pro:chat",
/// }
/// }
///
/// pub fn from_endpoint(endpoint: &str) -> Option<Self> {
/// match endpoint {
/// "/v1/models/gemini-pro:generateContent" => Some(GeminiApi::GenerateContent),
/// "/v1/models/gemini-pro:chat" => Some(GeminiApi::ChatCompletions),
/// _ => None,
/// }
/// }
///
/// pub fn supports_streaming(&self) -> bool {
/// match self {
/// GeminiApi::GenerateContent => true,
/// GeminiApi::ChatCompletions => true,
/// }
/// }
///
/// pub fn supports_tools(&self) -> bool {
/// match self {
/// GeminiApi::GenerateContent => true,
/// GeminiApi::ChatCompletions => false,
/// }
/// }
///
/// pub fn supports_vision(&self) -> bool {
/// match self {
/// GeminiApi::GenerateContent => true,
/// GeminiApi::ChatCompletions => false,
/// }
/// }
/// }
///
/// impl ApiDefinition for GeminiApi {
/// fn endpoint(&self) -> &'static str {
/// self.endpoint()
/// }
///
/// fn from_endpoint(endpoint: &str) -> Option<Self> {
/// Self::from_endpoint(endpoint)
/// }
///
/// fn supports_streaming(&self) -> bool {
/// self.supports_streaming()
/// }
///
/// fn supports_tools(&self) -> bool {
/// self.supports_tools()
/// }
///
/// fn supports_vision(&self) -> bool {
/// self.supports_vision()
/// }
/// }
///
/// // Now you can use generic code that works with any API:
/// fn print_api_info<T: ApiDefinition>(api: &T) {
/// println!("Endpoint: {}", api.endpoint());
/// println!("Supports streaming: {}", api.supports_streaming());
/// println!("Supports tools: {}", api.supports_tools());
/// println!("Supports vision: {}", api.supports_vision());
/// }
///
/// // Works with both OpenAI and Anthropic (and future Gemini)
/// print_api_info(&OpenAIApi::ChatCompletions);
/// print_api_info(&AnthropicApi::Messages);
/// print_api_info(&GeminiApi::GenerateContent);
/// ```
pub trait ApiDefinition {
/// Returns the endpoint path for this API
fn endpoint(&self) -> &'static str;
@ -132,6 +31,7 @@ pub trait ApiDefinition {
#[cfg(test)]
mod tests {
use super::*;
use crate::{CHAT_COMPLETIONS_PATH, MESSAGES_PATH};
#[test]
fn test_generic_api_functionality() {
@ -150,8 +50,8 @@ mod tests {
fn test_api_detection_from_endpoints() {
// Test that we can detect APIs from endpoints using the trait
let endpoints = vec![
"/v1/chat/completions",
"/v1/messages",
CHAT_COMPLETIONS_PATH,
MESSAGES_PATH,
"/v1/unknown"
];