Use MCP tools for filter chain (#621)

* agents framework demo

* more changes

* add more changes

* pending changes

* fix tests

* fix more

* rebase with main and improve error handling from mcp

* add trace for filters

* add tests for client error, server error, and mcp error

* update schema validation code and rename kind => type in agent_filter

* fix agent description and pre-commit

* fix tests

* add provider-specific request parsing in agents chat

* fix precommit and tests

* cleanup demo

* update readme

* fix pre-commit

* refactor tracing

* fix fmt

* fix: handle MessageContent enum in responses API conversion

- Update request.rs to handle new MessageContent enum structure from main
- MessageContent can now be Text(String) or Items(Vec<InputContent>) (a short sketch of the handling follows this list)
- Handle new InputItem variants (ItemReference, FunctionCallOutput)
- Fixes compilation error after merging latest main (#632)
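
A minimal, self-contained sketch of what handling the two-variant content can look like; the MessageContent variants and the InputItem variant names come from this commit's description, while the stand-in InputContent shape and the content_to_plain_text helper are hypothetical and not the actual request.rs code:

// Stand-in types so the sketch compiles on its own; the real definitions live
// in the openai/openai_responses modules and differ in detail (hypothetical).
enum InputContent {
    Text { text: String },
    Other,
}

enum MessageContent {
    Text(String),
    Items(Vec<InputContent>),
}

// Flatten either variant into plain text, skipping non-text items such as
// item references or function-call outputs.
fn content_to_plain_text(content: &MessageContent) -> String {
    match content {
        MessageContent::Text(text) => text.clone(),
        MessageContent::Items(items) => items
            .iter()
            .filter_map(|item| match item {
                InputContent::Text { text } => Some(text.as_str()),
                InputContent::Other => None,
            })
            .collect::<Vec<_>>()
            .join("\n"),
    }
}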

* address pr feedback

* fix span

* fix build

* update openai version
Commit 2f9121407b (parent cb82a83c7b) by Adil Hafeez, 2025-12-17 17:30:14 -08:00, committed by GitHub
40 changed files with 4886 additions and 190 deletions


@@ -47,6 +47,28 @@ pub trait ProviderRequest: Send + Sync {
    fn remove_metadata_key(&mut self, key: &str) -> bool;
    fn get_temperature(&self) -> Option<f32>;

    /// Get message history as OpenAI Message format
    /// This is useful for processing chat history across different provider formats
    fn get_messages(&self) -> Vec<crate::apis::openai::Message>;

    /// Set message history from OpenAI Message format
    /// This converts OpenAI messages to the appropriate format for each provider type
    fn set_messages(&mut self, messages: &[crate::apis::openai::Message]);
}

impl ProviderRequestType {
    /// Set message history from OpenAI Message format
    /// This converts OpenAI messages to the appropriate format for each provider type
    pub fn set_messages(&mut self, messages: &[crate::apis::openai::Message]) {
        match self {
            Self::ChatCompletionsRequest(r) => r.set_messages(messages),
            Self::MessagesRequest(r) => r.set_messages(messages),
            Self::BedrockConverse(r) => r.set_messages(messages),
            Self::BedrockConverseStream(r) => r.set_messages(messages),
            Self::ResponsesAPIRequest(r) => r.set_messages(messages),
        }
    }
}
impl ProviderRequest for ProviderRequestType {
@@ -149,6 +171,26 @@ impl ProviderRequest for ProviderRequestType {
            Self::ResponsesAPIRequest(r) => r.get_temperature(),
        }
    }

    fn get_messages(&self) -> Vec<crate::apis::openai::Message> {
        match self {
            Self::ChatCompletionsRequest(r) => r.get_messages(),
            Self::MessagesRequest(r) => r.get_messages(),
            Self::BedrockConverse(r) => r.get_messages(),
            Self::BedrockConverseStream(r) => r.get_messages(),
            Self::ResponsesAPIRequest(r) => r.get_messages(),
        }
    }

    fn set_messages(&mut self, messages: &[crate::apis::openai::Message]) {
        match self {
            Self::ChatCompletionsRequest(r) => r.set_messages(messages),
            Self::MessagesRequest(r) => r.set_messages(messages),
            Self::BedrockConverse(r) => r.set_messages(messages),
            Self::BedrockConverseStream(r) => r.set_messages(messages),
            Self::ResponsesAPIRequest(r) => r.set_messages(messages),
        }
    }
}
/// Parse the client API from a byte slice.
@@ -934,4 +976,131 @@ mod tests {
            .message
            .contains("OpenAI ChatCompletions, Anthropic Messages, and OpenAI Responses"));
    }

    #[test]
    fn test_get_message_history_chat_completions() {
        use crate::apis::openai::{Message, MessageContent, Role};

        let chat_req = ChatCompletionsRequest {
            model: "gpt-4".to_string(),
            messages: vec![
                Message {
                    role: Role::System,
                    content: MessageContent::Text("You are helpful".to_string()),
                    name: None,
                    tool_calls: None,
                    tool_call_id: None,
                },
                Message {
                    role: Role::User,
                    content: MessageContent::Text("Hello!".to_string()),
                    name: None,
                    tool_calls: None,
                    tool_call_id: None,
                },
            ],
            ..Default::default()
        };

        let provider_req = ProviderRequestType::ChatCompletionsRequest(chat_req);
        let messages = provider_req.get_messages();

        assert_eq!(messages.len(), 2);
        assert_eq!(messages[0].role, Role::System);
        assert_eq!(messages[1].role, Role::User);
    }
    #[test]
    fn test_get_message_history_anthropic_messages() {
        use crate::apis::anthropic::{
            MessagesMessage, MessagesMessageContent, MessagesRequest, MessagesRole,
            MessagesSystemPrompt,
        };

        let anthropic_req = MessagesRequest {
            model: "claude-3-sonnet".to_string(),
            messages: vec![MessagesMessage {
                role: MessagesRole::User,
                content: MessagesMessageContent::Single("Hello!".to_string()),
            }],
            system: Some(MessagesSystemPrompt::Single("You are helpful".to_string())),
            max_tokens: 100,
            container: None,
            mcp_servers: None,
            metadata: None,
            service_tier: None,
            thinking: None,
            temperature: None,
            top_p: None,
            top_k: None,
            stream: None,
            stop_sequences: None,
            tools: None,
            tool_choice: None,
        };

        let provider_req = ProviderRequestType::MessagesRequest(anthropic_req);
        let messages = provider_req.get_messages();

        // Should have system message + user message
        assert_eq!(messages.len(), 2);
        assert_eq!(messages[0].role, crate::apis::openai::Role::System);
        assert_eq!(messages[1].role, crate::apis::openai::Role::User);
    }
    #[test]
    fn test_get_message_history_responses_api() {
        use crate::apis::openai_responses::{InputParam, ResponsesAPIRequest};

        let responses_req = ResponsesAPIRequest {
            model: "gpt-4o".to_string(),
            input: InputParam::Text("Hello, world!".to_string()),
            instructions: Some("Be helpful".to_string()),
            temperature: None,
            max_output_tokens: None,
            stream: None,
            metadata: None,
            tools: None,
            tool_choice: None,
            parallel_tool_calls: None,
            modalities: None,
            user: None,
            store: None,
            reasoning_effort: None,
            include: None,
            audio: None,
            text: None,
            service_tier: None,
            top_p: None,
            top_logprobs: None,
            stream_options: None,
            truncation: None,
            conversation: None,
            previous_response_id: None,
            max_tool_calls: None,
            background: None,
        };

        let provider_req = ProviderRequestType::ResponsesAPIRequest(responses_req);
        let messages = provider_req.get_messages();

        // Should have system message (instructions) + user message (input)
        assert_eq!(messages.len(), 2);
        assert_eq!(messages[0].role, crate::apis::openai::Role::System);
        assert_eq!(messages[1].role, crate::apis::openai::Role::User);
    }
}
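
Taken together, get_messages and set_messages give a filter a provider-neutral view of the chat history. A minimal usage sketch, assuming the ProviderRequest trait is in scope; redact_user_messages is a hypothetical filter step, not code from this change:

use crate::apis::openai::{Message, MessageContent};

// Hypothetical filter step: normalize history to OpenAI Message format,
// rewrite the text, and write it back. Only ProviderRequestType,
// get_messages, and set_messages come from this commit; the rest is a sketch.
fn redact_user_messages(request: &mut ProviderRequestType) {
    // Same code path regardless of whether the request is ChatCompletions,
    // Anthropic Messages, Bedrock Converse/ConverseStream, or the Responses API.
    let mut messages: Vec<Message> = request.get_messages();
    for message in &mut messages {
        if let MessageContent::Text(text) = &mut message.content {
            *text = text.replace("secret", "[redacted]");
        }
    }
    // Each variant converts the OpenAI-format history back into its own
    // provider-specific message representation.
    request.set_messages(&messages);
}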