Add support for Amazon Bedrock Converse and ConverseStream (#588)

* First commit to get the Bedrock Converse API working. Next commit adds support for streaming and binary frames

* adding translation from BedrockBinaryFrameDecoder to AnthropicMessagesEvent

* Claude Code works with Amazon Bedrock

* added tests for openai streaming from bedrock

* PR comments fixed

* adding support for bedrock in docs as supported provider

* cargo fmt

* reverted to chatgpt models for claude code routing

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-288.local>
Co-authored-by: Adil Hafeez <adil.hafeez@gmail.com>
This commit is contained in:
Salman Paracha 2025-10-22 11:31:21 -07:00 committed by GitHub
parent ba826b1961
commit 9407ae6af7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
35 changed files with 7362 additions and 1493 deletions

View file

@ -0,0 +1,231 @@
use crate::apis::anthropic::{MessagesContentBlock, MessagesImageSource};
use crate::apis::openai::{ContentPart, FunctionCall, ImageUrl, Message, MessageContent, ToolCall};
use crate::clients::TransformError;
use serde_json::Value;
use std::time::{SystemTime, UNIX_EPOCH};
/// Extracts the plain-text portion of a content value.
///
/// Implementations concatenate whatever text the value carries and ignore
/// non-text parts (images, tool calls, etc.).
pub trait ExtractText {
    /// Returns the text content as an owned `String`.
    fn extract_text(&self) -> String;
}
/// Trait for utility functions on content collections
pub trait ContentUtils<T> {
    /// Collects tool-use style blocks into OpenAI `ToolCall`s.
    /// Returns `Ok(None)` when the collection contains no tool-use blocks.
    fn extract_tool_calls(&self) -> Result<Option<Vec<ToolCall>>, TransformError>;
    /// Splits the collection into OpenAI content parts, tool calls, and tool
    /// results, where each result tuple is `(tool_use_id, text, is_error)`.
    fn split_for_openai(
        &self,
    ) -> Result<(Vec<ContentPart>, Vec<ToolCall>, Vec<(String, String, bool)>), TransformError>;
}
/// Helper to create a current unix timestamp
///
/// Returns the number of whole seconds since the Unix epoch. If the system
/// clock is set before the epoch (which `duration_since` reports as an
/// error), this returns 0 instead of panicking.
pub fn current_timestamp() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0)
}
// Content Utilities
impl ContentUtils<ToolCall> for Vec<MessagesContentBlock> {
    /// Gathers every tool-use style block (plain, server, MCP) into OpenAI
    /// `ToolCall`s; other block kinds are ignored.
    fn extract_tool_calls(&self) -> Result<Option<Vec<ToolCall>>, TransformError> {
        let mut calls: Vec<ToolCall> = Vec::new();
        for block in self.iter() {
            if let MessagesContentBlock::ToolUse {
                id, name, input, ..
            }
            | MessagesContentBlock::ServerToolUse { id, name, input }
            | MessagesContentBlock::McpToolUse { id, name, input } = block
            {
                // Tool inputs are serialized back to a JSON string, which is
                // how OpenAI represents function arguments.
                calls.push(ToolCall {
                    id: id.clone(),
                    call_type: "function".to_string(),
                    function: FunctionCall {
                        name: name.clone(),
                        arguments: serde_json::to_string(&input)?,
                    },
                });
            }
        }
        if calls.is_empty() {
            Ok(None)
        } else {
            Ok(Some(calls))
        }
    }
    /// Partitions the blocks into OpenAI content parts, tool calls, and tool
    /// results (`(tool_use_id, text, is_error)`); unsupported kinds are
    /// silently skipped.
    fn split_for_openai(
        &self,
    ) -> Result<(Vec<ContentPart>, Vec<ToolCall>, Vec<(String, String, bool)>), TransformError>
    {
        let mut parts: Vec<ContentPart> = Vec::new();
        let mut calls: Vec<ToolCall> = Vec::new();
        let mut results: Vec<(String, String, bool)> = Vec::new();
        for block in self.iter() {
            match block {
                MessagesContentBlock::Text { text, .. } => {
                    parts.push(ContentPart::Text { text: text.clone() });
                }
                MessagesContentBlock::Image { source } => {
                    // Images are carried as URLs (base64 sources become data: URLs).
                    parts.push(ContentPart::ImageUrl {
                        image_url: ImageUrl {
                            url: convert_image_source_to_url(source),
                            detail: Some("auto".to_string()),
                        },
                    });
                }
                MessagesContentBlock::ToolUse {
                    id, name, input, ..
                }
                | MessagesContentBlock::ServerToolUse { id, name, input }
                | MessagesContentBlock::McpToolUse { id, name, input } => {
                    calls.push(ToolCall {
                        id: id.clone(),
                        call_type: "function".to_string(),
                        function: FunctionCall {
                            name: name.clone(),
                            arguments: serde_json::to_string(&input)?,
                        },
                    });
                }
                MessagesContentBlock::ToolResult {
                    tool_use_id,
                    content,
                    is_error,
                    ..
                } => {
                    results.push((
                        tool_use_id.clone(),
                        content.extract_text(),
                        is_error.unwrap_or(false),
                    ));
                }
                MessagesContentBlock::WebSearchToolResult {
                    tool_use_id,
                    content,
                    is_error,
                }
                | MessagesContentBlock::CodeExecutionToolResult {
                    tool_use_id,
                    content,
                    is_error,
                }
                | MessagesContentBlock::McpToolResult {
                    tool_use_id,
                    content,
                    is_error,
                } => {
                    results.push((
                        tool_use_id.clone(),
                        content.extract_text(),
                        is_error.unwrap_or(false),
                    ));
                }
                // Skip unsupported content types.
                _ => {}
            }
        }
        Ok((parts, calls, results))
    }
}
/// Convert image source to URL
///
/// URL sources pass through unchanged; base64 sources are rendered as a
/// `data:` URL embedding the media type and payload.
pub fn convert_image_source_to_url(source: &MessagesImageSource) -> String {
    match source {
        MessagesImageSource::Url { url } => url.clone(),
        MessagesImageSource::Base64 { media_type, data } => {
            format!("data:{};base64,{}", media_type, data)
        }
    }
}
/// Convert image URL to Anthropic image source
///
/// `data:` URLs are decoded into a base64 source (`data:<media>[;...],<payload>`);
/// anything else — including malformed data URLs with no comma — is kept as a
/// plain URL source. An absent or empty media type falls back to `image/jpeg`.
fn convert_image_url_to_source(image_url: &ImageUrl) -> MessagesImageSource {
    if let Some(rest) = image_url.url.strip_prefix("data:") {
        // Split "<mediatype>[;base64]" from the payload at the first comma.
        if let Some((header, data)) = rest.split_once(',') {
            let media_type = header
                .split(';')
                .next()
                .filter(|s| !s.is_empty())
                .unwrap_or("image/jpeg")
                .to_string();
            return MessagesImageSource::Base64 {
                media_type,
                data: data.to_string(),
            };
        }
    }
    MessagesImageSource::Url {
        url: image_url.url.clone(),
    }
}
/// Convert OpenAI message to Anthropic content blocks
///
/// Plain text becomes a `Text` block (empty text is dropped), multi-part
/// content maps part-by-part, and any tool calls are appended as `ToolUse`
/// blocks with their JSON argument strings parsed into values.
pub fn convert_openai_message_to_anthropic_content(
    message: &Message,
) -> Result<Vec<MessagesContentBlock>, TransformError> {
    let mut out: Vec<MessagesContentBlock> = Vec::new();
    // Regular (non-tool) content first.
    match &message.content {
        MessageContent::Text(text) => {
            if !text.is_empty() {
                out.push(MessagesContentBlock::Text {
                    text: text.clone(),
                    cache_control: None,
                });
            }
        }
        MessageContent::Parts(parts) => {
            for part in parts {
                let block = match part {
                    ContentPart::Text { text } => MessagesContentBlock::Text {
                        text: text.clone(),
                        cache_control: None,
                    },
                    ContentPart::ImageUrl { image_url } => MessagesContentBlock::Image {
                        source: convert_image_url_to_source(image_url),
                    },
                };
                out.push(block);
            }
        }
    }
    // Then any tool calls, with their argument payloads decoded from JSON.
    for tool_call in message.tool_calls.iter().flatten() {
        let input: Value = serde_json::from_str(&tool_call.function.arguments)?;
        out.push(MessagesContentBlock::ToolUse {
            id: tool_call.id.clone(),
            name: tool_call.function.name.clone(),
            input,
            cache_control: None,
        });
    }
    Ok(out)
}

View file

@ -0,0 +1,25 @@
//! API transformation modules
//!
//! This module provides organized transformations between the two main LLM API formats:
//! - `/v1/chat/completions` (OpenAI format)
//! - `/v1/messages` (Anthropic format)
//!
//! Provider-specific transformations (Bedrock, Groq, etc.) are handled internally
//! by the gateway, but the external API surface remains these two standard formats.
//! The transformations are split into logical modules for maintainability.
pub mod lib;
pub mod request;
pub mod response;
// Re-export commonly used items for convenience
pub use lib::*;
pub use request::*;
pub use response::*;
// ============================================================================
// CONSTANTS
// ============================================================================
/// Default maximum tokens when converting from OpenAI to Anthropic and no max_tokens is specified
/// (Anthropic's Messages API requires `max_tokens`, so a fallback value is mandatory).
pub const DEFAULT_MAX_TOKENS: u32 = 4096;

View file

@ -0,0 +1,704 @@
use crate::apis::amazon_bedrock::{
AnyChoice, AutoChoice, ContentBlock, ConversationRole, ConverseRequest, ImageBlock,
ImageSource, InferenceConfiguration, Message as BedrockMessage, SystemContentBlock,
Tool as BedrockTool, ToolChoice as BedrockToolChoice, ToolChoiceSpec, ToolConfiguration,
ToolInputSchema, ToolResultBlock, ToolResultContentBlock, ToolResultStatus, ToolSpecDefinition,
ToolUseBlock,
};
use crate::apis::anthropic::{
MessagesMessage, MessagesMessageContent, MessagesRequest, MessagesRole, MessagesStopReason,
MessagesSystemPrompt, MessagesTool, MessagesToolChoice, MessagesToolChoiceType, MessagesUsage,
ToolResultContent,
};
use crate::apis::openai::{
ChatCompletionsRequest, ContentPart, FinishReason, Function, FunctionChoice, Message,
MessageContent, Role, Tool, ToolCall, ToolChoice, ToolChoiceType, Usage,
};
use crate::clients::TransformError;
use crate::transforms::lib::*;
type AnthropicMessagesRequest = MessagesRequest;
// Conversion from Anthropic MessagesRequest to OpenAI ChatCompletionsRequest
impl TryFrom<AnthropicMessagesRequest> for ChatCompletionsRequest {
type Error = TransformError;
fn try_from(req: AnthropicMessagesRequest) -> Result<Self, Self::Error> {
let mut openai_messages: Vec<Message> = Vec::new();
// Convert system prompt to system message if present
if let Some(system) = req.system {
openai_messages.push(system.into());
}
// Convert messages
for message in req.messages {
let converted_messages: Vec<Message> = message.try_into()?;
openai_messages.extend(converted_messages);
}
// Convert tools and tool choice
let openai_tools = req.tools.map(|tools| convert_anthropic_tools(tools));
let (openai_tool_choice, parallel_tool_calls) =
convert_anthropic_tool_choice(req.tool_choice);
let mut _chat_completions_req: ChatCompletionsRequest = ChatCompletionsRequest {
model: req.model,
messages: openai_messages,
temperature: req.temperature,
top_p: req.top_p,
max_completion_tokens: Some(req.max_tokens),
stream: req.stream,
stop: req.stop_sequences,
tools: openai_tools,
tool_choice: openai_tool_choice,
parallel_tool_calls,
..Default::default()
};
_chat_completions_req.suppress_max_tokens_if_o3();
_chat_completions_req.fix_temperature_if_gpt5();
Ok(_chat_completions_req)
}
}
// Conversion from Anthropic MessagesRequest to Amazon Bedrock ConverseRequest
impl TryFrom<AnthropicMessagesRequest> for ConverseRequest {
    type Error = TransformError;
    /// Maps an Anthropic `/v1/messages` request onto a Bedrock Converse
    /// request: system prompt -> system content blocks, messages -> Bedrock
    /// messages, sampling params -> inference config, tools/tool choice ->
    /// tool configuration.
    ///
    /// # Errors
    /// Propagates any error from converting an individual message.
    fn try_from(req: AnthropicMessagesRequest) -> Result<Self, Self::Error> {
        // Convert system prompt to SystemContentBlock if present
        // (block-form prompts are flattened to their text content).
        let system: Option<Vec<SystemContentBlock>> = req.system.map(|system_prompt| {
            let text = match system_prompt {
                MessagesSystemPrompt::Single(text) => text,
                MessagesSystemPrompt::Blocks(blocks) => blocks.extract_text(),
            };
            vec![SystemContentBlock::Text { text }]
        });
        // Convert messages to Bedrock format
        let messages = if req.messages.is_empty() {
            None
        } else {
            let mut bedrock_messages = Vec::new();
            for anthropic_message in req.messages {
                let bedrock_message: BedrockMessage = anthropic_message.try_into()?;
                bedrock_messages.push(bedrock_message);
            }
            Some(bedrock_messages)
        };
        // Build inference configuration
        // Anthropic always requires max_tokens, so we should always include inferenceConfig
        let inference_config = Some(InferenceConfiguration {
            max_tokens: Some(req.max_tokens),
            temperature: req.temperature,
            top_p: req.top_p,
            stop_sequences: req.stop_sequences,
        });
        // Convert tools and tool choice to ToolConfiguration
        let tool_config = if req.tools.is_some() || req.tool_choice.is_some() {
            let tools = req.tools.map(|anthropic_tools| {
                anthropic_tools
                    .into_iter()
                    .map(|tool| BedrockTool::ToolSpec {
                        tool_spec: ToolSpecDefinition {
                            name: tool.name,
                            description: tool.description,
                            input_schema: ToolInputSchema {
                                json: tool.input_schema,
                            },
                        },
                    })
                    .collect()
            });
            let tool_choice = req.tool_choice.map(|choice| {
                match choice.kind {
                    MessagesToolChoiceType::Auto => BedrockToolChoice::Auto {
                        auto: AutoChoice {},
                    },
                    MessagesToolChoiceType::Any => BedrockToolChoice::Any { any: AnyChoice {} },
                    MessagesToolChoiceType::None => BedrockToolChoice::Auto {
                        auto: AutoChoice {},
                    }, // Bedrock doesn't have explicit "none"
                    MessagesToolChoiceType::Tool => {
                        // A named tool maps to a specific-tool choice; a
                        // "tool" choice without a name degrades to auto.
                        if let Some(name) = choice.name {
                            BedrockToolChoice::Tool {
                                tool: ToolChoiceSpec { name },
                            }
                        } else {
                            BedrockToolChoice::Auto {
                                auto: AutoChoice {},
                            }
                        }
                    }
                }
            });
            Some(ToolConfiguration { tools, tool_choice })
        } else {
            None
        };
        Ok(ConverseRequest {
            model_id: req.model,
            messages,
            system,
            inference_config,
            tool_config,
            stream: req.stream.unwrap_or(false),
            // Fields below have no Anthropic equivalent and are left unset.
            guardrail_config: None,
            additional_model_request_fields: None,
            additional_model_response_field_paths: None,
            performance_config: None,
            prompt_variables: None,
            request_metadata: None,
            metadata: None,
        })
    }
}
// Message Conversions
impl TryFrom<MessagesMessage> for Vec<Message> {
    type Error = TransformError;
    /// Expands one Anthropic message into the equivalent OpenAI messages.
    ///
    /// Tool results are emitted first as standalone `tool` role messages;
    /// the remaining content parts and tool calls form a single main
    /// message, which is omitted entirely when it would be empty (e.g. a
    /// message containing only tool_result blocks).
    fn try_from(message: MessagesMessage) -> Result<Self, Self::Error> {
        match message.content {
            MessagesMessageContent::Single(text) => Ok(vec![Message {
                role: message.role.into(),
                content: MessageContent::Text(text),
                name: None,
                tool_calls: None,
                tool_call_id: None,
            }]),
            MessagesMessageContent::Blocks(blocks) => {
                let (content_parts, tool_calls, tool_results) = blocks.split_for_openai()?;
                // Each tool result is surfaced as its own `tool` message.
                let mut converted: Vec<Message> = tool_results
                    .into_iter()
                    .map(|(tool_use_id, result_text, _is_error)| Message {
                        role: Role::Tool,
                        content: MessageContent::Text(result_text),
                        name: None,
                        tool_calls: None,
                        tool_call_id: Some(tool_use_id),
                    })
                    .collect();
                // Only create the main message if there's actual content or
                // tool calls.
                if !content_parts.is_empty() || !tool_calls.is_empty() {
                    let content = build_openai_content(content_parts, &tool_calls);
                    converted.push(Message {
                        role: message.role.into(),
                        content,
                        name: None,
                        tool_calls: if tool_calls.is_empty() {
                            None
                        } else {
                            Some(tool_calls)
                        },
                        tool_call_id: None,
                    });
                }
                Ok(converted)
            }
        }
    }
}
// Role Conversions
//
// Implemented as `From` (rather than `Into`) per Rust convention
// (clippy::from_over_into); callers still get `.into()` via the blanket impl.
impl From<MessagesRole> for Role {
    fn from(role: MessagesRole) -> Self {
        match role {
            MessagesRole::User => Role::User,
            MessagesRole::Assistant => Role::Assistant,
        }
    }
}
/// Map OpenAI finish reasons onto Anthropic stop reasons.
///
/// Implemented as `From` (rather than `Into`) per Rust convention
/// (clippy::from_over_into); callers still get `.into()` via the blanket impl.
impl From<FinishReason> for MessagesStopReason {
    fn from(reason: FinishReason) -> Self {
        match reason {
            FinishReason::Stop => MessagesStopReason::EndTurn,
            FinishReason::Length => MessagesStopReason::MaxTokens,
            // Both tool-call styles collapse to Anthropic's single tool_use reason.
            FinishReason::ToolCalls | FinishReason::FunctionCall => MessagesStopReason::ToolUse,
            FinishReason::ContentFilter => MessagesStopReason::Refusal,
        }
    }
}
/// Convert OpenAI usage accounting into Anthropic's shape. Cache token
/// counts have no OpenAI counterpart here, so they are left unset.
///
/// Implemented as `From` (rather than `Into`) per Rust convention
/// (clippy::from_over_into); callers still get `.into()` via the blanket impl.
impl From<Usage> for MessagesUsage {
    fn from(usage: Usage) -> Self {
        MessagesUsage {
            input_tokens: usage.prompt_tokens,
            output_tokens: usage.completion_tokens,
            cache_creation_input_tokens: None,
            cache_read_input_tokens: None,
        }
    }
}
// System Prompt Conversions
//
// Implemented as `From` (rather than `Into`) per Rust convention
// (clippy::from_over_into); callers still get `.into()` via the blanket impl.
impl From<MessagesSystemPrompt> for Message {
    /// Turn an Anthropic system prompt into an OpenAI system-role message,
    /// flattening block-form prompts to their text content.
    fn from(prompt: MessagesSystemPrompt) -> Self {
        let system_content = match prompt {
            MessagesSystemPrompt::Single(text) => MessageContent::Text(text),
            MessagesSystemPrompt::Blocks(blocks) => MessageContent::Text(blocks.extract_text()),
        };
        Message {
            role: Role::System,
            content: system_content,
            name: None,
            tool_calls: None,
            tool_call_id: None,
        }
    }
}
// Utility Functions
/// Convert Anthropic tools to OpenAI format
///
/// Each Anthropic tool becomes an OpenAI function tool; the JSON input
/// schema is carried over unchanged as the function's parameter schema.
fn convert_anthropic_tools(tools: Vec<MessagesTool>) -> Vec<Tool> {
    let mut converted = Vec::with_capacity(tools.len());
    for tool in tools {
        converted.push(Tool {
            tool_type: "function".to_string(),
            function: Function {
                name: tool.name,
                description: tool.description,
                parameters: tool.input_schema,
                strict: None,
            },
        });
    }
    converted
}
/// Convert Anthropic tool choice to OpenAI format
///
/// Returns the OpenAI tool choice together with the derived
/// `parallel_tool_calls` flag (the inverse of Anthropic's
/// `disable_parallel_tool_use`). `None` input yields `(None, None)`.
fn convert_anthropic_tool_choice(
    tool_choice: Option<MessagesToolChoice>,
) -> (Option<ToolChoice>, Option<bool>) {
    match tool_choice {
        None => (None, None),
        Some(choice) => {
            let parallel = choice.disable_parallel_tool_use.map(|disable| !disable);
            let openai_choice = match choice.kind {
                MessagesToolChoiceType::Auto => ToolChoice::Type(ToolChoiceType::Auto),
                MessagesToolChoiceType::Any => ToolChoice::Type(ToolChoiceType::Required),
                MessagesToolChoiceType::None => ToolChoice::Type(ToolChoiceType::None),
                MessagesToolChoiceType::Tool => match choice.name {
                    Some(name) => ToolChoice::Function {
                        choice_type: "function".to_string(),
                        function: FunctionChoice { name },
                    },
                    // A specific-tool choice without a name degrades to auto.
                    None => ToolChoice::Type(ToolChoiceType::Auto),
                },
            };
            (Some(openai_choice), parallel)
        }
    }
}
/// Build OpenAI message content from parts and tool calls
///
/// A lone text part with no tool calls collapses to plain-text content; no
/// parts at all yields an empty text body; anything else stays as
/// structured parts.
fn build_openai_content(
    content_parts: Vec<ContentPart>,
    tool_calls: &[ToolCall],
) -> MessageContent {
    match (content_parts.len(), tool_calls.is_empty()) {
        (0, _) => MessageContent::Text(String::new()),
        (1, true) => {
            if let ContentPart::Text { text } = &content_parts[0] {
                MessageContent::Text(text.clone())
            } else {
                MessageContent::Parts(content_parts)
            }
        }
        _ => MessageContent::Parts(content_parts),
    }
}
/// Convert an Anthropic message into a Bedrock Converse message.
///
/// Text, tool-use, tool-result, and base64 image blocks map to their
/// Bedrock equivalents; other block kinds (thinking, documents, URL-based
/// images, ...) are skipped. A single-space text block is inserted whenever
/// nothing survives conversion, since an empty content list must be avoided.
impl TryFrom<MessagesMessage> for BedrockMessage {
    type Error = TransformError;
    fn try_from(message: MessagesMessage) -> Result<Self, Self::Error> {
        let role = match message.role {
            MessagesRole::User => ConversationRole::User,
            MessagesRole::Assistant => ConversationRole::Assistant,
        };
        let mut content_blocks = Vec::new();
        // Convert content blocks
        match message.content {
            MessagesMessageContent::Single(text) => {
                // Empty single-string content is dropped (a placeholder is
                // added at the end if needed).
                if !text.is_empty() {
                    content_blocks.push(ContentBlock::Text { text });
                }
            }
            MessagesMessageContent::Blocks(blocks) => {
                for block in blocks {
                    match block {
                        crate::apis::anthropic::MessagesContentBlock::Text { text, .. } => {
                            if !text.is_empty() {
                                content_blocks.push(ContentBlock::Text { text });
                            }
                        }
                        crate::apis::anthropic::MessagesContentBlock::ToolUse {
                            id,
                            name,
                            input,
                            ..
                        } => {
                            content_blocks.push(ContentBlock::ToolUse {
                                tool_use: ToolUseBlock {
                                    tool_use_id: id,
                                    name,
                                    input,
                                },
                            });
                        }
                        crate::apis::anthropic::MessagesContentBlock::ToolResult {
                            tool_use_id,
                            is_error,
                            content,
                            ..
                        } => {
                            // Convert Anthropic ToolResultContent to Bedrock ToolResultContentBlock
                            let tool_result_content = match content {
                                ToolResultContent::Text(text) => {
                                    vec![ToolResultContentBlock::Text { text }]
                                }
                                ToolResultContent::Blocks(blocks) => {
                                    let mut result_blocks = Vec::new();
                                    for result_block in blocks {
                                        match result_block {
                                            crate::apis::anthropic::MessagesContentBlock::Text { text, .. } => {
                                                result_blocks.push(ToolResultContentBlock::Text { text });
                                            }
                                            // For now, skip other content types in tool results
                                            _ => {}
                                        }
                                    }
                                    result_blocks
                                }
                            };
                            // Ensure we have at least one content block
                            // (a single space stands in for an empty result).
                            let final_content = if tool_result_content.is_empty() {
                                vec![ToolResultContentBlock::Text {
                                    text: " ".to_string(),
                                }]
                            } else {
                                tool_result_content
                            };
                            // Anthropic's is_error flag maps to Bedrock's
                            // explicit success/error status.
                            let status = if is_error.unwrap_or(false) {
                                Some(ToolResultStatus::Error)
                            } else {
                                Some(ToolResultStatus::Success)
                            };
                            content_blocks.push(ContentBlock::ToolResult {
                                tool_result: ToolResultBlock {
                                    tool_use_id,
                                    content: final_content,
                                    status,
                                },
                            });
                        }
                        crate::apis::anthropic::MessagesContentBlock::Image { source } => {
                            // Convert Anthropic image to Bedrock image format
                            match source {
                                crate::apis::anthropic::MessagesImageSource::Base64 {
                                    media_type,
                                    data,
                                } => {
                                    content_blocks.push(ContentBlock::Image {
                                        image: ImageBlock {
                                            source: ImageSource::Base64 { media_type, data },
                                        },
                                    });
                                }
                                crate::apis::anthropic::MessagesImageSource::Url { .. } => {
                                    // Bedrock doesn't support URL-based images, skip for now
                                    // Could potentially download and convert to base64, but not implemented
                                }
                            }
                        }
                        // Skip other content types for now (Thinking, Document, etc.)
                        _ => {}
                    }
                }
            }
        }
        // Ensure we have at least one content block
        if content_blocks.is_empty() {
            content_blocks.push(ContentBlock::Text {
                text: " ".to_string(),
            });
        }
        Ok(BedrockMessage {
            role,
            content: content_blocks,
        })
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the Anthropic -> Bedrock request conversions.
    use super::*;
    use crate::apis::amazon_bedrock::{
        ContentBlock, ConversationRole, ConverseRequest, SystemContentBlock,
        ToolChoice as BedrockToolChoice,
    };
    use crate::apis::anthropic::{
        MessagesMessage, MessagesMessageContent, MessagesRequest, MessagesRole,
        MessagesSystemPrompt, MessagesTool, MessagesToolChoice, MessagesToolChoiceType,
    };
    use serde_json::json;

    // Model id, system prompt, message content, and the full inference
    // configuration should all survive a basic conversion.
    #[test]
    fn test_anthropic_to_bedrock_basic_request() {
        let anthropic_request = MessagesRequest {
            model: "claude-3-5-sonnet-20241022".to_string(),
            messages: vec![MessagesMessage {
                role: MessagesRole::User,
                content: MessagesMessageContent::Single("Hello, how are you?".to_string()),
            }],
            max_tokens: 1000,
            container: None,
            mcp_servers: None,
            system: Some(MessagesSystemPrompt::Single(
                "You are a helpful assistant.".to_string(),
            )),
            metadata: None,
            service_tier: None,
            thinking: None,
            temperature: Some(0.7),
            top_p: Some(0.9),
            top_k: None,
            stream: Some(false),
            stop_sequences: Some(vec!["STOP".to_string()]),
            tools: None,
            tool_choice: None,
        };
        let bedrock_request: ConverseRequest = anthropic_request.try_into().unwrap();
        assert_eq!(bedrock_request.model_id, "claude-3-5-sonnet-20241022");
        assert!(bedrock_request.system.is_some());
        assert_eq!(bedrock_request.system.as_ref().unwrap().len(), 1);
        assert!(bedrock_request.messages.is_some());
        let messages = bedrock_request.messages.as_ref().unwrap();
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0].role, ConversationRole::User);
        if let ContentBlock::Text { text } = &messages[0].content[0] {
            assert_eq!(text, "Hello, how are you?");
        } else {
            panic!("Expected text content block");
        }
        // All sampling parameters should land in the inference config.
        let inference_config = bedrock_request.inference_config.as_ref().unwrap();
        assert_eq!(inference_config.temperature, Some(0.7));
        assert_eq!(inference_config.top_p, Some(0.9));
        assert_eq!(inference_config.max_tokens, Some(1000));
        assert_eq!(
            inference_config.stop_sequences,
            Some(vec!["STOP".to_string()])
        );
    }

    // A named tool choice should become a Bedrock specific-tool choice and
    // the tool spec (name, description, schema) should carry over.
    #[test]
    fn test_anthropic_to_bedrock_with_tools() {
        let anthropic_request = MessagesRequest {
            model: "claude-3-5-sonnet-20241022".to_string(),
            messages: vec![MessagesMessage {
                role: MessagesRole::User,
                content: MessagesMessageContent::Single("What's the weather like?".to_string()),
            }],
            max_tokens: 1000,
            container: None,
            mcp_servers: None,
            system: None,
            metadata: None,
            service_tier: None,
            thinking: None,
            temperature: None,
            top_p: None,
            top_k: None,
            stream: None,
            stop_sequences: None,
            tools: Some(vec![MessagesTool {
                name: "get_weather".to_string(),
                description: Some("Get current weather information".to_string()),
                input_schema: json!({
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city name"
                        }
                    },
                    "required": ["location"]
                }),
            }]),
            tool_choice: Some(MessagesToolChoice {
                kind: MessagesToolChoiceType::Tool,
                name: Some("get_weather".to_string()),
                disable_parallel_tool_use: None,
            }),
        };
        let bedrock_request: ConverseRequest = anthropic_request.try_into().unwrap();
        assert_eq!(bedrock_request.model_id, "claude-3-5-sonnet-20241022");
        assert!(bedrock_request.tool_config.is_some());
        let tool_config = bedrock_request.tool_config.as_ref().unwrap();
        assert!(tool_config.tools.is_some());
        let tools = tool_config.tools.as_ref().unwrap();
        assert_eq!(tools.len(), 1);
        let crate::apis::amazon_bedrock::Tool::ToolSpec { tool_spec } = &tools[0];
        assert_eq!(tool_spec.name, "get_weather");
        assert_eq!(
            tool_spec.description,
            Some("Get current weather information".to_string())
        );
        if let Some(BedrockToolChoice::Tool { tool }) = &tool_config.tool_choice {
            assert_eq!(tool.name, "get_weather");
        } else {
            panic!("Expected specific tool choice");
        }
    }

    // An "auto" tool choice should map to Bedrock's auto variant.
    #[test]
    fn test_anthropic_to_bedrock_auto_tool_choice() {
        let anthropic_request = MessagesRequest {
            model: "claude-3-5-sonnet-20241022".to_string(),
            messages: vec![MessagesMessage {
                role: MessagesRole::User,
                content: MessagesMessageContent::Single("Help me with something".to_string()),
            }],
            max_tokens: 500,
            container: None,
            mcp_servers: None,
            system: None,
            metadata: None,
            service_tier: None,
            thinking: None,
            temperature: None,
            top_p: None,
            top_k: None,
            stream: None,
            stop_sequences: None,
            tools: Some(vec![MessagesTool {
                name: "help_tool".to_string(),
                description: Some("A helpful tool".to_string()),
                input_schema: json!({
                    "type": "object",
                    "properties": {}
                }),
            }]),
            tool_choice: Some(MessagesToolChoice {
                kind: MessagesToolChoiceType::Auto,
                name: None,
                disable_parallel_tool_use: None,
            }),
        };
        let bedrock_request: ConverseRequest = anthropic_request.try_into().unwrap();
        assert!(bedrock_request.tool_config.is_some());
        let tool_config = bedrock_request.tool_config.as_ref().unwrap();
        assert!(matches!(
            tool_config.tool_choice,
            Some(BedrockToolChoice::Auto { .. })
        ));
    }

    // Multi-turn conversations should preserve message order and roles,
    // and the system prompt should convert to a system text block.
    #[test]
    fn test_anthropic_to_bedrock_multi_message_conversation() {
        let anthropic_request = MessagesRequest {
            model: "claude-3-5-sonnet-20241022".to_string(),
            messages: vec![
                MessagesMessage {
                    role: MessagesRole::User,
                    content: MessagesMessageContent::Single("Hello".to_string()),
                },
                MessagesMessage {
                    role: MessagesRole::Assistant,
                    content: MessagesMessageContent::Single(
                        "Hi there! How can I help you?".to_string(),
                    ),
                },
                MessagesMessage {
                    role: MessagesRole::User,
                    content: MessagesMessageContent::Single("What's 2+2?".to_string()),
                },
            ],
            max_tokens: 100,
            container: None,
            mcp_servers: None,
            system: Some(MessagesSystemPrompt::Single("Be concise".to_string())),
            metadata: None,
            service_tier: None,
            thinking: None,
            temperature: Some(0.5),
            top_p: None,
            top_k: None,
            stream: None,
            stop_sequences: None,
            tools: None,
            tool_choice: None,
        };
        let bedrock_request: ConverseRequest = anthropic_request.try_into().unwrap();
        assert!(bedrock_request.messages.is_some());
        let messages = bedrock_request.messages.as_ref().unwrap();
        assert_eq!(messages.len(), 3);
        assert_eq!(messages[0].role, ConversationRole::User);
        assert_eq!(messages[1].role, ConversationRole::Assistant);
        assert_eq!(messages[2].role, ConversationRole::User);
        // Check system prompt
        assert!(bedrock_request.system.is_some());
        if let SystemContentBlock::Text { text } = &bedrock_request.system.as_ref().unwrap()[0] {
            assert_eq!(text, "Be concise");
        } else {
            panic!("Expected system text block");
        }
    }

    // Direct message-level conversion: role and text should pass through.
    #[test]
    fn test_anthropic_message_to_bedrock_conversion() {
        let anthropic_message = MessagesMessage {
            role: MessagesRole::User,
            content: MessagesMessageContent::Single("Test message".to_string()),
        };
        let bedrock_message: BedrockMessage = anthropic_message.try_into().unwrap();
        assert_eq!(bedrock_message.role, ConversationRole::User);
        assert_eq!(bedrock_message.content.len(), 1);
        if let ContentBlock::Text { text } = &bedrock_message.content[0] {
            assert_eq!(text, "Test message");
        } else {
            panic!("Expected text content block");
        }
    }
}

View file

@ -0,0 +1,782 @@
use crate::apis::amazon_bedrock::{
AnyChoice, AutoChoice, ContentBlock, ConversationRole, ConverseRequest, InferenceConfiguration,
Message as BedrockMessage, SystemContentBlock, Tool as BedrockTool,
ToolChoice as BedrockToolChoice, ToolChoiceSpec, ToolConfiguration, ToolInputSchema,
ToolSpecDefinition,
};
use crate::apis::anthropic::{
MessagesContentBlock, MessagesMessage, MessagesMessageContent, MessagesRequest, MessagesRole,
MessagesSystemPrompt, MessagesTool, MessagesToolChoice, MessagesToolChoiceType,
ToolResultContent,
};
use crate::apis::openai::{
ChatCompletionsRequest, Message, MessageContent, Role, Tool, ToolChoice, ToolChoiceType,
};
use crate::clients::TransformError;
use crate::transforms::lib::ExtractText;
use crate::transforms::lib::*;
use crate::transforms::*;
type AnthropicMessagesRequest = MessagesRequest;
// ============================================================================
// MAIN REQUEST TRANSFORMATIONS
// ============================================================================
/// Collapse an OpenAI system message into an Anthropic system prompt,
/// flattening multi-part content to its text (role/name/tool fields of the
/// message are discarded).
///
/// Implemented as `From` (rather than `Into`) per Rust convention
/// (clippy::from_over_into); callers still get `.into()` via the blanket impl.
impl From<Message> for MessagesSystemPrompt {
    fn from(message: Message) -> Self {
        let system_text = match message.content {
            MessageContent::Text(text) => text,
            MessageContent::Parts(parts) => parts.extract_text(),
        };
        MessagesSystemPrompt::Single(system_text)
    }
}
/// Convert an OpenAI chat message into an Anthropic message.
///
/// Tool messages become user messages carrying a single tool_result block
/// (Anthropic has no tool role); system messages are rejected and must be
/// handled by the caller as the request-level system prompt.
impl TryFrom<Message> for MessagesMessage {
    type Error = TransformError;
    /// # Errors
    /// Returns `MissingField` when a Tool message lacks `tool_call_id`, and
    /// `UnsupportedConversion` for System messages.
    fn try_from(message: Message) -> Result<Self, Self::Error> {
        let role = match message.role {
            Role::User => MessagesRole::User,
            Role::Assistant => MessagesRole::Assistant,
            Role::Tool => {
                // Tool messages become user messages with tool results
                let tool_call_id = message.tool_call_id.ok_or_else(|| {
                    TransformError::MissingField(
                        "tool_call_id required for Tool messages".to_string(),
                    )
                })?;
                return Ok(MessagesMessage {
                    role: MessagesRole::User,
                    content: MessagesMessageContent::Blocks(vec![
                        MessagesContentBlock::ToolResult {
                            tool_use_id: tool_call_id,
                            is_error: None,
                            content: ToolResultContent::Blocks(vec![MessagesContentBlock::Text {
                                text: message.content.extract_text(),
                                cache_control: None,
                            }]),
                            cache_control: None,
                        },
                    ]),
                });
            }
            Role::System => {
                return Err(TransformError::UnsupportedConversion(
                    "System messages should be handled separately".to_string(),
                ));
            }
        };
        let content_blocks = convert_openai_message_to_anthropic_content(&message)?;
        let content = build_anthropic_content(content_blocks);
        Ok(MessagesMessage { role, content })
    }
}
/// Convert an OpenAI chat message into a Bedrock Converse message.
///
/// User content (text and base64 data-URL images) maps to content blocks,
/// assistant tool calls become ToolUse blocks, and Tool messages become
/// user messages carrying a ToolResult block. Bedrock content must not be
/// empty, so a single-space text block is inserted when needed. System
/// messages are rejected and must be handled by the caller.
impl TryFrom<Message> for BedrockMessage {
    type Error = TransformError;
    fn try_from(message: Message) -> Result<Self, Self::Error> {
        let role = match message.role {
            Role::User => ConversationRole::User,
            Role::Assistant => ConversationRole::Assistant,
            Role::Tool => ConversationRole::User, // Tool results become user messages in Bedrock
            Role::System => {
                return Err(TransformError::UnsupportedConversion(
                    "System messages should be handled separately in Bedrock".to_string(),
                ));
            }
        };
        let mut content_blocks = Vec::new();
        // Handle different message types
        match message.role {
            Role::User => {
                // Convert user message content to content blocks
                match message.content {
                    MessageContent::Text(text) => {
                        if !text.is_empty() {
                            content_blocks.push(ContentBlock::Text { text });
                        }
                    }
                    MessageContent::Parts(parts) => {
                        // Convert OpenAI content parts to Bedrock ContentBlocks
                        for part in parts {
                            match part {
                                crate::apis::openai::ContentPart::Text { text } => {
                                    if !text.is_empty() {
                                        content_blocks.push(ContentBlock::Text { text });
                                    }
                                }
                                crate::apis::openai::ContentPart::ImageUrl { image_url } => {
                                    // Convert image URL to Bedrock image format
                                    // (only base64 data URLs are representable).
                                    if image_url.url.starts_with("data:") {
                                        if let Some((media_type, data)) =
                                            parse_data_url(&image_url.url)
                                        {
                                            content_blocks.push(ContentBlock::Image {
                                                image: crate::apis::amazon_bedrock::ImageBlock {
                                                    source: crate::apis::amazon_bedrock::ImageSource::Base64 {
                                                        media_type,
                                                        data,
                                                    },
                                                },
                                            });
                                        } else {
                                            return Err(TransformError::UnsupportedConversion(
                                                format!(
                                                    "Invalid data URL format: {}",
                                                    image_url.url
                                                ),
                                            ));
                                        }
                                    } else {
                                        return Err(TransformError::UnsupportedConversion(
                                            "Only base64 data URLs are supported for images in Bedrock".to_string()
                                        ));
                                    }
                                }
                            }
                        }
                    }
                }
                // Ensure we have at least one content block
                if content_blocks.is_empty() {
                    content_blocks.push(ContentBlock::Text {
                        text: " ".to_string(),
                    });
                }
            }
            Role::Assistant => {
                // Handle text content - but only add if non-empty OR if we don't have tool calls
                let text_content = message.content.extract_text();
                let has_tool_calls = message
                    .tool_calls
                    .as_ref()
                    .map_or(false, |calls| !calls.is_empty());
                // Add text content if it's non-empty, or if we have no tool calls (to avoid empty content)
                if !text_content.is_empty() {
                    content_blocks.push(ContentBlock::Text { text: text_content });
                } else if !has_tool_calls {
                    // If we have empty content and no tool calls, add a minimal placeholder
                    // This prevents the "blank text field" error
                    content_blocks.push(ContentBlock::Text {
                        text: " ".to_string(),
                    });
                }
                // Convert tool calls to ToolUse content blocks
                if let Some(tool_calls) = message.tool_calls {
                    for tool_call in tool_calls {
                        // Parse the arguments string as JSON
                        let input: serde_json::Value =
                            serde_json::from_str(&tool_call.function.arguments).map_err(|e| {
                                TransformError::UnsupportedConversion(format!(
                                    "Failed to parse tool arguments as JSON: {}. Arguments: {}",
                                    e, tool_call.function.arguments
                                ))
                            })?;
                        content_blocks.push(ContentBlock::ToolUse {
                            tool_use: crate::apis::amazon_bedrock::ToolUseBlock {
                                tool_use_id: tool_call.id,
                                name: tool_call.function.name,
                                input,
                            },
                        });
                    }
                }
                // Bedrock requires at least one content block
                if content_blocks.is_empty() {
                    content_blocks.push(ContentBlock::Text {
                        text: " ".to_string(),
                    });
                }
            }
            Role::Tool => {
                // Tool messages become user messages with ToolResult content blocks
                let tool_call_id = message.tool_call_id.ok_or_else(|| {
                    TransformError::MissingField(
                        "tool_call_id required for Tool messages".to_string(),
                    )
                })?;
                let tool_content = message.content.extract_text();
                // Create ToolResult content block
                let tool_result_content = if tool_content.is_empty() {
                    // Even for tool results, we need non-empty content
                    vec![crate::apis::amazon_bedrock::ToolResultContentBlock::Text {
                        text: " ".to_string(),
                    }]
                } else {
                    vec![crate::apis::amazon_bedrock::ToolResultContentBlock::Text {
                        text: tool_content,
                    }]
                };
                content_blocks.push(ContentBlock::ToolResult {
                    tool_result: crate::apis::amazon_bedrock::ToolResultBlock {
                        tool_use_id: tool_call_id,
                        content: tool_result_content,
                        status: Some(crate::apis::amazon_bedrock::ToolResultStatus::Success), // Default to success
                    },
                });
            }
            Role::System => {
                // Already handled above with early return
                unreachable!()
            }
        }
        Ok(BedrockMessage {
            role,
            content: content_blocks,
        })
    }
}
impl TryFrom<ChatCompletionsRequest> for AnthropicMessagesRequest {
type Error = TransformError;
fn try_from(req: ChatCompletionsRequest) -> Result<Self, Self::Error> {
let mut system_prompt = None;
let mut messages = Vec::new();
for message in req.messages {
match message.role {
Role::System => {
system_prompt = Some(message.into());
}
_ => {
let anthropic_message: MessagesMessage = message.try_into()?;
messages.push(anthropic_message);
}
}
}
// Convert tools and tool choice
let anthropic_tools = req.tools.map(|tools| convert_openai_tools(tools));
let anthropic_tool_choice =
convert_openai_tool_choice(req.tool_choice, req.parallel_tool_calls);
Ok(AnthropicMessagesRequest {
model: req.model,
system: system_prompt,
messages,
max_tokens: req
.max_completion_tokens
.or(req.max_tokens)
.unwrap_or(DEFAULT_MAX_TOKENS),
container: None,
mcp_servers: None,
service_tier: None,
thinking: None,
temperature: req.temperature,
top_p: req.top_p,
top_k: None, // OpenAI doesn't have top_k
stream: req.stream,
stop_sequences: req.stop,
tools: anthropic_tools,
tool_choice: anthropic_tool_choice,
metadata: None,
})
}
}
impl TryFrom<ChatCompletionsRequest> for ConverseRequest {
type Error = TransformError;
fn try_from(req: ChatCompletionsRequest) -> Result<Self, Self::Error> {
// Separate system messages from user/assistant messages
let mut system_messages = Vec::new();
let mut conversation_messages = Vec::new();
for message in req.messages {
match message.role {
Role::System => {
let system_text = match message.content {
MessageContent::Text(text) => text,
MessageContent::Parts(parts) => parts.extract_text(),
};
system_messages.push(SystemContentBlock::Text { text: system_text });
}
_ => {
let bedrock_message: BedrockMessage = message.try_into()?;
conversation_messages.push(bedrock_message);
}
}
}
// Convert system messages
let system = if system_messages.is_empty() {
None
} else {
Some(system_messages)
};
// Convert conversation messages
let messages = if conversation_messages.is_empty() {
None
} else {
Some(conversation_messages)
};
// Build inference configuration
let max_tokens = req.max_completion_tokens.or(req.max_tokens);
let inference_config = if max_tokens.is_some()
|| req.temperature.is_some()
|| req.top_p.is_some()
|| req.stop.is_some()
{
Some(InferenceConfiguration {
max_tokens,
temperature: req.temperature,
top_p: req.top_p,
stop_sequences: req.stop,
})
} else {
None
};
// Convert tools and tool choice to ToolConfiguration
let tool_config = if req.tools.is_some() || req.tool_choice.is_some() {
let tools = req.tools.map(|openai_tools| {
openai_tools
.into_iter()
.map(|tool| BedrockTool::ToolSpec {
tool_spec: ToolSpecDefinition {
name: tool.function.name,
description: tool.function.description,
input_schema: ToolInputSchema {
json: tool.function.parameters,
},
},
})
.collect()
});
let tool_choice = req
.tool_choice
.map(|choice| {
match choice {
ToolChoice::Type(tool_type) => match tool_type {
ToolChoiceType::Auto => BedrockToolChoice::Auto {
auto: AutoChoice {},
},
ToolChoiceType::Required => {
BedrockToolChoice::Any { any: AnyChoice {} }
}
ToolChoiceType::None => BedrockToolChoice::Auto {
auto: AutoChoice {},
}, // Bedrock doesn't have explicit "none"
},
ToolChoice::Function { function, .. } => BedrockToolChoice::Tool {
tool: ToolChoiceSpec {
name: function.name,
},
},
}
})
.or_else(|| {
// If tools are present but no tool_choice specified, default to "auto"
if tools.is_some() {
Some(BedrockToolChoice::Auto {
auto: AutoChoice {},
})
} else {
None
}
});
Some(ToolConfiguration { tools, tool_choice })
} else {
None
};
Ok(ConverseRequest {
model_id: req.model,
messages,
system,
inference_config,
tool_config,
stream: req.stream.unwrap_or(false),
guardrail_config: None,
additional_model_request_fields: None,
additional_model_response_field_paths: None,
performance_config: None,
prompt_variables: None,
request_metadata: None,
metadata: None,
})
}
}
/// Convert OpenAI tool definitions into Anthropic's tool schema.
///
/// Each OpenAI function tool maps 1:1 onto an Anthropic tool; the JSON
/// parameter schema is carried over unchanged as `input_schema`.
fn convert_openai_tools(tools: Vec<Tool>) -> Vec<MessagesTool> {
    let mut converted = Vec::with_capacity(tools.len());
    for tool in tools {
        let func = tool.function;
        converted.push(MessagesTool {
            name: func.name,
            description: func.description,
            input_schema: func.parameters,
        });
    }
    converted
}
/// Convert an OpenAI tool choice into Anthropic's format.
///
/// OpenAI's `parallel_tool_calls` flag is the inverse of Anthropic's
/// `disable_parallel_tool_use`, so it is negated during the translation;
/// the explicit "none" choice never carries the parallelism flag.
fn convert_openai_tool_choice(
    tool_choice: Option<ToolChoice>,
    parallel_tool_calls: Option<bool>,
) -> Option<MessagesToolChoice> {
    let choice = tool_choice?;
    // "allow parallel calls" (OpenAI) -> "disable parallel use" (Anthropic).
    let disable_parallel = parallel_tool_calls.map(|allowed| !allowed);
    let converted = match choice {
        ToolChoice::Type(ToolChoiceType::Auto) => MessagesToolChoice {
            kind: MessagesToolChoiceType::Auto,
            name: None,
            disable_parallel_tool_use: disable_parallel,
        },
        ToolChoice::Type(ToolChoiceType::Required) => MessagesToolChoice {
            kind: MessagesToolChoiceType::Any,
            name: None,
            disable_parallel_tool_use: disable_parallel,
        },
        ToolChoice::Type(ToolChoiceType::None) => MessagesToolChoice {
            kind: MessagesToolChoiceType::None,
            name: None,
            disable_parallel_tool_use: None,
        },
        ToolChoice::Function { function, .. } => MessagesToolChoice {
            kind: MessagesToolChoiceType::Tool,
            name: Some(function.name),
            disable_parallel_tool_use: disable_parallel,
        },
    };
    Some(converted)
}
/// Build Anthropic message content from content blocks.
///
/// A lone text block collapses to the compact single-string form, an empty
/// list becomes an empty string, and anything else stays as a block list.
fn build_anthropic_content(content_blocks: Vec<MessagesContentBlock>) -> MessagesMessageContent {
    if content_blocks.is_empty() {
        return MessagesMessageContent::Single(String::new());
    }
    if content_blocks.len() == 1 {
        if let MessagesContentBlock::Text { text, .. } = &content_blocks[0] {
            return MessagesMessageContent::Single(text.clone());
        }
    }
    MessagesMessageContent::Blocks(content_blocks)
}
/// Parse a base64 `data:` URL into `(media_type, base64_data)`.
///
/// Supports the form `data:<media-type>;base64,<data>`, e.g.
/// `data:image/jpeg;base64,/9j/4AAQ...`. Any extra header parameters after
/// the `base64` marker are ignored.
///
/// Returns `None` for non-data URLs, URLs without a `,` separator, and
/// non-base64 payloads (e.g. percent-encoded data URLs), which are currently
/// unsupported.
fn parse_data_url(url: &str) -> Option<(String, String)> {
    // "data:<header>,<data>" — strip the scheme, then split header from data.
    let (header, data) = url.strip_prefix("data:")?.split_once(',')?;
    // Header is "<media-type>[;<param>...]"; the first item always exists
    // (split yields at least one element), and the parameter immediately
    // following the media type must be the `base64` marker.
    let mut header_parts = header.split(';');
    let media_type = header_parts.next().unwrap_or_default().to_string();
    if header_parts.next() == Some("base64") {
        Some((media_type, data.to_string()))
    } else {
        // Only base64-encoded data URLs are supported for now.
        None
    }
}
#[cfg(test)]
mod tests {
    //! Unit tests for the OpenAI -> Bedrock Converse request translation.
    use super::*;
    use crate::apis::amazon_bedrock::{
        ContentBlock, ConversationRole, ConverseRequest, SystemContentBlock,
        ToolChoice as BedrockToolChoice,
    };
    use crate::apis::openai::{
        ChatCompletionsRequest, Function, FunctionChoice, Message, MessageContent, Role, Tool,
        ToolChoice, ToolChoiceType,
    };
    use serde_json::json;

    /// System message is lifted into Bedrock's `system` field, the user turn
    /// becomes a text content block, and all sampling parameters land in the
    /// inference configuration.
    #[test]
    fn test_openai_to_bedrock_basic_request() {
        let openai_request = ChatCompletionsRequest {
            model: "gpt-4".to_string(),
            messages: vec![
                Message {
                    role: Role::System,
                    content: MessageContent::Text("You are a helpful assistant.".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
                Message {
                    role: Role::User,
                    content: MessageContent::Text("Hello, how are you?".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
            ],
            temperature: Some(0.7),
            top_p: Some(0.9),
            max_completion_tokens: Some(1000),
            stop: Some(vec!["STOP".to_string()]),
            stream: Some(false),
            tools: None,
            tool_choice: None,
            ..Default::default()
        };
        let bedrock_request: ConverseRequest = openai_request.try_into().unwrap();
        assert_eq!(bedrock_request.model_id, "gpt-4");
        // System prompt is carried separately from the conversation turns.
        assert!(bedrock_request.system.is_some());
        assert_eq!(bedrock_request.system.as_ref().unwrap().len(), 1);
        if let SystemContentBlock::Text { text } = &bedrock_request.system.as_ref().unwrap()[0] {
            assert_eq!(text, "You are a helpful assistant.");
        } else {
            panic!("Expected system text block");
        }
        // Only the user message remains in the turn list.
        assert!(bedrock_request.messages.is_some());
        let messages = bedrock_request.messages.as_ref().unwrap();
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0].role, ConversationRole::User);
        if let ContentBlock::Text { text } = &messages[0].content[0] {
            assert_eq!(text, "Hello, how are you?");
        } else {
            panic!("Expected text content block");
        }
        // Sampling parameters map 1:1 onto the inference configuration.
        let inference_config = bedrock_request.inference_config.as_ref().unwrap();
        assert_eq!(inference_config.temperature, Some(0.7));
        assert_eq!(inference_config.top_p, Some(0.9));
        assert_eq!(inference_config.max_tokens, Some(1000));
        assert_eq!(
            inference_config.stop_sequences,
            Some(vec!["STOP".to_string()])
        );
    }

    /// OpenAI function tools become Bedrock tool specs, and an explicit
    /// function `tool_choice` maps to Bedrock's specific-tool choice.
    #[test]
    fn test_openai_to_bedrock_with_tools() {
        let openai_request = ChatCompletionsRequest {
            model: "gpt-4".to_string(),
            messages: vec![Message {
                role: Role::User,
                content: MessageContent::Text("What's the weather like?".to_string()),
                name: None,
                tool_call_id: None,
                tool_calls: None,
            }],
            temperature: None,
            top_p: None,
            max_completion_tokens: Some(1000),
            stop: None,
            stream: None,
            tools: Some(vec![Tool {
                tool_type: "function".to_string(),
                function: Function {
                    name: "get_weather".to_string(),
                    description: Some("Get current weather information".to_string()),
                    parameters: json!({
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city name"
                            }
                        },
                        "required": ["location"]
                    }),
                    strict: None,
                },
            }]),
            tool_choice: Some(ToolChoice::Function {
                choice_type: "function".to_string(),
                function: FunctionChoice {
                    name: "get_weather".to_string(),
                },
            }),
            ..Default::default()
        };
        let bedrock_request: ConverseRequest = openai_request.try_into().unwrap();
        assert_eq!(bedrock_request.model_id, "gpt-4");
        assert!(bedrock_request.tool_config.is_some());
        let tool_config = bedrock_request.tool_config.as_ref().unwrap();
        assert!(tool_config.tools.is_some());
        let tools = tool_config.tools.as_ref().unwrap();
        assert_eq!(tools.len(), 1);
        // Name and description survive the conversion to a tool spec.
        let crate::apis::amazon_bedrock::Tool::ToolSpec { tool_spec } = &tools[0];
        assert_eq!(tool_spec.name, "get_weather");
        assert_eq!(
            tool_spec.description,
            Some("Get current weather information".to_string())
        );
        if let Some(BedrockToolChoice::Tool { tool }) = &tool_config.tool_choice {
            assert_eq!(tool.name, "get_weather");
        } else {
            panic!("Expected specific tool choice");
        }
    }

    /// OpenAI's `tool_choice: "auto"` maps to Bedrock's auto tool choice.
    #[test]
    fn test_openai_to_bedrock_auto_tool_choice() {
        let openai_request = ChatCompletionsRequest {
            model: "gpt-4".to_string(),
            messages: vec![Message {
                role: Role::User,
                content: MessageContent::Text("Help me with something".to_string()),
                name: None,
                tool_call_id: None,
                tool_calls: None,
            }],
            temperature: None,
            top_p: None,
            max_completion_tokens: Some(500),
            stop: None,
            stream: None,
            tools: Some(vec![Tool {
                tool_type: "function".to_string(),
                function: Function {
                    name: "help_tool".to_string(),
                    description: Some("A helpful tool".to_string()),
                    parameters: json!({
                        "type": "object",
                        "properties": {}
                    }),
                    strict: None,
                },
            }]),
            tool_choice: Some(ToolChoice::Type(ToolChoiceType::Auto)),
            ..Default::default()
        };
        let bedrock_request: ConverseRequest = openai_request.try_into().unwrap();
        assert!(bedrock_request.tool_config.is_some());
        let tool_config = bedrock_request.tool_config.as_ref().unwrap();
        assert!(matches!(
            tool_config.tool_choice,
            Some(BedrockToolChoice::Auto { .. })
        ));
    }

    /// A multi-turn conversation keeps role order; the system message is
    /// removed from the turn list and placed in the `system` field instead.
    #[test]
    fn test_openai_to_bedrock_multi_message_conversation() {
        let openai_request = ChatCompletionsRequest {
            model: "gpt-4".to_string(),
            messages: vec![
                Message {
                    role: Role::System,
                    content: MessageContent::Text("Be concise".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
                Message {
                    role: Role::User,
                    content: MessageContent::Text("Hello".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
                Message {
                    role: Role::Assistant,
                    content: MessageContent::Text("Hi there! How can I help you?".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
                Message {
                    role: Role::User,
                    content: MessageContent::Text("What's 2+2?".to_string()),
                    name: None,
                    tool_call_id: None,
                    tool_calls: None,
                },
            ],
            temperature: Some(0.5),
            top_p: None,
            max_completion_tokens: Some(100),
            stop: None,
            stream: None,
            tools: None,
            tool_choice: None,
            ..Default::default()
        };
        let bedrock_request: ConverseRequest = openai_request.try_into().unwrap();
        assert!(bedrock_request.messages.is_some());
        let messages = bedrock_request.messages.as_ref().unwrap();
        assert_eq!(messages.len(), 3); // System message is separate
        assert_eq!(messages[0].role, ConversationRole::User);
        assert_eq!(messages[1].role, ConversationRole::Assistant);
        assert_eq!(messages[2].role, ConversationRole::User);
        // Check system prompt
        assert!(bedrock_request.system.is_some());
        if let SystemContentBlock::Text { text } = &bedrock_request.system.as_ref().unwrap()[0] {
            assert_eq!(text, "Be concise");
        } else {
            panic!("Expected system text block");
        }
    }

    /// A single OpenAI message converts directly into a Bedrock message with
    /// one text content block.
    #[test]
    fn test_openai_message_to_bedrock_conversion() {
        let openai_message = Message {
            role: Role::User,
            content: MessageContent::Text("Test message".to_string()),
            name: None,
            tool_call_id: None,
            tool_calls: None,
        };
        let bedrock_message: BedrockMessage = openai_message.try_into().unwrap();
        assert_eq!(bedrock_message.role, ConversationRole::User);
        assert_eq!(bedrock_message.content.len(), 1);
        if let ContentBlock::Text { text } = &bedrock_message.content[0] {
            assert_eq!(text, "Test message");
        } else {
            panic!("Expected text content block");
        }
    }
}

View file

@ -0,0 +1,4 @@
//! Request transformation modules
pub mod from_anthropic;
pub mod from_openai;

View file

@ -0,0 +1,3 @@
//! Response transformation modules
pub mod to_anthropic;
pub mod to_openai;

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff