Add Codex CLI support; xAI response improvements

This commit is contained in:
Musa 2026-03-05 13:49:14 -08:00
parent a1508f4de1
commit 76dc2badd6
No known key found for this signature in database
18 changed files with 1252 additions and 166 deletions

View file

@ -108,7 +108,7 @@ pub struct ChatCompletionsRequest {
pub top_p: Option<f32>,
pub top_logprobs: Option<u32>,
pub user: Option<String>,
// pub web_search: Option<bool>, // GOOD FIRST ISSUE: Future support for web search
pub web_search_options: Option<Value>,
// VLLM-specific parameters (used by Arch-Function)
pub top_k: Option<u32>,

View file

@ -116,6 +116,8 @@ pub enum InputParam {
Text(String),
/// Array of input items (messages, references, outputs, etc.)
Items(Vec<InputItem>),
/// Single input item (some clients send object instead of array)
SingleItem(InputItem),
}
/// Input item - can be a message, item reference, function call output, etc.
@ -130,13 +132,23 @@ pub enum InputItem {
item_type: String,
id: String,
},
/// Function call emitted by model in prior turn
FunctionCall {
#[serde(rename = "type")]
item_type: String,
name: String,
arguments: String,
call_id: String,
},
/// Function call output
FunctionCallOutput {
#[serde(rename = "type")]
item_type: String,
call_id: String,
output: String,
output: serde_json::Value,
},
/// Forward-compat fallback for unknown input item shapes.
Unknown(serde_json::Value),
}
/// Input message with role and content
@ -166,6 +178,7 @@ pub enum MessageRole {
Assistant,
System,
Developer,
Tool,
}
/// Input content types
@ -173,6 +186,7 @@ pub enum MessageRole {
#[serde(tag = "type", rename_all = "snake_case")]
pub enum InputContent {
/// Text input
#[serde(rename = "input_text", alias = "text", alias = "output_text")]
InputText { text: String },
/// Image input via URL
InputImage {
@ -180,12 +194,16 @@ pub enum InputContent {
detail: Option<String>,
},
/// File input via URL
#[serde(rename = "input_file", alias = "file")]
InputFile { file_url: String },
/// Audio input
InputAudio {
data: Option<String>,
format: Option<String>,
},
/// Forward-compat fallback for unknown content parts.
#[serde(other)]
Unknown,
}
/// Modality options
@ -207,10 +225,11 @@ pub struct AudioConfig {
}
/// Text configuration
#[skip_serializing_none]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TextConfig {
/// Text format configuration
pub format: TextFormat,
pub format: Option<TextFormat>,
}
/// Text format
@ -285,6 +304,7 @@ pub enum Tool {
filters: Option<serde_json::Value>,
},
/// Web search tool
#[serde(rename = "web_search", alias = "web_search_preview")]
WebSearchPreview {
domains: Option<Vec<String>>,
search_context_size: Option<String>,
@ -298,6 +318,12 @@ pub enum Tool {
display_height_px: Option<i32>,
display_number: Option<i32>,
},
/// Custom tool (provider/SDK-specific tool contract)
Custom {
name: Option<String>,
description: Option<String>,
format: Option<serde_json::Value>,
},
}
/// Ranking options for file search
@ -1031,6 +1057,27 @@ impl ProviderRequest for ResponsesAPIRequest {
fn extract_messages_text(&self) -> String {
match &self.input {
InputParam::Text(text) => text.clone(),
InputParam::SingleItem(item) => {
// Normalize single-item input for extraction behavior parity.
match item {
InputItem::Message(msg) => match &msg.content {
MessageContent::Text(text) => text.clone(),
MessageContent::Items(content_items) => {
content_items.iter().fold(String::new(), |acc, content| {
acc + " "
+ &match content {
InputContent::InputText { text } => text.clone(),
InputContent::InputImage { .. } => "[Image]".to_string(),
InputContent::InputFile { .. } => "[File]".to_string(),
InputContent::InputAudio { .. } => "[Audio]".to_string(),
InputContent::Unknown => String::new(),
}
})
}
},
_ => String::new(),
}
}
InputParam::Items(items) => {
items.iter().fold(String::new(), |acc, item| {
match item {
@ -1051,6 +1098,7 @@ impl ProviderRequest for ResponsesAPIRequest {
InputContent::InputAudio { .. } => {
"[Audio]".to_string()
}
InputContent::Unknown => String::new(),
}
})
}
@ -1068,6 +1116,20 @@ impl ProviderRequest for ResponsesAPIRequest {
fn get_recent_user_message(&self) -> Option<String> {
match &self.input {
InputParam::Text(text) => Some(text.clone()),
InputParam::SingleItem(item) => match item {
InputItem::Message(msg) if matches!(msg.role, MessageRole::User) => {
match &msg.content {
MessageContent::Text(text) => Some(text.clone()),
MessageContent::Items(content_items) => {
content_items.iter().find_map(|content| match content {
InputContent::InputText { text } => Some(text.clone()),
_ => None,
})
}
}
}
_ => None,
},
InputParam::Items(items) => {
items.iter().rev().find_map(|item| {
match item {
@ -1097,6 +1159,9 @@ impl ProviderRequest for ResponsesAPIRequest {
.iter()
.filter_map(|tool| match tool {
Tool::Function { name, .. } => Some(name.clone()),
Tool::Custom {
name: Some(name), ..
} => Some(name.clone()),
// Other tool types don't have user-defined names
_ => None,
})
@ -1366,6 +1431,7 @@ impl crate::providers::streaming_response::ProviderStreamResponse for ResponsesA
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
#[test]
fn test_response_output_text_delta_deserialization() {
@ -1506,4 +1572,87 @@ mod tests {
_ => panic!("Expected ResponseCompleted event"),
}
}
#[test]
fn test_request_deserializes_custom_tool() {
    // A `custom` tool entry must survive request deserialization with its
    // name, description, and provider-specific `format` payload intact.
    let payload = json!({
        "model": "gpt-5.3-codex",
        "input": "apply the patch",
        "tools": [
            {
                "type": "custom",
                "name": "run_patch",
                "description": "Apply patch text",
                "format": {
                    "kind": "patch",
                    "version": "v1"
                }
            }
        ]
    });

    let body = serde_json::to_vec(&payload).unwrap();
    let parsed = ResponsesAPIRequest::try_from(body.as_slice()).unwrap();

    let tools = parsed.tools.expect("tools should be present");
    assert_eq!(tools.len(), 1);

    // Guard-style destructuring in place of a match with a panicking arm.
    let Tool::Custom {
        name,
        description,
        format,
    } = &tools[0]
    else {
        panic!("expected custom tool");
    };
    assert_eq!(name.as_deref(), Some("run_patch"));
    assert_eq!(description.as_deref(), Some("Apply patch text"));
    assert!(format.is_some());
}
#[test]
fn test_request_deserializes_web_search_tool_alias() {
    // The bare "web_search" type name must deserialize into the
    // WebSearchPreview variant (via the serde alias), keeping its fields.
    let payload = json!({
        "model": "gpt-5.3-codex",
        "input": "find repository info",
        "tools": [
            {
                "type": "web_search",
                "domains": ["github.com"],
                "search_context_size": "medium"
            }
        ]
    });

    let body = serde_json::to_vec(&payload).unwrap();
    let parsed = ResponsesAPIRequest::try_from(body.as_slice()).unwrap();

    let tools = parsed.tools.expect("tools should be present");
    assert_eq!(tools.len(), 1);

    // Guard-style destructuring in place of a match with a panicking arm.
    let Tool::WebSearchPreview {
        domains,
        search_context_size,
        ..
    } = &tools[0]
    else {
        panic!("expected web search preview tool");
    };
    assert_eq!(domains.as_ref().map(Vec::len), Some(1));
    assert_eq!(search_context_size.as_deref(), Some("medium"));
}
#[test]
fn test_request_deserializes_text_config_without_format() {
    // An empty `text` object is valid now that `TextConfig.format` is
    // optional; deserialization must yield Some(TextConfig { format: None }).
    let payload = json!({
        "model": "gpt-5.3-codex",
        "input": "hello",
        "text": {}
    });

    let body = serde_json::to_vec(&payload).unwrap();
    let parsed = ResponsesAPIRequest::try_from(body.as_slice()).unwrap();

    // Inspect the parsed config through a match rather than is_some/unwrap.
    match parsed.text {
        Some(config) => assert!(config.format.is_none()),
        None => panic!("text config should be present"),
    }
}
}

View file

@ -74,6 +74,7 @@ pub struct ResponsesAPIStreamBuffer {
/// Lifecycle state flags
created_emitted: bool,
in_progress_emitted: bool,
finalized: bool,
/// Track which output items we've added
output_items_added: HashMap<i32, String>, // output_index -> item_id
@ -109,6 +110,7 @@ impl ResponsesAPIStreamBuffer {
upstream_response_metadata: None,
created_emitted: false,
in_progress_emitted: false,
finalized: false,
output_items_added: HashMap::new(),
text_content: HashMap::new(),
function_arguments: HashMap::new(),
@ -236,7 +238,7 @@ impl ResponsesAPIStreamBuffer {
}),
store: Some(true),
text: Some(TextConfig {
format: TextFormat::Text,
format: Some(TextFormat::Text),
}),
audio: None,
modalities: None,
@ -255,8 +257,38 @@ impl ResponsesAPIStreamBuffer {
/// Finalize the response by emitting all *.done events and response.completed.
/// Call this when the stream is complete (after seeing [DONE] or end_of_stream).
pub fn finalize(&mut self) {
// Idempotent finalize: avoid duplicate response.completed loops.
if self.finalized {
return;
}
self.finalized = true;
let mut events = Vec::new();
// Ensure lifecycle prelude is emitted even if finalize is triggered
// by finish_reason before any prior delta was processed.
if !self.created_emitted {
if self.response_id.is_none() {
self.response_id = Some(format!(
"resp_{}",
uuid::Uuid::new_v4().to_string().replace("-", "")
));
self.created_at = Some(
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs() as i64,
);
self.model = Some("unknown".to_string());
}
events.push(self.create_response_created_event());
self.created_emitted = true;
}
if !self.in_progress_emitted {
events.push(self.create_response_in_progress_event());
self.in_progress_emitted = true;
}
// Emit done events for all accumulated content
// Text content done events
@ -443,6 +475,12 @@ impl SseStreamBufferTrait for ResponsesAPIStreamBuffer {
}
};
// Explicit completion marker from transform layer.
if matches!(stream_event.as_ref(), ResponsesAPIStreamEvent::Done { .. }) {
self.finalize();
return;
}
let mut events = Vec::new();
// Capture upstream metadata from ResponseCreated or ResponseInProgress if present
@ -789,4 +827,30 @@ mod tests {
println!("✓ NO completion events (partial stream, no [DONE])");
println!("✓ Arguments accumulated: '{{\"location\":\"'\n");
}
// Verifies idempotent finalization: an upstream chunk carrying
// `finish_reason: "stop"` with NO trailing `data: [DONE]` sentinel must
// still produce exactly one `response.completed` event — never a duplicate.
#[test]
fn test_finish_reason_without_done_still_finalizes_once() {
    // Two chat-completion chunks: a content delta, then a bare finish_reason.
    // Deliberately omits the [DONE] marker to exercise the finalize-on-finish path.
    let raw_input = r#"data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1234567890,"model":"gpt-4o","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]}
data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1234567890,"model":"gpt-4o","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}"#;
    // Client speaks the Responses API while upstream speaks Chat Completions,
    // so every chunk passes through the transform layer before buffering.
    let client_api = SupportedAPIsFromClient::OpenAIResponsesAPI(OpenAIApi::Responses);
    let upstream_api = SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
    let stream_iter = SseStreamIter::try_from(raw_input.as_bytes()).unwrap();
    let mut buffer = ResponsesAPIStreamBuffer::new();
    for raw_event in stream_iter {
        let transformed_event =
            SseEvent::try_from((raw_event, &client_api, &upstream_api)).unwrap();
        buffer.add_transformed_event(transformed_event);
    }
    // Count completion markers in the serialized SSE output.
    let output = String::from_utf8_lossy(&buffer.to_bytes()).to_string();
    let completed_count = output.matches("event: response.completed").count();
    assert_eq!(
        completed_count, 1,
        "response.completed should be emitted exactly once"
    );
}
}

View file

@ -184,8 +184,8 @@ impl SupportedAPIsFromClient {
SupportedAPIsFromClient::OpenAIResponsesAPI(_) => {
// For Responses API, check if provider supports it, otherwise translate to chat/completions
match provider_id {
// OpenAI and compatible providers that support /v1/responses
ProviderId::OpenAI => route_by_provider("/responses"),
// Providers that support /v1/responses natively
ProviderId::OpenAI | ProviderId::XAI => route_by_provider("/responses"),
// All other providers: translate to /chat/completions
_ => route_by_provider("/chat/completions"),
}
@ -654,4 +654,19 @@ mod tests {
"/custom/azure/path/gpt-4-deployment/chat/completions?api-version=2025-01-01-preview"
);
}
#[test]
fn test_responses_api_targets_xai_native_responses_endpoint() {
    // xAI supports /v1/responses natively, so routing must keep the
    // Responses endpoint rather than translating to /chat/completions.
    let client_api = SupportedAPIsFromClient::OpenAIResponsesAPI(OpenAIApi::Responses);
    let endpoint = client_api.target_endpoint_for_provider(
        &ProviderId::XAI,
        "/v1/responses",
        "grok-4-1-fast-reasoning",
        false,
        None,
    );
    assert_eq!(endpoint, "/v1/responses");
}
}

View file

@ -166,10 +166,11 @@ impl ProviderId {
SupportedAPIsFromClient::OpenAIChatCompletions(_),
) => SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
// OpenAI Responses API - only OpenAI supports this
(ProviderId::OpenAI, SupportedAPIsFromClient::OpenAIResponsesAPI(_)) => {
SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses)
}
// OpenAI Responses API - OpenAI and xAI support this natively
(
ProviderId::OpenAI | ProviderId::XAI,
SupportedAPIsFromClient::OpenAIResponsesAPI(_),
) => SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses),
// Amazon Bedrock natively supports Bedrock APIs
(ProviderId::AmazonBedrock, SupportedAPIsFromClient::OpenAIChatCompletions(_)) => {
@ -328,4 +329,16 @@ mod tests {
"AmazonBedrock should have models (mapped to amazon)"
);
}
#[test]
fn test_xai_uses_responses_api_for_responses_clients() {
    use crate::clients::endpoints::{SupportedAPIsFromClient, SupportedUpstreamAPIs};

    // A Responses-API client hitting xAI should resolve to the native
    // Responses upstream API, not a chat-completions translation.
    let requested = SupportedAPIsFromClient::OpenAIResponsesAPI(OpenAIApi::Responses);
    let resolved = ProviderId::XAI.compatible_api_for_client(&requested, false);
    let is_native_responses = matches!(
        resolved,
        SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses)
    );
    assert!(is_native_responses);
}
}

View file

@ -10,7 +10,8 @@ use crate::apis::anthropic::{
ToolResultContent,
};
use crate::apis::openai::{
ChatCompletionsRequest, Message, MessageContent, Role, Tool, ToolChoice, ToolChoiceType,
ChatCompletionsRequest, FunctionCall as OpenAIFunctionCall, Message, MessageContent, Role,
Tool, ToolCall as OpenAIToolCall, ToolChoice, ToolChoiceType,
};
use crate::apis::openai_responses::{
@ -65,6 +66,14 @@ impl TryFrom<ResponsesInputConverter> for Vec<Message> {
Ok(messages)
}
InputParam::SingleItem(item) => {
// Some clients send a single object instead of an array.
let nested = ResponsesInputConverter {
input: InputParam::Items(vec![item]),
instructions: converter.instructions,
};
Vec::<Message>::try_from(nested)
}
InputParam::Items(items) => {
// Convert input items to messages
let mut converted_messages = Vec::new();
@ -82,82 +91,147 @@ impl TryFrom<ResponsesInputConverter> for Vec<Message> {
// Convert each input item
for item in items {
if let InputItem::Message(input_msg) = item {
let role = match input_msg.role {
MessageRole::User => Role::User,
MessageRole::Assistant => Role::Assistant,
MessageRole::System => Role::System,
MessageRole::Developer => Role::System, // Map developer to system
};
match item {
InputItem::Message(input_msg) => {
let role = match input_msg.role {
MessageRole::User => Role::User,
MessageRole::Assistant => Role::Assistant,
MessageRole::System => Role::System,
MessageRole::Developer => Role::System, // Map developer to system
MessageRole::Tool => Role::Tool,
};
// Convert content based on MessageContent type
let content = match &input_msg.content {
crate::apis::openai_responses::MessageContent::Text(text) => {
// Simple text content
MessageContent::Text(text.clone())
}
crate::apis::openai_responses::MessageContent::Items(content_items) => {
// Check if it's a single text item (can use simple text format)
if content_items.len() == 1 {
if let InputContent::InputText { text } = &content_items[0] {
MessageContent::Text(text.clone())
// Convert content based on MessageContent type
let content = match &input_msg.content {
crate::apis::openai_responses::MessageContent::Text(text) => {
// Simple text content
MessageContent::Text(text.clone())
}
crate::apis::openai_responses::MessageContent::Items(
content_items,
) => {
// Check if it's a single text item (can use simple text format)
if content_items.len() == 1 {
if let InputContent::InputText { text } = &content_items[0]
{
MessageContent::Text(text.clone())
} else {
// Single non-text item - use parts format
MessageContent::Parts(
content_items
.iter()
.filter_map(|c| match c {
InputContent::InputText { text } => {
Some(crate::apis::openai::ContentPart::Text {
text: text.clone(),
})
}
InputContent::InputImage { image_url, .. } => {
Some(crate::apis::openai::ContentPart::ImageUrl {
image_url: crate::apis::openai::ImageUrl {
url: image_url.clone(),
detail: None,
},
})
}
InputContent::InputFile { .. } => None, // Skip files for now
InputContent::InputAudio { .. } => None, // Skip audio for now
InputContent::Unknown => None,
})
.collect(),
)
}
} else {
// Single non-text item - use parts format
// Multiple content items - convert to parts
MessageContent::Parts(
content_items.iter()
content_items
.iter()
.filter_map(|c| match c {
InputContent::InputText { text } => {
Some(crate::apis::openai::ContentPart::Text { text: text.clone() })
Some(crate::apis::openai::ContentPart::Text {
text: text.clone(),
})
}
InputContent::InputImage { image_url, .. } => {
Some(crate::apis::openai::ContentPart::ImageUrl {
image_url: crate::apis::openai::ImageUrl {
url: image_url.clone(),
detail: None,
}
},
})
}
InputContent::InputFile { .. } => None, // Skip files for now
InputContent::InputAudio { .. } => None, // Skip audio for now
InputContent::Unknown => None,
})
.collect()
.collect(),
)
}
} else {
// Multiple content items - convert to parts
MessageContent::Parts(
content_items
.iter()
.filter_map(|c| match c {
InputContent::InputText { text } => {
Some(crate::apis::openai::ContentPart::Text {
text: text.clone(),
})
}
InputContent::InputImage { image_url, .. } => Some(
crate::apis::openai::ContentPart::ImageUrl {
image_url: crate::apis::openai::ImageUrl {
url: image_url.clone(),
detail: None,
},
},
),
InputContent::InputFile { .. } => None, // Skip files for now
InputContent::InputAudio { .. } => None, // Skip audio for now
})
.collect(),
)
}
};
converted_messages.push(Message {
role,
content: Some(content),
name: None,
tool_call_id: None,
tool_calls: None,
});
}
InputItem::FunctionCallOutput {
item_type: _,
call_id,
output,
} => {
// Preserve tool result so upstream models do not re-issue the same tool call.
let output_text = match output {
serde_json::Value::String(s) => s.clone(),
other => serde_json::to_string(&other).unwrap_or_default(),
};
converted_messages.push(Message {
role: Role::Tool,
content: Some(MessageContent::Text(output_text)),
name: None,
tool_call_id: Some(call_id),
tool_calls: None,
});
}
InputItem::FunctionCall {
item_type: _,
name,
arguments,
call_id,
} => {
let tool_call = OpenAIToolCall {
id: call_id,
call_type: "function".to_string(),
function: OpenAIFunctionCall { name, arguments },
};
// Prefer attaching tool_calls to the preceding assistant message when present.
if let Some(last) = converted_messages.last_mut() {
if matches!(last.role, Role::Assistant) {
if let Some(existing) = &mut last.tool_calls {
existing.push(tool_call);
} else {
last.tool_calls = Some(vec![tool_call]);
}
continue;
}
}
};
converted_messages.push(Message {
role,
content: Some(content),
name: None,
tool_call_id: None,
tool_calls: None,
});
converted_messages.push(Message {
role: Role::Assistant,
content: None,
name: None,
tool_call_id: None,
tool_calls: Some(vec![tool_call]),
});
}
InputItem::ItemReference { .. } | InputItem::Unknown(_) => {
// Item references/unknown entries are metadata-like and can be skipped
// for chat-completions conversion.
}
}
}
@ -397,6 +471,170 @@ impl TryFrom<ResponsesAPIRequest> for ChatCompletionsRequest {
type Error = TransformError;
fn try_from(req: ResponsesAPIRequest) -> Result<Self, Self::Error> {
// Coerce arbitrary Responses-API tool `parameters` into the JSON Schema
// shape ChatCompletions function tools require: a top-level object schema
// carrying `type: "object"` and a `properties` map.
//
// `fallback_extra`, when present, is preserved under the non-standard
// `x-custom-format` key so provider-specific format hints survive the
// conversion instead of being dropped.
//
// NOTE(review): a `Some(parameters)` that is NOT a JSON object (null, array,
// string, …) silently falls back to the empty object schema — confirm that
// drop-on-mismatch is the intended contract rather than an error.
fn normalize_function_parameters(
    parameters: Option<serde_json::Value>,
    fallback_extra: Option<serde_json::Value>,
) -> serde_json::Value {
    // ChatCompletions function tools require JSON Schema with top-level type=object.
    let mut base = serde_json::json!({
        "type": "object",
        "properties": {},
    });
    if let Some(serde_json::Value::Object(mut obj)) = parameters {
        // Enforce a valid object schema shape regardless of upstream tool format.
        obj.insert(
            "type".to_string(),
            serde_json::Value::String("object".to_string()),
        );
        // Only inject `properties` when absent; an existing map is kept as-is.
        if !obj.contains_key("properties") {
            obj.insert(
                "properties".to_string(),
                serde_json::Value::Object(serde_json::Map::new()),
            );
        }
        base = serde_json::Value::Object(obj);
    }
    if let Some(extra) = fallback_extra {
        // `base` is always an object here, so the insert cannot be skipped
        // in practice; the if-let guards the pattern, not a real branch.
        if let serde_json::Value::Object(ref mut map) = base {
            map.insert("x-custom-format".to_string(), extra);
        }
    }
    base
}
let mut converted_chat_tools: Vec<Tool> = Vec::new();
let mut web_search_options: Option<serde_json::Value> = None;
if let Some(tools) = req.tools.clone() {
for (idx, tool) in tools.into_iter().enumerate() {
match tool {
ResponsesTool::Function {
name,
description,
parameters,
strict,
} => converted_chat_tools.push(Tool {
tool_type: "function".to_string(),
function: crate::apis::openai::Function {
name,
description,
parameters: normalize_function_parameters(parameters, None),
strict,
},
}),
ResponsesTool::WebSearchPreview {
search_context_size,
user_location,
..
} => {
if web_search_options.is_none() {
let user_location_value = user_location.map(|loc| {
let mut approx = serde_json::Map::new();
if let Some(city) = loc.city {
approx.insert(
"city".to_string(),
serde_json::Value::String(city),
);
}
if let Some(country) = loc.country {
approx.insert(
"country".to_string(),
serde_json::Value::String(country),
);
}
if let Some(region) = loc.region {
approx.insert(
"region".to_string(),
serde_json::Value::String(region),
);
}
if let Some(timezone) = loc.timezone {
approx.insert(
"timezone".to_string(),
serde_json::Value::String(timezone),
);
}
serde_json::json!({
"type": loc.location_type,
"approximate": serde_json::Value::Object(approx),
})
});
let mut web_search = serde_json::Map::new();
if let Some(size) = search_context_size {
web_search.insert(
"search_context_size".to_string(),
serde_json::Value::String(size),
);
}
if let Some(location) = user_location_value {
web_search.insert("user_location".to_string(), location);
}
web_search_options = Some(serde_json::Value::Object(web_search));
}
}
ResponsesTool::Custom {
name,
description,
format,
} => {
// Custom tools do not have a strict ChatCompletions equivalent for all
// providers. Map them to a permissive function tool for compatibility.
let tool_name = name.unwrap_or_else(|| format!("custom_tool_{}", idx + 1));
let parameters = normalize_function_parameters(
Some(serde_json::json!({
"type": "object",
"properties": {
"input": { "type": "string" }
},
"required": ["input"],
"additionalProperties": true,
})),
format,
);
converted_chat_tools.push(Tool {
tool_type: "function".to_string(),
function: crate::apis::openai::Function {
name: tool_name,
description,
parameters,
strict: Some(false),
},
});
}
ResponsesTool::FileSearch { .. } => {
return Err(TransformError::UnsupportedConversion(
"FileSearch tool is not supported in ChatCompletions API. Only function/custom/web search tools are supported in this conversion."
.to_string(),
));
}
ResponsesTool::CodeInterpreter => {
return Err(TransformError::UnsupportedConversion(
"CodeInterpreter tool is not supported in ChatCompletions API conversion."
.to_string(),
));
}
ResponsesTool::Computer { .. } => {
return Err(TransformError::UnsupportedConversion(
"Computer tool is not supported in ChatCompletions API conversion."
.to_string(),
));
}
}
}
}
let tools = if converted_chat_tools.is_empty() {
None
} else {
Some(converted_chat_tools)
};
// Convert input to messages using the shared converter
let converter = ResponsesInputConverter {
input: req.input,
@ -418,57 +656,24 @@ impl TryFrom<ResponsesAPIRequest> for ChatCompletionsRequest {
service_tier: req.service_tier,
top_logprobs: req.top_logprobs.map(|t| t as u32),
modalities: req.modalities.map(|mods| {
mods.into_iter().map(|m| {
match m {
mods.into_iter()
.map(|m| match m {
Modality::Text => "text".to_string(),
Modality::Audio => "audio".to_string(),
}
}).collect()
})
.collect()
}),
stream_options: req.stream_options.map(|opts| {
crate::apis::openai::StreamOptions {
stream_options: req
.stream_options
.map(|opts| crate::apis::openai::StreamOptions {
include_usage: opts.include_usage,
}
}),
reasoning_effort: req.reasoning_effort.map(|effort| match effort {
ReasoningEffort::Low => "low".to_string(),
ReasoningEffort::Medium => "medium".to_string(),
ReasoningEffort::High => "high".to_string(),
}),
reasoning_effort: req.reasoning_effort.map(|effort| {
match effort {
ReasoningEffort::Low => "low".to_string(),
ReasoningEffort::Medium => "medium".to_string(),
ReasoningEffort::High => "high".to_string(),
}
}),
tools: req.tools.map(|tools| {
tools.into_iter().map(|tool| {
// Only convert Function tools - other types are not supported in ChatCompletions
match tool {
ResponsesTool::Function { name, description, parameters, strict } => Ok(Tool {
tool_type: "function".to_string(),
function: crate::apis::openai::Function {
name,
description,
parameters: parameters.unwrap_or_else(|| serde_json::json!({
"type": "object",
"properties": {}
})),
strict,
}
}),
ResponsesTool::FileSearch { .. } => Err(TransformError::UnsupportedConversion(
"FileSearch tool is not supported in ChatCompletions API. Only function tools are supported.".to_string()
)),
ResponsesTool::WebSearchPreview { .. } => Err(TransformError::UnsupportedConversion(
"WebSearchPreview tool is not supported in ChatCompletions API. Only function tools are supported.".to_string()
)),
ResponsesTool::CodeInterpreter => Err(TransformError::UnsupportedConversion(
"CodeInterpreter tool is not supported in ChatCompletions API. Only function tools are supported.".to_string()
)),
ResponsesTool::Computer { .. } => Err(TransformError::UnsupportedConversion(
"Computer tool is not supported in ChatCompletions API. Only function tools are supported.".to_string()
)),
}
}).collect::<Result<Vec<_>, _>>()
}).transpose()?,
tools,
tool_choice: req.tool_choice.map(|choice| {
match choice {
ResponsesToolChoice::String(s) => {
@ -481,11 +686,14 @@ impl TryFrom<ResponsesAPIRequest> for ChatCompletionsRequest {
}
ResponsesToolChoice::Named { function, .. } => ToolChoice::Function {
choice_type: "function".to_string(),
function: crate::apis::openai::FunctionChoice { name: function.name }
}
function: crate::apis::openai::FunctionChoice {
name: function.name,
},
},
}
}),
parallel_tool_calls: req.parallel_tool_calls,
web_search_options,
..Default::default()
})
}
@ -1027,4 +1235,235 @@ mod tests {
panic!("Expected text content block");
}
}
// The Responses-API `custom` tool has no ChatCompletions equivalent, so the
// converter is expected to downgrade it into a permissive `function` tool
// while keeping its name and description intact.
#[test]
fn test_responses_custom_tool_maps_to_function_tool_for_chat_completions() {
    use crate::apis::openai_responses::{
        InputParam, ResponsesAPIRequest, Tool as ResponsesTool,
    };
    let req = ResponsesAPIRequest {
        model: "gpt-5.3-codex".to_string(),
        input: InputParam::Text("use custom tool".to_string()),
        tools: Some(vec![ResponsesTool::Custom {
            name: Some("run_patch".to_string()),
            description: Some("Apply structured patch".to_string()),
            // Provider-specific format payload; the conversion folds this
            // into the generated parameters schema (under `x-custom-format`).
            format: Some(serde_json::json!({
                "kind": "patch",
                "version": "v1"
            })),
        }]),
        // Everything else is left at its absent/None state — only the tool
        // conversion path is under test here.
        include: None,
        parallel_tool_calls: None,
        store: None,
        instructions: None,
        stream: None,
        stream_options: None,
        conversation: None,
        tool_choice: None,
        max_output_tokens: None,
        temperature: None,
        top_p: None,
        metadata: None,
        previous_response_id: None,
        modalities: None,
        audio: None,
        text: None,
        reasoning_effort: None,
        truncation: None,
        user: None,
        max_tool_calls: None,
        service_tier: None,
        background: None,
        top_logprobs: None,
    };
    let converted = ChatCompletionsRequest::try_from(req).expect("conversion should succeed");
    let tools = converted.tools.expect("tools should be present");
    assert_eq!(tools.len(), 1);
    // The custom tool surfaces as a plain function tool with the same identity.
    assert_eq!(tools[0].tool_type, "function");
    assert_eq!(tools[0].function.name, "run_patch");
    assert_eq!(
        tools[0].function.description.as_deref(),
        Some("Apply structured patch")
    );
}
// A Responses-API web-search tool is not representable as a ChatCompletions
// tool entry; instead the converter must surface it through the request-level
// `web_search_options` field.
#[test]
fn test_responses_web_search_maps_to_chat_web_search_options() {
    use crate::apis::openai_responses::{
        InputParam, ResponsesAPIRequest, Tool as ResponsesTool, UserLocation,
    };
    let req = ResponsesAPIRequest {
        model: "gpt-5.3-codex".to_string(),
        input: InputParam::Text("find project docs".to_string()),
        tools: Some(vec![ResponsesTool::WebSearchPreview {
            domains: Some(vec!["docs.planoai.dev".to_string()]),
            search_context_size: Some("medium".to_string()),
            // Fully populated location to exercise every mapped sub-field.
            user_location: Some(UserLocation {
                location_type: "approximate".to_string(),
                city: Some("San Francisco".to_string()),
                country: Some("US".to_string()),
                region: Some("CA".to_string()),
                timezone: Some("America/Los_Angeles".to_string()),
            }),
        }]),
        // Remaining request fields are irrelevant to this conversion path.
        include: None,
        parallel_tool_calls: None,
        store: None,
        instructions: None,
        stream: None,
        stream_options: None,
        conversation: None,
        tool_choice: None,
        max_output_tokens: None,
        temperature: None,
        top_p: None,
        metadata: None,
        previous_response_id: None,
        modalities: None,
        audio: None,
        text: None,
        reasoning_effort: None,
        truncation: None,
        user: None,
        max_tool_calls: None,
        service_tier: None,
        background: None,
        top_logprobs: None,
    };
    let converted = ChatCompletionsRequest::try_from(req).expect("conversion should succeed");
    // Only presence is asserted here; the exact option payload shape is
    // covered by the converter implementation itself.
    assert!(converted.web_search_options.is_some());
}
// A `function_call_output` input item must become a role=tool chat message
// carrying the original call_id, so upstream models see the tool result and
// do not re-issue the same tool call.
#[test]
fn test_responses_function_call_output_maps_to_tool_message() {
    use crate::apis::openai_responses::{
        InputItem, InputParam, ResponsesAPIRequest, Tool as ResponsesTool,
    };
    let req = ResponsesAPIRequest {
        model: "gpt-5.3-codex".to_string(),
        // Single output item: a structured (non-string) JSON result, which
        // the converter must serialize into the tool message's text content.
        input: InputParam::Items(vec![InputItem::FunctionCallOutput {
            item_type: "function_call_output".to_string(),
            call_id: "call_123".to_string(),
            output: serde_json::json!({"status":"ok","stdout":"hello"}),
        }]),
        tools: Some(vec![ResponsesTool::Function {
            name: "exec_command".to_string(),
            description: Some("Execute a shell command".to_string()),
            parameters: Some(serde_json::json!({
                "type": "object",
                "properties": {
                    "cmd": { "type": "string" }
                },
                "required": ["cmd"]
            })),
            strict: Some(false),
        }]),
        // Remaining request fields are irrelevant to this conversion path.
        include: None,
        parallel_tool_calls: None,
        store: None,
        instructions: None,
        stream: None,
        stream_options: None,
        conversation: None,
        tool_choice: None,
        max_output_tokens: None,
        temperature: None,
        top_p: None,
        metadata: None,
        previous_response_id: None,
        modalities: None,
        audio: None,
        text: None,
        reasoning_effort: None,
        truncation: None,
        user: None,
        max_tool_calls: None,
        service_tier: None,
        background: None,
        top_logprobs: None,
    };
    let converted = ChatCompletionsRequest::try_from(req).expect("conversion should succeed");
    assert_eq!(converted.messages.len(), 1);
    assert!(matches!(converted.messages[0].role, Role::Tool));
    // The call_id linkage is what lets the model pair result with call.
    assert_eq!(
        converted.messages[0].tool_call_id.as_deref(),
        Some("call_123")
    );
}
// End-to-end call/result pairing: a prior-turn `function_call` item must be
// attached as `tool_calls` on the preceding assistant message, and the
// matching `function_call_output` must become a tool message sharing the
// same call_id — preserving the link ChatCompletions providers require.
#[test]
fn test_responses_function_call_and_output_preserve_call_id_link() {
    use crate::apis::openai_responses::{
        InputItem, InputMessage, MessageContent as ResponsesMessageContent, MessageRole,
        ResponsesAPIRequest,
    };
    let req = ResponsesAPIRequest {
        model: "gpt-5.3-codex".to_string(),
        input: InputParam::Items(vec![
            // Empty assistant message: the function call below should be
            // folded into it rather than emitted as a separate message.
            InputItem::Message(InputMessage {
                role: MessageRole::Assistant,
                content: ResponsesMessageContent::Items(vec![]),
            }),
            InputItem::FunctionCall {
                item_type: "function_call".to_string(),
                name: "exec_command".to_string(),
                arguments: "{\"cmd\":\"pwd\"}".to_string(),
                call_id: "toolu_abc123".to_string(),
            },
            InputItem::FunctionCallOutput {
                item_type: "function_call_output".to_string(),
                call_id: "toolu_abc123".to_string(),
                output: serde_json::Value::String("ok".to_string()),
            },
        ]),
        // No tool definitions needed; only message conversion is under test.
        tools: None,
        include: None,
        parallel_tool_calls: None,
        store: None,
        instructions: None,
        stream: None,
        stream_options: None,
        conversation: None,
        tool_choice: None,
        max_output_tokens: None,
        temperature: None,
        top_p: None,
        metadata: None,
        previous_response_id: None,
        modalities: None,
        audio: None,
        text: None,
        reasoning_effort: None,
        truncation: None,
        user: None,
        max_tool_calls: None,
        service_tier: None,
        background: None,
        top_logprobs: None,
    };
    let converted = ChatCompletionsRequest::try_from(req).expect("conversion should succeed");
    // Three input items collapse into two messages: assistant (+tool_calls)
    // followed by the tool result.
    assert_eq!(converted.messages.len(), 2);
    assert!(matches!(converted.messages[0].role, Role::Assistant));
    let tool_calls = converted.messages[0]
        .tool_calls
        .as_ref()
        .expect("assistant tool_calls should be present");
    assert_eq!(tool_calls.len(), 1);
    assert_eq!(tool_calls[0].id, "toolu_abc123");
    assert!(matches!(converted.messages[1].role, Role::Tool));
    // Same call_id on both sides proves the link survived conversion.
    assert_eq!(
        converted.messages[1].tool_call_id.as_deref(),
        Some("toolu_abc123")
    );
}
}

View file

@ -512,19 +512,12 @@ impl TryFrom<ChatCompletionsStreamResponse> for ResponsesAPIStreamEvent {
}
}
// Handle finish_reason - this is a completion signal
// Return an empty delta that the buffer can use to detect completion
// Handle finish_reason - this is a completion signal.
// Emit an explicit Done marker so the buffering layer can finalize
// even if an upstream [DONE] marker is missing/delayed.
if choice.finish_reason.is_some() {
// Return a minimal text delta to signal completion
// The buffer will handle the finish_reason and generate response.completed
return Ok(ResponsesAPIStreamEvent::ResponseOutputTextDelta {
item_id: "".to_string(), // Buffer will fill this
output_index: choice.index as i32,
content_index: 0,
delta: "".to_string(), // Empty delta signals completion
logprobs: vec![],
obfuscation: None,
sequence_number: 0, // Buffer will fill this
return Ok(ResponsesAPIStreamEvent::Done {
sequence_number: 0, // Buffer will assign final sequence
});
}