Add first-class ChatGPT subscription provider support

This commit is contained in:
Spherrrical 2026-04-10 13:29:20 -07:00
parent 128059e7c1
commit bbe9946207
16 changed files with 637 additions and 7 deletions

View file

@@ -370,6 +370,8 @@ pub enum LlmProviderType {
AmazonBedrock,
#[serde(rename = "plano")]
Plano,
#[serde(rename = "chatgpt")]
ChatGPT,
}
impl Display for LlmProviderType {
@@ -391,6 +393,7 @@ impl Display for LlmProviderType {
LlmProviderType::Qwen => write!(f, "qwen"),
LlmProviderType::AmazonBedrock => write!(f, "amazon_bedrock"),
LlmProviderType::Plano => write!(f, "plano"),
LlmProviderType::ChatGPT => write!(f, "chatgpt"),
}
}
}
@@ -457,6 +460,7 @@ pub struct LlmProvider {
pub base_url_path_prefix: Option<String>,
pub internal: Option<bool>,
pub passthrough_auth: Option<bool>,
pub headers: Option<HashMap<String, String>>,
}
pub trait IntoModels {
@@ -500,6 +504,7 @@ impl Default for LlmProvider {
base_url_path_prefix: None,
internal: None,
passthrough_auth: None,
headers: None,
}
}
}

View file

@@ -277,6 +277,7 @@ mod tests {
internal: None,
stream: None,
passthrough_auth: None,
headers: None,
}
}

View file

@@ -328,7 +328,11 @@ providers:
- xiaomi/mimo-v2-flash
- xiaomi/mimo-v2-omni
- xiaomi/mimo-v2-pro
chatgpt:
- chatgpt/gpt-5.4
- chatgpt/gpt-5.3-codex
- chatgpt/gpt-5.2
metadata:
total_providers: 11
total_models: 316
total_providers: 12
total_models: 319
last_updated: 2026-04-03T23:14:46.956158+00:00

View file

@@ -192,7 +192,9 @@ impl SupportedAPIsFromClient {
// For Responses API, check if provider supports it, otherwise translate to chat/completions
match provider_id {
// Providers that support /v1/responses natively
ProviderId::OpenAI | ProviderId::XAI => route_by_provider("/responses"),
ProviderId::OpenAI | ProviderId::XAI | ProviderId::ChatGPT => {
route_by_provider("/responses")
}
// All other providers: translate to /chat/completions
_ => route_by_provider("/chat/completions"),
}

View file

@@ -44,6 +44,7 @@ pub enum ProviderId {
Zhipu,
Qwen,
AmazonBedrock,
ChatGPT,
}
impl TryFrom<&str> for ProviderId {
@@ -71,6 +72,7 @@ impl TryFrom<&str> for ProviderId {
"qwen" => Ok(ProviderId::Qwen),
"amazon_bedrock" => Ok(ProviderId::AmazonBedrock),
"amazon" => Ok(ProviderId::AmazonBedrock), // alias
"chatgpt" => Ok(ProviderId::ChatGPT),
_ => Err(format!("Unknown provider: {}", value)),
}
}
@@ -95,6 +97,7 @@ impl ProviderId {
ProviderId::Moonshotai => "moonshotai",
ProviderId::Zhipu => "z-ai",
ProviderId::Qwen => "qwen",
ProviderId::ChatGPT => "chatgpt",
_ => return Vec::new(),
};
@@ -148,7 +151,8 @@ impl ProviderId {
| ProviderId::Ollama
| ProviderId::Moonshotai
| ProviderId::Zhipu
| ProviderId::Qwen,
| ProviderId::Qwen
| ProviderId::ChatGPT,
SupportedAPIsFromClient::AnthropicMessagesAPI(_),
) => SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
@@ -167,13 +171,14 @@ impl ProviderId {
| ProviderId::Ollama
| ProviderId::Moonshotai
| ProviderId::Zhipu
| ProviderId::Qwen,
| ProviderId::Qwen
| ProviderId::ChatGPT,
SupportedAPIsFromClient::OpenAIChatCompletions(_),
) => SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
// OpenAI Responses API - OpenAI and xAI support this natively
// OpenAI Responses API - OpenAI, xAI, and ChatGPT support this natively
(
ProviderId::OpenAI | ProviderId::XAI,
ProviderId::OpenAI | ProviderId::XAI | ProviderId::ChatGPT,
SupportedAPIsFromClient::OpenAIResponsesAPI(_),
) => SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses),
@@ -234,6 +239,7 @@ impl Display for ProviderId {
ProviderId::Zhipu => write!(f, "zhipu"),
ProviderId::Qwen => write!(f, "qwen"),
ProviderId::AmazonBedrock => write!(f, "amazon_bedrock"),
ProviderId::ChatGPT => write!(f, "chatgpt"),
}
}
}

View file

@@ -89,6 +89,41 @@ impl ProviderRequestType {
req.web_search_options = None;
}
}
// ChatGPT requires instructions, store=false, and input as a list
if provider_id == ProviderId::ChatGPT {
if let Self::ResponsesAPIRequest(req) = self {
use crate::apis::openai_responses::{
InputItem, InputMessage, InputParam, MessageContent, MessageRole,
};
const CHATGPT_BASE_INSTRUCTIONS: &str =
"You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
match &req.instructions {
Some(existing) if existing.contains(CHATGPT_BASE_INSTRUCTIONS) => {}
Some(existing) => {
req.instructions =
Some(format!("{}\n\n{}", CHATGPT_BASE_INSTRUCTIONS, existing));
}
None => {
req.instructions = Some(CHATGPT_BASE_INSTRUCTIONS.to_string());
}
}
req.store = Some(false);
req.stream = Some(true);
// ChatGPT backend requires input to be a list, not a plain string
if let InputParam::Text(text) = &req.input {
req.input = InputParam::Items(vec![InputItem::Message(InputMessage {
role: MessageRole::User,
content: MessageContent::Text(text.clone()),
})]);
}
if let InputParam::SingleItem(item) = &req.input {
req.input = InputParam::Items(vec![item.clone()]);
}
}
}
}
}

View file

@@ -229,6 +229,14 @@ impl StreamContext {
}
}
// Apply any extra headers configured on the provider (e.g., ChatGPT-Account-Id, originator)
let headers = self.llm_provider().headers.clone();
if let Some(headers) = headers {
for (key, value) in &headers {
self.set_http_request_header(key, Some(value));
}
}
Ok(())
}