Mirror of https://github.com/katanemo/plano.git (synced 2026-05-10 16:22:42 +02:00)
Add first-class ChatGPT subscription provider support (#881)
* Add first-class ChatGPT subscription provider support
* Address PR feedback: move uuid import to top, reuse parsed config in up()
* Add ChatGPT token watchdog for seamless long-lived sessions
* Address PR feedback: error on stream=false for ChatGPT, fix auth file permissions
* Replace ChatGPT watchdog/restart with passthrough_auth

Co-authored-by: Musa Malik <musam@uw.edu>
Parent: aa726b1bba
Commit: 78dc4edad9
18 changed files with 693 additions and 20 deletions
@@ -329,6 +329,10 @@ providers:
     - xiaomi/mimo-v2-flash
     - xiaomi/mimo-v2-omni
     - xiaomi/mimo-v2-pro
+  chatgpt:
+    - chatgpt/gpt-5.4
+    - chatgpt/gpt-5.3-codex
+    - chatgpt/gpt-5.2
   digitalocean:
     - digitalocean/openai-gpt-4.1
     - digitalocean/openai-gpt-4o
@@ -376,6 +380,6 @@ providers:
     - digitalocean/qwen3-embedding-0.6b
     - digitalocean/router:software-engineering
 metadata:
-  total_providers: 12
-  total_models: 361
-  last_updated: 2026-04-16T00:00:00.000000+00:00
+  total_providers: 13
+  total_models: 364
+  last_updated: 2026-04-20T00:00:00.000000+00:00
@@ -192,7 +192,9 @@ impl SupportedAPIsFromClient {
         // For Responses API, check if provider supports it, otherwise translate to chat/completions
         match provider_id {
             // Providers that support /v1/responses natively
-            ProviderId::OpenAI | ProviderId::XAI => route_by_provider("/responses"),
+            ProviderId::OpenAI | ProviderId::XAI | ProviderId::ChatGPT => {
+                route_by_provider("/responses")
+            }
             // All other providers: translate to /chat/completions
             _ => route_by_provider("/chat/completions"),
         }
@@ -44,6 +44,7 @@ pub enum ProviderId {
     Zhipu,
     Qwen,
     AmazonBedrock,
+    ChatGPT,
     DigitalOcean,
 }
 
@@ -72,6 +73,7 @@ impl TryFrom<&str> for ProviderId {
             "qwen" => Ok(ProviderId::Qwen),
             "amazon_bedrock" => Ok(ProviderId::AmazonBedrock),
             "amazon" => Ok(ProviderId::AmazonBedrock), // alias
+            "chatgpt" => Ok(ProviderId::ChatGPT),
             "digitalocean" => Ok(ProviderId::DigitalOcean),
             "do" => Ok(ProviderId::DigitalOcean), // alias
             "do_ai" => Ok(ProviderId::DigitalOcean), // alias
@@ -99,6 +101,7 @@ impl ProviderId {
             ProviderId::Moonshotai => "moonshotai",
             ProviderId::Zhipu => "z-ai",
             ProviderId::Qwen => "qwen",
+            ProviderId::ChatGPT => "chatgpt",
             ProviderId::DigitalOcean => "digitalocean",
             _ => return Vec::new(),
         };
@@ -154,6 +157,7 @@ impl ProviderId {
                 | ProviderId::Moonshotai
                 | ProviderId::Zhipu
                 | ProviderId::Qwen
+                | ProviderId::ChatGPT
                 | ProviderId::DigitalOcean,
                 SupportedAPIsFromClient::AnthropicMessagesAPI(_),
             ) => SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
@@ -174,13 +178,14 @@ impl ProviderId {
                 | ProviderId::Moonshotai
                 | ProviderId::Zhipu
                 | ProviderId::Qwen
+                | ProviderId::ChatGPT
                 | ProviderId::DigitalOcean,
                 SupportedAPIsFromClient::OpenAIChatCompletions(_),
             ) => SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
 
-            // OpenAI Responses API - OpenAI and xAI support this natively
+            // OpenAI Responses API - OpenAI, xAI, and ChatGPT support this natively
             (
-                ProviderId::OpenAI | ProviderId::XAI,
+                ProviderId::OpenAI | ProviderId::XAI | ProviderId::ChatGPT,
                 SupportedAPIsFromClient::OpenAIResponsesAPI(_),
             ) => SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses),
 
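The net routing effect of the two hunks above can be restated compactly. The sketch below is illustrative only: the enum and function are simplified stand-ins (not the crate's ProviderId / SupportedUpstreamAPIs items) that capture which upstream API a client Responses API call lands on after this change.

```rust
// Illustrative stand-ins for the crate's routing types, not the actual items.
#[derive(Debug, PartialEq)]
enum Upstream {
    ChatCompletions, // request is translated to the provider's /v1/chat/completions
    Responses,       // request is forwarded natively to /v1/responses
}

// After this commit, a client Responses API call stays on /responses only for
// OpenAI, xAI, and the new ChatGPT provider; every other provider is translated.
fn upstream_for_responses_request(provider: &str) -> Upstream {
    match provider {
        "openai" | "xai" | "chatgpt" => Upstream::Responses,
        _ => Upstream::ChatCompletions,
    }
}

fn main() {
    assert_eq!(upstream_for_responses_request("chatgpt"), Upstream::Responses);
    assert_eq!(
        upstream_for_responses_request("digitalocean"),
        Upstream::ChatCompletions
    );
}
```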
@@ -241,6 +246,7 @@ impl Display for ProviderId {
             ProviderId::Zhipu => write!(f, "zhipu"),
             ProviderId::Qwen => write!(f, "qwen"),
             ProviderId::AmazonBedrock => write!(f, "amazon_bedrock"),
+            ProviderId::ChatGPT => write!(f, "chatgpt"),
             ProviderId::DigitalOcean => write!(f, "digitalocean"),
         }
     }
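Taken together, the TryFrom and Display hunks mean the new provider id round-trips through the string "chatgpt". A test-style sketch of that behavior, assuming it is dropped into the crate's existing test module next to the ProviderId code (the test name is hypothetical; the assertions follow directly from the match arms above):

```rust
#[test]
fn chatgpt_provider_id_round_trips() {
    // "chatgpt" parses to the new variant...
    assert!(matches!(ProviderId::try_from("chatgpt"), Ok(ProviderId::ChatGPT)));
    // ...and the Display impl writes it back out the same way.
    assert_eq!(ProviderId::ChatGPT.to_string(), "chatgpt");

    // The DigitalOcean aliases in the same TryFrom match still resolve to one variant.
    for alias in ["digitalocean", "do", "do_ai"] {
        assert!(matches!(ProviderId::try_from(alias), Ok(ProviderId::DigitalOcean)));
    }
}
```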
@@ -77,7 +77,7 @@ impl ProviderRequestType {
         &mut self,
         provider_id: ProviderId,
         upstream_api: &SupportedUpstreamAPIs,
-    ) {
+    ) -> Result<(), ProviderRequestError> {
         if provider_id == ProviderId::XAI
             && matches!(
                 upstream_api,
@@ -89,6 +89,48 @@ impl ProviderRequestType {
                 req.web_search_options = None;
             }
         }
+
+        // ChatGPT requires instructions, store=false, and input as a list
+        if provider_id == ProviderId::ChatGPT {
+            if let Self::ResponsesAPIRequest(req) = self {
+                use crate::apis::openai_responses::{
+                    InputItem, InputMessage, InputParam, MessageContent, MessageRole,
+                };
+
+                const CHATGPT_BASE_INSTRUCTIONS: &str =
+                    "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.";
+                match &req.instructions {
+                    Some(existing) if existing.contains(CHATGPT_BASE_INSTRUCTIONS) => {}
+                    Some(existing) => {
+                        req.instructions =
+                            Some(format!("{}\n\n{}", CHATGPT_BASE_INSTRUCTIONS, existing));
+                    }
+                    None => {
+                        req.instructions = Some(CHATGPT_BASE_INSTRUCTIONS.to_string());
+                    }
+                }
+                req.store = Some(false);
+                if req.stream == Some(false) {
+                    return Err(ProviderRequestError {
+                        message: "Non-streaming requests are not supported for the ChatGPT Codex provider. Set stream=true or omit the stream field.".to_string(),
+                        source: None,
+                    });
+                }
+                req.stream = Some(true);
+
+                // ChatGPT backend requires input to be a list, not a plain string
+                if let InputParam::Text(text) = &req.input {
+                    req.input = InputParam::Items(vec![InputItem::Message(InputMessage {
+                        role: MessageRole::User,
+                        content: MessageContent::Text(text.clone()),
+                    })]);
+                }
+                if let InputParam::SingleItem(item) = &req.input {
+                    req.input = InputParam::Items(vec![item.clone()]);
+                }
+            }
+        }
+        Ok(())
     }
 }
 
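A sketch of how this normalization behaves end to end, written in the style of the test hunks further down. The CreateResponsesRequest name is a placeholder assumption (the crate's Responses request struct may be named differently and is assumed to implement Default); everything asserted follows from the hunk above.

```rust
#[test]
fn chatgpt_normalization_enforces_codex_defaults() {
    use crate::apis::openai_responses::InputParam;

    // Plain-string input, no explicit stream flag.
    let mut request = ProviderRequestType::ResponsesAPIRequest(CreateResponsesRequest {
        input: InputParam::Text("fix the failing test".to_string()),
        stream: None,
        ..Default::default()
    });

    request
        .normalize_for_upstream(
            ProviderId::ChatGPT,
            &SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses),
        )
        .unwrap();

    let ProviderRequestType::ResponsesAPIRequest(req) = request else {
        panic!("expected responses request");
    };
    // Instructions get the Codex base prompt, store is disabled, streaming is
    // forced on, and the plain-string input becomes a one-item list.
    assert!(req.instructions.unwrap().starts_with("You are Codex"));
    assert_eq!(req.store, Some(false));
    assert_eq!(req.stream, Some(true));
    assert!(matches!(req.input, InputParam::Items(_)));

    // An explicit stream=false request is rejected rather than silently rewritten.
    let mut rejected = ProviderRequestType::ResponsesAPIRequest(CreateResponsesRequest {
        stream: Some(false),
        ..Default::default()
    });
    let err = rejected
        .normalize_for_upstream(
            ProviderId::ChatGPT,
            &SupportedUpstreamAPIs::OpenAIResponsesAPI(OpenAIApi::Responses),
        )
        .unwrap_err();
    assert!(err.message.contains("Non-streaming requests are not supported"));
}
```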
@@ -824,10 +866,12 @@ mod tests {
             ..Default::default()
         });
 
-        request.normalize_for_upstream(
-            ProviderId::XAI,
-            &SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
-        );
+        request
+            .normalize_for_upstream(
+                ProviderId::XAI,
+                &SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
+            )
+            .unwrap();
 
         let ProviderRequestType::ChatCompletionsRequest(req) = request else {
             panic!("expected chat request");
@@ -852,10 +896,12 @@ mod tests {
             ..Default::default()
         });
 
-        request.normalize_for_upstream(
-            ProviderId::OpenAI,
-            &SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
-        );
+        request
+            .normalize_for_upstream(
+                ProviderId::OpenAI,
+                &SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions),
+            )
+            .unwrap();
 
         let ProviderRequestType::ChatCompletionsRequest(req) = request else {
            panic!("expected chat request");