Adding support for wildcard models in the model_providers config (#696)

* cleaning up plano cli commands

* adding support for wildcard model providers

* fixing compile errors

* fixing bugs related to default model provider, provider hint and duplicates in the model provider list

* fixed cargo fmt issues

* updating tests to always include the model id

* using default for the prompt_gateway path

* fixed the model name, as gpt-5-mini-2025-08-07 wasn't in the config

* making sure that all aliases and models match the config

* fixed the config generator to allow base_url-based LLM providers to include wildcard models

* re-ran the models list utility and added a shell script to run it

* updating docs to mention wildcard model providers

* updated provider_models.json to yaml, added that file to our docs for reference

* updating the build docs to use the new root-based build

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-342.local>
This commit is contained in:
Salman Paracha 2026-01-28 17:47:33 -08:00 committed by GitHub
parent 8428b06e22
commit 2941392ed1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 1748 additions and 202 deletions

View file

@ -151,16 +151,15 @@ pub async fn router_chat_get_upstream_model(
Ok(RoutingResult { model_name })
}
None => {
// No route determined, use default model from request
// No route determined, return sentinel value "none"
// This signals to llm.rs to use the original validated request model
info!(
"[PLANO_REQ_ID: {}] | ROUTER_REQ | No route determined, using default model from request: {}",
request_id,
chat_request.model
"[PLANO_REQ_ID: {}] | ROUTER_REQ | No route determined, returning sentinel 'none'",
request_id
);
let default_model = chat_request.model.clone();
let mut attrs = HashMap::new();
attrs.insert("route.selected_model".to_string(), default_model.clone());
attrs.insert("route.selected_model".to_string(), "none".to_string());
record_routing_span(
trace_collector,
traceparent,
@ -171,7 +170,7 @@ pub async fn router_chat_get_upstream_model(
.await;
Ok(RoutingResult {
model_name: default_model,
model_name: "none".to_string(),
})
}
},