Adding support for wildcard models in the model_providers config (#696)

* cleaning up plano cli commands

* adding support for wildcard model providers

* fixing compile errors

* fixing bugs related to default model provider, provider hint and duplicates in the model provider list

* fixed cargo fmt issues

* updating tests to always include the model id

* using default for the prompt_gateway path

* fixed the model name, as gpt-5-mini-2025-08-07 wasn't in the config

* making sure that all aliases and models match the config

* fixed the config generator to allow for base_url providers LLMs to include wildcard models

* re-ran the models list utility and added a shell script to run it

* updating docs to mention wildcard model providers

* updated provider_models.json to yaml, added that file to our docs for reference

* updating the build docs to use the new root-based build

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-342.local>
This commit is contained in:
Salman Paracha 2026-01-28 17:47:33 -08:00 committed by GitHub
parent 8428b06e22
commit 2941392ed1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 1748 additions and 202 deletions

View file

@ -0,0 +1,412 @@
// Fetch latest provider models from canonical provider APIs and update provider_models.yaml
// Usage:
// Optional: OPENAI_API_KEY, ANTHROPIC_API_KEY, DEEPSEEK_API_KEY, GROK_API_KEY,
// DASHSCOPE_API_KEY, MOONSHOT_API_KEY, ZHIPU_API_KEY, GOOGLE_API_KEY
// Required: AWS CLI configured for Amazon Bedrock models
// cargo run --bin fetch_models
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Entry point: fetch model lists from every configured provider and write
/// them out as YAML.
///
/// The output path defaults to `provider_models.yaml` next to this source
/// file; an explicit path may be supplied as the first CLI argument.
fn main() {
    // Destination file: first CLI argument wins, otherwise a sibling of this
    // source file.
    let fallback = std::path::Path::new(file!())
        .parent()
        .unwrap()
        .join("provider_models.yaml");
    let destination = match std::env::args().nth(1) {
        Some(arg) => arg,
        None => fallback.to_string_lossy().into_owned(),
    };
    println!("Fetching latest models from provider APIs...");
    match fetch_all_models() {
        Ok(catalog) => {
            let serialized =
                serde_yaml::to_string(&catalog).expect("Failed to serialize models");
            std::fs::write(&destination, serialized)
                .expect("Failed to write provider_models.yaml");
            println!(
                "✓ Successfully updated {} providers ({} models) to {}",
                catalog.metadata.total_providers, catalog.metadata.total_models, destination
            );
        }
        Err(err) => {
            // Print setup hints before bailing out with a non-zero status.
            eprintln!("Error fetching models: {}", err);
            eprintln!("\nMake sure required tools are set up:");
            eprintln!(" AWS CLI configured for Bedrock (for Amazon models)");
            eprintln!(" export OPENAI_API_KEY=your-key-here # Optional");
            eprintln!(" export DEEPSEEK_API_KEY=your-key-here # Optional");
            eprintln!(" cargo run --bin fetch_models");
            std::process::exit(1);
        }
    }
}
// OpenAI-compatible API response (used by most providers)
/// A single entry in an OpenAI-style `GET /v1/models` listing.
#[derive(Debug, Deserialize)]
struct OpenAICompatibleModel {
// Provider-assigned model identifier, e.g. "gpt-4o" (no provider prefix).
id: String,
}
/// Top-level OpenAI-style models response: `{ "data": [ { "id": ... } ] }`.
#[derive(Debug, Deserialize)]
struct OpenAICompatibleResponse {
data: Vec<OpenAICompatibleModel>,
}
// Google Gemini API response
/// One model from the Google Generative Language models listing.
#[derive(Debug, Deserialize)]
struct GoogleModel {
// Fully qualified name as returned by the API, e.g. "models/gemini-pro".
name: String,
// Generation methods the model supports; used downstream to keep only
// models that support "generateContent". Optional in case the API omits it.
#[serde(rename = "supportedGenerationMethods")]
supported_generation_methods: Option<Vec<String>>,
}
/// Top-level Google models response: `{ "models": [ ... ] }`.
#[derive(Debug, Deserialize)]
struct GoogleResponse {
models: Vec<GoogleModel>,
}
/// Serialized shape of provider_models.yaml.
#[derive(Debug, Serialize)]
struct ProviderModels {
// Schema version of the output file (currently "1.0").
version: String,
// Origin of the data (currently "canonical-apis").
source: String,
// Provider key -> list of "provider/model-id" strings.
providers: HashMap<String, Vec<String>>,
metadata: Metadata,
}
/// Summary counts and timestamp written alongside the provider lists.
#[derive(Debug, Serialize)]
struct Metadata {
total_providers: usize,
total_models: usize,
// RFC 3339 timestamp of the fetch run.
last_updated: String,
}
/// Returns `true` when `model_id` names a text-generation (chat/completion)
/// model, `false` for embedding, audio, image, video, OCR, moderation and
/// legacy completion models.
///
/// Matching is case-insensitive and purely substring/prefix based, so it is a
/// heuristic: new non-text model families need a new pattern here.
fn is_text_model(model_id: &str) -> bool {
    let id_lower = model_id.to_lowercase();
    // Filter out known non-text models
    let non_text_patterns = [
        "embedding",   // Embedding models
        "whisper",     // Audio transcription
        "-tts",        // Text-to-speech (with dash to avoid matching in middle of words)
        "tts-",        // Text-to-speech prefix
        "dall-e",      // Image generation
        "sora",        // Video generation
        "moderation",  // Moderation models
        "babbage",     // Legacy completion models
        "davinci-002", // Legacy completion models
        "transcribe",  // Audio transcription models
        "realtime",    // Realtime audio models
        "audio",       // Audio models (gpt-audio, gpt-audio-mini)
        "-image-",     // Image generation models (grok-2-image-1212)
        "-ocr-",       // OCR models
        "ocr-",        // OCR models prefix
        "voxtral",     // Audio/voice models
    ];
    if non_text_patterns
        .iter()
        .any(|pattern| id_lower.contains(pattern))
    {
        return false;
    }
    // Image generators such as "gpt-image-1" / "chatgpt-image-latest" are not
    // text models; vision models ("grok-2-vision", "gemini-...-vision") can
    // process images but emit text, so they must pass.
    //
    // Bug fix: this function is called with the RAW model id, before the
    // "provider/" prefix is attached (see fetch_openai_compatible_models,
    // which filters and only then maps to "prefix/id"), so the old check for
    // "/gpt-image" could never match. Match the bare prefix too, keeping the
    // prefixed forms for ids that already carry a provider segment.
    if id_lower.starts_with("gpt-image")
        || id_lower.starts_with("chatgpt-image")
        || id_lower.contains("/gpt-image")
        || id_lower.contains("/chatgpt-image")
    {
        return false;
    }
    true
}
/// Query an OpenAI-compatible `GET /v1/models` endpoint with Bearer auth and
/// return the text models, each prefixed as "provider_prefix/model-id".
fn fetch_openai_compatible_models(
    api_url: &str,
    api_key: &str,
    provider_prefix: &str,
) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let raw = ureq::get(api_url)
        .header("Authorization", &format!("Bearer {}", api_key))
        .call()?
        .body_mut()
        .read_to_string()?;
    let parsed: OpenAICompatibleResponse = serde_json::from_str(&raw)?;
    // Keep only text-generation models and attach the provider prefix.
    let mut models = Vec::with_capacity(parsed.data.len());
    for entry in parsed.data {
        if is_text_model(&entry.id) {
            models.push(format!("{}/{}", provider_prefix, entry.id));
        }
    }
    Ok(models)
}
/// Fetch Anthropic's model list and return ids prefixed with "anthropic/".
///
/// Anthropic authenticates with an `x-api-key` header (plus a pinned
/// `anthropic-version`) rather than a Bearer token. For every dated id like
/// "claude-sonnet-4-5-20250929" an undated alias ("claude-sonnet-4-5") is
/// appended as well, unless that alias is already in the list.
fn fetch_anthropic_models(api_key: &str) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let raw = ureq::get("https://api.anthropic.com/v1/models")
        .header("x-api-key", api_key)
        .header("anthropic-version", "2023-06-01")
        .call()?
        .body_mut()
        .read_to_string()?;
    let parsed: OpenAICompatibleResponse = serde_json::from_str(&raw)?;
    let mut models: Vec<String> = Vec::new();
    for entry in parsed.data {
        if !is_text_model(&entry.id) {
            continue;
        }
        // Always keep the fully dated model id.
        models.push(format!("anthropic/{}", entry.id));
        // Derive the alias by stripping a trailing -YYYYMMDD segment: the
        // text after the last '-' must be exactly eight ASCII digits.
        if let Some(pos) = entry.id.rfind('-') {
            let suffix = &entry.id[pos + 1..];
            let looks_like_date =
                suffix.len() == 8 && suffix.bytes().all(|b| b.is_ascii_digit());
            if looks_like_date {
                let alias = format!("anthropic/{}", &entry.id[..pos]);
                // Skip duplicates (the API may list the alias itself).
                if !models.contains(&alias) {
                    models.push(alias);
                }
            }
        }
    }
    Ok(models)
}
/// Fetch Gemini models from the Google Generative Language API.
///
/// Only models advertising support for the "generateContent" method are
/// kept, and ids are rewritten from "models/<name>" to "google/<name>".
fn fetch_google_models(api_key: &str) -> Result<Vec<String>, Box<dyn std::error::Error>> {
    let endpoint = format!(
        "https://generativelanguage.googleapis.com/v1beta/models?key={}",
        api_key
    );
    let raw = ureq::get(&endpoint).call()?.body_mut().read_to_string()?;
    let parsed: GoogleResponse = serde_json::from_str(&raw)?;
    let mut models = Vec::new();
    for model in parsed.models {
        // A missing supportedGenerationMethods field counts as unsupported.
        let supports_generate = match &model.supported_generation_methods {
            Some(methods) => methods.iter().any(|m| m == "generateContent"),
            None => false,
        };
        if supports_generate {
            // Convert "models/gemini-pro" to "google/gemini-pro"
            let bare = model.name.strip_prefix("models/").unwrap_or(&model.name);
            models.push(format!("google/{}", bare));
        }
    }
    Ok(models)
}
/// List Amazon-provided TEXT-modality foundation models from AWS Bedrock by
/// shelling out to the AWS CLI (no API-key flow exists for Bedrock here).
///
/// Embedding, image and rerank models are excluded; surviving ids are
/// prefixed with "amazon/". Fails if the `aws` command exits non-zero.
fn fetch_bedrock_amazon_models() -> Result<Vec<String>, Box<dyn std::error::Error>> {
    #[derive(Debug, Deserialize)]
    struct BedrockModelSummary {
        #[serde(rename = "modelId")]
        model_id: String,
    }
    #[derive(Debug, Deserialize)]
    struct BedrockResponse {
        #[serde(rename = "modelSummaries")]
        model_summaries: Vec<BedrockModelSummary>,
    }
    let output = std::process::Command::new("aws")
        .args([
            "bedrock",
            "list-foundation-models",
            "--by-provider",
            "amazon",
            "--by-output-modality",
            "TEXT",
            "--no-cli-pager",
            "--output",
            "json",
        ])
        .output()?;
    if !output.status.success() {
        return Err(format!(
            "AWS CLI command failed: {}",
            String::from_utf8_lossy(&output.stderr)
        )
        .into());
    }
    let stdout_text = String::from_utf8(output.stdout)?;
    let parsed: BedrockResponse = serde_json::from_str(&stdout_text)?;
    // Filter out embedding, image generation, and rerank models.
    let mut amazon_models = Vec::new();
    for summary in parsed.model_summaries {
        let lowered = summary.model_id.to_lowercase();
        let excluded = lowered.contains("embed")
            || lowered.contains("image")
            || lowered.contains("rerank");
        if !excluded {
            amazon_models.push(format!("amazon/{}", summary.model_id));
        }
    }
    Ok(amazon_models)
}
/// Fetch model lists from every configured provider and assemble the
/// serializable catalog.
///
/// Behavior is best-effort per provider: a missing API key skips that
/// provider, and a failed fetch is logged to stderr and recorded in `errors`
/// without aborting the run. Returns `Err` only when *no* provider produced
/// any models at all.
fn fetch_all_models() -> Result<ProviderModels, Box<dyn std::error::Error>> {
    let mut providers: HashMap<String, Vec<String>> = HashMap::new();
    let mut errors: Vec<String> = Vec::new();
    // Configuration: provider name, env var, API URL, prefix for model IDs
    let provider_configs = vec![
        (
            "openai",
            "OPENAI_API_KEY",
            "https://api.openai.com/v1/models",
            "openai",
        ),
        (
            "mistralai",
            "MISTRAL_API_KEY",
            "https://api.mistral.ai/v1/models",
            "mistralai",
        ),
        (
            "deepseek",
            "DEEPSEEK_API_KEY",
            "https://api.deepseek.com/v1/models",
            "deepseek",
        ),
        ("x-ai", "GROK_API_KEY", "https://api.x.ai/v1/models", "x-ai"),
        (
            "moonshotai",
            "MOONSHOT_API_KEY",
            "https://api.moonshot.ai/v1/models",
            "moonshotai",
        ),
        (
            "qwen",
            "DASHSCOPE_API_KEY",
            "https://dashscope-intl.aliyuncs.com/compatible-mode/v1/models",
            "qwen",
        ),
        (
            "z-ai",
            "ZHIPU_API_KEY",
            "https://open.bigmodel.cn/api/paas/v4/models",
            "z-ai",
        ),
    ];
    // Fetch from OpenAI-compatible providers
    for (provider_name, env_var, api_url, prefix) in provider_configs {
        if let Ok(api_key) = std::env::var(env_var) {
            match fetch_openai_compatible_models(api_url, &api_key, prefix) {
                Ok(models) => {
                    // Consistency fix: use the same ✓/✗/⊘ status markers as
                    // the Anthropic/Google/Amazon branches below.
                    println!(" ✓ {}: {} models", provider_name, models.len());
                    providers.insert(provider_name.to_string(), models);
                }
                Err(e) => {
                    let err_msg = format!(" ✗ {}: {}", provider_name, e);
                    eprintln!("{}", err_msg);
                    errors.push(err_msg);
                }
            }
        } else {
            println!(" ⊘ {}: {} not set (skipped)", provider_name, env_var);
        }
    }
    // Fetch Anthropic models (different authentication)
    if let Ok(api_key) = std::env::var("ANTHROPIC_API_KEY") {
        match fetch_anthropic_models(&api_key) {
            Ok(models) => {
                println!(" ✓ anthropic: {} models", models.len());
                providers.insert("anthropic".to_string(), models);
            }
            Err(e) => {
                let err_msg = format!(" ✗ anthropic: {}", e);
                eprintln!("{}", err_msg);
                errors.push(err_msg);
            }
        }
    } else {
        println!(" ⊘ anthropic: ANTHROPIC_API_KEY not set (skipped)");
    }
    // Fetch Google models (different API format)
    if let Ok(api_key) = std::env::var("GOOGLE_API_KEY") {
        match fetch_google_models(&api_key) {
            Ok(models) => {
                println!(" ✓ google: {} models", models.len());
                providers.insert("google".to_string(), models);
            }
            Err(e) => {
                let err_msg = format!(" ✗ google: {}", e);
                eprintln!("{}", err_msg);
                errors.push(err_msg);
            }
        }
    } else {
        println!(" ⊘ google: GOOGLE_API_KEY not set (skipped)");
    }
    // Fetch Amazon models from AWS Bedrock (uses the AWS CLI, not an env key)
    match fetch_bedrock_amazon_models() {
        Ok(models) => {
            println!(" ✓ amazon: {} models (via AWS Bedrock)", models.len());
            providers.insert("amazon".to_string(), models);
        }
        Err(e) => {
            let err_msg = format!(" ✗ amazon: {} (AWS Bedrock required)", e);
            eprintln!("{}", err_msg);
            errors.push(err_msg);
        }
    }
    // Nothing fetched at all is a hard error; partial success is fine.
    if providers.is_empty() {
        return Err("No models fetched from any provider. Check API keys.".into());
    }
    let total_providers = providers.len();
    let total_models: usize = providers.values().map(|v| v.len()).sum();
    println!(
        "\n✅ Successfully fetched models from {} providers",
        total_providers
    );
    if !errors.is_empty() {
        println!("⚠️ {} providers failed", errors.len());
    }
    Ok(ProviderModels {
        version: "1.0".to_string(),
        source: "canonical-apis".to_string(),
        providers,
        metadata: Metadata {
            total_providers,
            total_models,
            last_updated: chrono::Utc::now().to_rfc3339(),
        },
    })
}

View file

@ -0,0 +1,315 @@
version: '1.0'
source: canonical-apis
providers:
qwen:
- qwen/qwen3-max-2026-01-23
- qwen/qwen-plus-character
- qwen/qwen-flash-character
- qwen/qwen-flash
- qwen/qwen3-vl-plus-2025-12-19
- qwen/qwen3-omni-flash-2025-12-01
- qwen/qwen3-livetranslate-flash-2025-12-01
- qwen/qwen3-livetranslate-flash
- qwen/qwen-mt-lite
- qwen/qwen-plus-2025-12-01
- qwen/qwen-mt-flash
- qwen/ccai-pro
- qwen/tongyi-tingwu-slp
- qwen/qwen3-vl-flash
- qwen/qwen3-vl-flash-2025-10-15
- qwen/qwen3-omni-flash
- qwen/qwen3-omni-flash-2025-09-15
- qwen/qwen3-omni-30b-a3b-captioner
- qwen/qwen2.5-7b-instruct
- qwen/qwen2.5-14b-instruct
- qwen/qwen2.5-32b-instruct
- qwen/qwen2.5-72b-instruct
- qwen/qwen2.5-14b-instruct-1m
- qwen/qwen2.5-7b-instruct-1m
- qwen/qwen-max-2025-01-25
- qwen/qwen-max-latest
- qwen/qwen-turbo-2024-11-01
- qwen/qwen-turbo-latest
- qwen/qwen-plus-latest
- qwen/qwen-plus-2025-01-25
- qwen/qwq-plus-2025-03-05
- qwen/qwen-mt-turbo
- qwen/qwen-mt-plus
- qwen/qwen-coder-plus
- qwen/qwq-plus
- qwen/qwen2.5-vl-32b-instruct
- qwen/qvq-max
- qwen/qwen-omni-turbo
- qwen/qwen3-8b
- qwen/qwen3-30b-a3b
- qwen/qwen3-235b-a22b
- qwen/qwen-turbo-2025-04-28
- qwen/qwen-plus-2025-04-28
- qwen/qwen-vl-max-2025-04-08
- qwen/qwen-vl-plus-2025-01-25
- qwen/qwen-vl-plus-latest
- qwen/qwen-vl-max-latest
- qwen/qwen-vl-plus-2025-05-07
- qwen/qwen3-coder-plus
- qwen/qwen3-coder-480b-a35b-instruct
- qwen/qwen3-235b-a22b-instruct-2507
- qwen/qwen-plus-2025-07-14
- qwen/qwen3-coder-plus-2025-07-22
- qwen/qwen3-235b-a22b-thinking-2507
- qwen/qwen3-coder-flash
- qwen/qwen-vl-max
- qwen/qwen-vl-max-2025-08-13
- qwen/qwen3-max
- qwen/qwen3-max-2025-09-23
- qwen/qwen3-vl-plus
- qwen/qwen3-vl-235b-a22b-instruct
- qwen/qwen3-vl-235b-a22b-thinking
- qwen/qwen3-30b-a3b-thinking-2507
- qwen/qwen3-30b-a3b-instruct-2507
- qwen/qwen3-14b
- qwen/qwen3-32b
- qwen/qwen3-0.6b
- qwen/qwen3-4b
- qwen/qwen3-1.7b
- qwen/qwen-vl-plus
- qwen/qwen3-coder-plus-2025-09-23
- qwen/qwen3-vl-plus-2025-09-23
- qwen/qwen-plus-2025-09-11
- qwen/qwen3-next-80b-a3b-thinking
- qwen/qwen3-next-80b-a3b-instruct
- qwen/qwen3-max-preview
- qwen/qwen2-7b-instruct
- qwen/qwen-max
- qwen/qwen-plus
- qwen/qwen-turbo
openai:
- openai/gpt-4-0613
- openai/gpt-4
- openai/gpt-3.5-turbo
- openai/gpt-5.2-codex
- openai/gpt-3.5-turbo-instruct
- openai/gpt-3.5-turbo-instruct-0914
- openai/gpt-4-1106-preview
- openai/gpt-3.5-turbo-1106
- openai/gpt-4-0125-preview
- openai/gpt-4-turbo-preview
- openai/gpt-3.5-turbo-0125
- openai/gpt-4-turbo
- openai/gpt-4-turbo-2024-04-09
- openai/gpt-4o
- openai/gpt-4o-2024-05-13
- openai/gpt-4o-mini-2024-07-18
- openai/gpt-4o-mini
- openai/gpt-4o-2024-08-06
- openai/chatgpt-4o-latest
- openai/o1-2024-12-17
- openai/o1
- openai/computer-use-preview
- openai/o3-mini
- openai/o3-mini-2025-01-31
- openai/gpt-4o-2024-11-20
- openai/computer-use-preview-2025-03-11
- openai/gpt-4o-search-preview-2025-03-11
- openai/gpt-4o-search-preview
- openai/gpt-4o-mini-search-preview-2025-03-11
- openai/gpt-4o-mini-search-preview
- openai/o1-pro-2025-03-19
- openai/o1-pro
- openai/o3-2025-04-16
- openai/o4-mini-2025-04-16
- openai/o3
- openai/o4-mini
- openai/gpt-4.1-2025-04-14
- openai/gpt-4.1
- openai/gpt-4.1-mini-2025-04-14
- openai/gpt-4.1-mini
- openai/gpt-4.1-nano-2025-04-14
- openai/gpt-4.1-nano
- openai/codex-mini-latest
- openai/o3-pro
- openai/o3-pro-2025-06-10
- openai/o4-mini-deep-research
- openai/o3-deep-research
- openai/o3-deep-research-2025-06-26
- openai/o4-mini-deep-research-2025-06-26
- openai/gpt-5-chat-latest
- openai/gpt-5-2025-08-07
- openai/gpt-5
- openai/gpt-5-mini-2025-08-07
- openai/gpt-5-mini
- openai/gpt-5-nano-2025-08-07
- openai/gpt-5-nano
- openai/gpt-5-codex
- openai/gpt-5-pro-2025-10-06
- openai/gpt-5-pro
- openai/gpt-5-search-api
- openai/gpt-5-search-api-2025-10-14
- openai/gpt-5.1-chat-latest
- openai/gpt-5.1-2025-11-13
- openai/gpt-5.1
- openai/gpt-5.1-codex
- openai/gpt-5.1-codex-mini
- openai/gpt-5.1-codex-max
- openai/gpt-5.2-2025-12-11
- openai/gpt-5.2
- openai/gpt-5.2-pro-2025-12-11
- openai/gpt-5.2-pro
- openai/gpt-5.2-chat-latest
- openai/gpt-3.5-turbo-16k
- openai/ft:gpt-3.5-turbo-0613:katanemo::8CMZbm0P
google:
- google/gemini-2.5-flash
- google/gemini-2.5-pro
- google/gemini-2.0-flash-exp
- google/gemini-2.0-flash
- google/gemini-2.0-flash-001
- google/gemini-2.0-flash-exp-image-generation
- google/gemini-2.0-flash-lite-001
- google/gemini-2.0-flash-lite
- google/gemini-2.0-flash-lite-preview-02-05
- google/gemini-2.0-flash-lite-preview
- google/gemini-exp-1206
- google/gemini-2.5-flash-preview-tts
- google/gemini-2.5-pro-preview-tts
- google/gemma-3-1b-it
- google/gemma-3-4b-it
- google/gemma-3-12b-it
- google/gemma-3-27b-it
- google/gemma-3n-e4b-it
- google/gemma-3n-e2b-it
- google/gemini-flash-latest
- google/gemini-flash-lite-latest
- google/gemini-pro-latest
- google/gemini-2.5-flash-lite
- google/gemini-2.5-flash-image
- google/gemini-2.5-flash-preview-09-2025
- google/gemini-2.5-flash-lite-preview-09-2025
- google/gemini-3-pro-preview
- google/gemini-3-flash-preview
- google/gemini-3-pro-image-preview
- google/nano-banana-pro-preview
- google/gemini-robotics-er-1.5-preview
- google/gemini-2.5-computer-use-preview-10-2025
- google/deep-research-pro-preview-12-2025
mistralai:
- mistralai/mistral-medium-2505
- mistralai/mistral-medium-2508
- mistralai/mistral-medium-latest
- mistralai/mistral-medium
- mistralai/open-mistral-nemo
- mistralai/open-mistral-nemo-2407
- mistralai/mistral-tiny-2407
- mistralai/mistral-tiny-latest
- mistralai/mistral-large-2411
- mistralai/pixtral-large-2411
- mistralai/pixtral-large-latest
- mistralai/mistral-large-pixtral-2411
- mistralai/codestral-2508
- mistralai/codestral-latest
- mistralai/devstral-small-2507
- mistralai/devstral-medium-2507
- mistralai/devstral-2512
- mistralai/mistral-vibe-cli-latest
- mistralai/devstral-medium-latest
- mistralai/devstral-latest
- mistralai/labs-devstral-small-2512
- mistralai/devstral-small-latest
- mistralai/mistral-small-2506
- mistralai/mistral-small-latest
- mistralai/labs-mistral-small-creative
- mistralai/magistral-medium-2509
- mistralai/magistral-medium-latest
- mistralai/magistral-small-2509
- mistralai/magistral-small-latest
- mistralai/mistral-large-2512
- mistralai/mistral-large-latest
- mistralai/ministral-3b-2512
- mistralai/ministral-3b-latest
- mistralai/ministral-8b-2512
- mistralai/ministral-8b-latest
- mistralai/ministral-14b-2512
- mistralai/ministral-14b-latest
- mistralai/open-mistral-7b
- mistralai/mistral-tiny
- mistralai/mistral-tiny-2312
- mistralai/pixtral-12b-2409
- mistralai/pixtral-12b
- mistralai/pixtral-12b-latest
- mistralai/ministral-3b-2410
- mistralai/ministral-8b-2410
- mistralai/codestral-2501
- mistralai/codestral-2412
- mistralai/codestral-2411-rc5
- mistralai/mistral-small-2501
- mistralai/mistral-embed-2312
- mistralai/mistral-embed
- mistralai/codestral-embed
- mistralai/codestral-embed-2505
z-ai:
- z-ai/glm-4.5
- z-ai/glm-4.5-air
- z-ai/glm-4.6
- z-ai/glm-4.7
amazon:
- amazon/amazon.nova-pro-v1:0
- amazon/amazon.nova-2-lite-v1:0
- amazon/amazon.nova-2-sonic-v1:0
- amazon/amazon.titan-tg1-large
- amazon/amazon.nova-premier-v1:0:8k
- amazon/amazon.nova-premier-v1:0:20k
- amazon/amazon.nova-premier-v1:0:1000k
- amazon/amazon.nova-premier-v1:0:mm
- amazon/amazon.nova-premier-v1:0
- amazon/amazon.nova-lite-v1:0
- amazon/amazon.nova-micro-v1:0
deepseek:
- deepseek/deepseek-chat
- deepseek/deepseek-reasoner
x-ai:
- x-ai/grok-2-vision-1212
- x-ai/grok-3
- x-ai/grok-3-mini
- x-ai/grok-4-0709
- x-ai/grok-4-1-fast-non-reasoning
- x-ai/grok-4-1-fast-reasoning
- x-ai/grok-4-fast-non-reasoning
- x-ai/grok-4-fast-reasoning
- x-ai/grok-code-fast-1
moonshotai:
- moonshotai/kimi-latest
- moonshotai/kimi-k2.5
- moonshotai/moonshot-v1-8k-vision-preview
- moonshotai/kimi-k2-thinking
- moonshotai/moonshot-v1-auto
- moonshotai/kimi-k2-0711-preview
- moonshotai/moonshot-v1-32k
- moonshotai/kimi-k2-thinking-turbo
- moonshotai/kimi-k2-0905-preview
- moonshotai/moonshot-v1-128k
- moonshotai/moonshot-v1-32k-vision-preview
- moonshotai/moonshot-v1-128k-vision-preview
- moonshotai/kimi-k2-turbo-preview
- moonshotai/moonshot-v1-8k
anthropic:
- anthropic/claude-opus-4-5-20251101
- anthropic/claude-opus-4-5
- anthropic/claude-haiku-4-5-20251001
- anthropic/claude-haiku-4-5
- anthropic/claude-sonnet-4-5-20250929
- anthropic/claude-sonnet-4-5
- anthropic/claude-opus-4-1-20250805
- anthropic/claude-opus-4-1
- anthropic/claude-opus-4-20250514
- anthropic/claude-opus-4
- anthropic/claude-sonnet-4-20250514
- anthropic/claude-sonnet-4
- anthropic/claude-3-7-sonnet-20250219
- anthropic/claude-3-7-sonnet
- anthropic/claude-3-5-haiku-20241022
- anthropic/claude-3-5-haiku
- anthropic/claude-3-haiku-20240307
- anthropic/claude-3-haiku
metadata:
total_providers: 10
total_models: 298
last_updated: 2026-01-27T22:40:53.653700+00:00

View file

@ -0,0 +1,15 @@
#!/bin/bash
# Regenerate provider_models.yaml by running the fetch_models binary from the
# crates root (so its relative paths resolve).
set -e

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"

# Navigate to crates directory (bin -> src -> hermesllm -> crates)
cd "$SCRIPT_DIR/../../.."

# Load environment variables silently and run fetch_models.
# Fix: all provider API keys are optional (fetch_models skips providers
# without a key), so a missing .env must not abort the script under `set -e`.
if [ -f hermesllm/src/bin/.env ]; then
    set -a
    source hermesllm/src/bin/.env
    set +a
fi

cargo run --bin fetch_models --features model-fetch

View file

@ -29,10 +29,27 @@ mod tests {
#[test]
fn test_provider_id_conversion() {
// NOTE(review): this span is a rendered diff hunk, so removed and added
// lines are interleaved. The next four assertions are the OLD
// From<&str>-based checks that this commit deletes; the TryFrom-based
// replacements follow immediately after. As written here the two halves
// cannot coexist in one compiling test.
assert_eq!(ProviderId::from("openai"), ProviderId::OpenAI);
assert_eq!(ProviderId::from("mistral"), ProviderId::Mistral);
assert_eq!(ProviderId::from("groq"), ProviderId::Groq);
assert_eq!(ProviderId::from("arch"), ProviderId::Arch);
// New fallible conversions: known provider names must parse successfully.
assert_eq!(ProviderId::try_from("openai").unwrap(), ProviderId::OpenAI);
assert_eq!(
ProviderId::try_from("mistral").unwrap(),
ProviderId::Mistral
);
assert_eq!(ProviderId::try_from("groq").unwrap(), ProviderId::Groq);
assert_eq!(ProviderId::try_from("arch").unwrap(), ProviderId::Arch);
// Test aliases
assert_eq!(ProviderId::try_from("google").unwrap(), ProviderId::Gemini);
assert_eq!(
ProviderId::try_from("together").unwrap(),
ProviderId::TogetherAI
);
assert_eq!(
ProviderId::try_from("amazon").unwrap(),
ProviderId::AmazonBedrock
);
// Test error case
assert!(ProviderId::try_from("unknown_provider").is_err());
}
#[test]

View file

@ -1,6 +1,28 @@
use crate::apis::{AmazonBedrockApi, AnthropicApi, OpenAIApi};
use crate::clients::endpoints::{SupportedAPIsFromClient, SupportedUpstreamAPIs};
use serde::Deserialize;
use std::collections::HashMap;
use std::fmt::Display;
use std::sync::OnceLock;
// The canonical provider -> models catalog, embedded at compile time so no
// runtime file lookup is needed. Regenerated by the fetch_models binary.
static PROVIDER_MODELS_YAML: &str = include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/src/bin/provider_models.yaml"
));
/// Deserialization target for provider_models.yaml; only the `providers`
/// map is consumed here (the file's version/source/metadata are ignored).
#[derive(Deserialize)]
struct ProviderModelsFile {
providers: HashMap<String, Vec<String>>,
}
/// Parse the embedded YAML once and cache the provider -> models map for the
/// lifetime of the process (OnceLock makes the init thread-safe).
///
/// Panics if the embedded YAML is malformed, which can only happen when a
/// bad provider_models.yaml was checked in — a build-time artifact problem,
/// not a runtime condition.
fn load_provider_models() -> &'static HashMap<String, Vec<String>> {
static MODELS: OnceLock<HashMap<String, Vec<String>>> = OnceLock::new();
MODELS.get_or_init(|| {
let ProviderModelsFile { providers } = serde_yaml::from_str(PROVIDER_MODELS_YAML)
.expect("Failed to parse provider_models.yaml");
providers
})
}
/// Provider identifier enum - simple enum for identifying providers
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@ -23,31 +45,70 @@ pub enum ProviderId {
AmazonBedrock,
}
impl From<&str> for ProviderId {
fn from(value: &str) -> Self {
impl TryFrom<&str> for ProviderId {
type Error = String;
fn try_from(value: &str) -> Result<Self, Self::Error> {
match value.to_lowercase().as_str() {
"openai" => ProviderId::OpenAI,
"mistral" => ProviderId::Mistral,
"deepseek" => ProviderId::Deepseek,
"groq" => ProviderId::Groq,
"gemini" => ProviderId::Gemini,
"anthropic" => ProviderId::Anthropic,
"github" => ProviderId::GitHub,
"arch" => ProviderId::Arch,
"azure_openai" => ProviderId::AzureOpenAI,
"xai" => ProviderId::XAI,
"together_ai" => ProviderId::TogetherAI,
"ollama" => ProviderId::Ollama,
"moonshotai" => ProviderId::Moonshotai,
"zhipu" => ProviderId::Zhipu,
"qwen" => ProviderId::Qwen, // alias for Qwen
"amazon_bedrock" => ProviderId::AmazonBedrock,
_ => panic!("Unknown provider: {}", value),
"openai" => Ok(ProviderId::OpenAI),
"mistral" => Ok(ProviderId::Mistral),
"deepseek" => Ok(ProviderId::Deepseek),
"groq" => Ok(ProviderId::Groq),
"gemini" => Ok(ProviderId::Gemini),
"google" => Ok(ProviderId::Gemini), // alias
"anthropic" => Ok(ProviderId::Anthropic),
"github" => Ok(ProviderId::GitHub),
"arch" => Ok(ProviderId::Arch),
"azure_openai" => Ok(ProviderId::AzureOpenAI),
"xai" => Ok(ProviderId::XAI),
"together_ai" => Ok(ProviderId::TogetherAI),
"together" => Ok(ProviderId::TogetherAI), // alias
"ollama" => Ok(ProviderId::Ollama),
"moonshotai" => Ok(ProviderId::Moonshotai),
"zhipu" => Ok(ProviderId::Zhipu),
"qwen" => Ok(ProviderId::Qwen),
"amazon_bedrock" => Ok(ProviderId::AmazonBedrock),
"amazon" => Ok(ProviderId::AmazonBedrock), // alias
_ => Err(format!("Unknown provider: {}", value)),
}
}
}
impl ProviderId {
/// Get all available models for this provider
/// Returns model names without the provider prefix (e.g., "gpt-4" not "openai/gpt-4")
///
/// Backed by the compile-time-embedded provider_models.yaml catalog; a
/// provider whose key is absent from that file yields an empty list.
pub fn models(&self) -> Vec<String> {
// Map the variant to its key in provider_models.yaml. Several keys differ
// from the variant name (XAI -> "x-ai", Zhipu -> "z-ai", Mistral ->
// "mistralai"); AzureOpenAI reuses the "openai" catalog entry.
let provider_key = match self {
ProviderId::AmazonBedrock => "amazon",
ProviderId::AzureOpenAI => "openai",
ProviderId::TogetherAI => "together",
ProviderId::Gemini => "google",
ProviderId::OpenAI => "openai",
ProviderId::Anthropic => "anthropic",
ProviderId::Mistral => "mistralai",
ProviderId::Deepseek => "deepseek",
ProviderId::Groq => "groq",
ProviderId::XAI => "x-ai",
ProviderId::Moonshotai => "moonshotai",
ProviderId::Zhipu => "z-ai",
ProviderId::Qwen => "qwen",
// Remaining variants (e.g. GitHub, Arch, Ollama) have no entry in the
// catalog and expose no static model list.
_ => return Vec::new(),
};
load_provider_models()
.get(provider_key)
.map(|models| {
models
.iter()
.filter_map(|model| {
// Strip provider prefix (e.g., "openai/gpt-4" -> "gpt-4")
model.split_once('/').map(|(_, name)| name.to_string())
})
.collect()
})
// A key missing from the YAML (e.g. "together" or "groq" in the current
// file) simply yields an empty list rather than an error.
.unwrap_or_default()
}
/// Given a client API, return the compatible upstream API for this provider
pub fn compatible_api_for_client(
&self,
@ -169,3 +230,102 @@ impl Display for ProviderId {
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Every provider backed by the embedded YAML must yield a non-empty
    /// model list.
    #[test]
    fn test_models_loaded_from_yaml() {
        let expected_nonempty = [
            (ProviderId::OpenAI, "OpenAI"),
            (ProviderId::Anthropic, "Anthropic"),
            (ProviderId::Mistral, "Mistral"),
            (ProviderId::Deepseek, "Deepseek"),
            (ProviderId::Gemini, "Gemini"),
        ];
        for (provider, label) in expected_nonempty {
            assert!(
                !provider.models().is_empty(),
                "{} should have models",
                label
            );
        }
    }

    /// Returned model names must be bare ids, i.e. the "provider/" prefix
    /// from the YAML has been stripped.
    #[test]
    fn test_model_names_without_provider_prefix() {
        for provider in [ProviderId::OpenAI, ProviderId::Anthropic] {
            for model in provider.models() {
                assert!(
                    !model.contains('/'),
                    "Model name '{}' should not contain provider prefix",
                    model
                );
            }
        }
    }

    /// Spot-check that well-known model families appear in the catalog.
    #[test]
    fn test_specific_models_exist() {
        assert!(
            ProviderId::OpenAI
                .models()
                .iter()
                .any(|m| m.contains("gpt-4")),
            "OpenAI models should include GPT-4 variants"
        );
        assert!(
            ProviderId::Anthropic
                .models()
                .iter()
                .any(|m| m.contains("claude")),
            "Anthropic models should include Claude variants"
        );
    }

    /// Providers with no catalog entry must return an empty vec, not panic.
    #[test]
    fn test_unsupported_providers_return_empty() {
        assert!(
            ProviderId::GitHub.models().is_empty(),
            "GitHub should return empty models list"
        );
        assert!(
            ProviderId::Ollama.models().is_empty(),
            "Ollama should return empty models list"
        );
    }

    /// Variants whose YAML key differs from their name must still resolve.
    #[test]
    fn test_provider_name_mapping() {
        assert!(
            !ProviderId::XAI.models().is_empty(),
            "XAI should have models (mapped to x-ai)"
        );
        assert!(
            !ProviderId::Zhipu.models().is_empty(),
            "Zhipu should have models (mapped to z-ai)"
        );
        assert!(
            !ProviderId::AmazonBedrock.models().is_empty(),
            "AmazonBedrock should have models (mapped to amazon)"
        );
    }
}