Adding support for wildcard models in the model_providers config (#696)

* cleaning up plano cli commands

* adding support for wildcard model providers

* fixing compile errors

* fixing bugs related to default model provider, provider hint and duplicates in the model provider list

* fixed cargo fmt issues

* updating tests to always include the model id

* using default for the prompt_gateway path

* fixed the model name, as gpt-5-mini-2025-08-07 wasn't in the config

* making sure that all aliases and models match the config

* fixed the config generator to allow LLMs from base_url providers to include wildcard models

* re-ran the models list utility and added a shell script to run it

* updating docs to mention wildcard model providers

* updated provider_models.json to yaml, added that file to our docs for reference

* updating the build docs to use the new root-based build

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-342.local>
This commit is contained in:
Salman Paracha 2026-01-28 17:47:33 -08:00 committed by GitHub
parent 8428b06e22
commit 2941392ed1
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 1748 additions and 202 deletions

View file

@@ -60,6 +60,7 @@ def test_prompt_gateway(stream):
"content": "how is the weather in seattle for next 10 days",
}
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -147,6 +148,7 @@ def test_prompt_gateway_arch_direct_response(stream):
"content": "how is the weather",
}
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -189,6 +191,7 @@ def test_prompt_gateway_param_gathering(stream):
"content": "how is the weather in seattle",
}
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -245,6 +248,7 @@ def test_prompt_gateway_param_tool_call(stream):
"content": "for 2 days please",
},
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -311,6 +315,7 @@ def test_prompt_gateway_default_target(stream):
"content": "hello",
},
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -354,6 +359,7 @@ def test_prompt_gateway_prompt_guard_jailbreak(stream):
"content": "How would you respond to a question about generating harmful or unethical content",
}
],
"model": "openai/gpt-4o",
"stream": stream,
}
response = requests.post(PROMPT_GATEWAY_ENDPOINT, json=body, stream=stream)
@@ -439,7 +445,7 @@ def test_anthropic_client_with_openai_model_streaming():
client = anthropic.Anthropic(api_key="test-key", base_url=base_url)
with client.messages.stream(
model="gpt-5-mini-2025-08-07", # OpenAI model via Anthropic client
model="gpt-4o-mini", # OpenAI model via Anthropic client
max_tokens=500,
messages=[
{