fixed issue with groq LLMs that require the openai in the /v1/chat/co… (#460)

* fixed issue with Groq LLMs that require the `/openai` prefix in the `/v1/chat/completions` path. My first change

* updated the GH actions with keys for Groq

* adding missing groq API keys

* add llama-3.2-3b-preview to the model list after adding Groq to the demo

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-261.local>
This commit is contained in:
Salman Paracha 2025-04-13 14:00:16 -07:00 committed by GitHub
parent e7b0de2a72
commit f31aa59fac
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 35 additions and 16 deletions

View file

@ -11,7 +11,7 @@ pub const MODEL_SERVER_NAME: &str = "model_server";
/// Request header naming the LLM provider to route to.
pub const ARCH_ROUTING_HEADER: &str = "x-arch-llm-provider";
/// JSON key for the chat message list in request/response bodies.
pub const MESSAGES_KEY: &str = "messages";
/// Request header carrying a caller-supplied provider hint.
/// NOTE(review): exact hint semantics are not visible here — confirm against the router code.
pub const ARCH_PROVIDER_HINT_HEADER: &str = "x-arch-llm-provider-hint";
/// Recognized chat-completions endpoint paths.
/// The plain OpenAI path plus the `/openai`-prefixed variant required by
/// Groq's OpenAI-compatible API (`/openai/v1/chat/completions`).
/// NOTE: the scraped diff left both the old `&str` definition and the new
/// array definition in place; only one definition of this name may exist
/// (duplicate definitions are a compile error), so the array form is kept.
pub const CHAT_COMPLETIONS_PATH: [&str; 2] = ["/v1/chat/completions", "/openai/v1/chat/completions"];
/// Health-check endpoint path.
pub const HEALTHZ_PATH: &str = "/healthz";
/// Header used to carry state across requests.
/// NOTE(review): the state payload format is not visible in this chunk — verify at the producer/consumer.
pub const ARCH_STATE_HEADER: &str = "x-arch-state";
/// Model identifier for the Arch function-calling model.
pub const ARCH_FC_MODEL_NAME: &str = "Arch-Function-1.5B";