fixed issue with groq LLMs that require the openai in the /v1/chat/co… (#460)

* fixed an issue with Groq LLMs that require the OpenAI-style /v1/chat/completions path. My first change

* updated the GH actions with keys for Groq

* adding missing groq API keys

* add the llama-3.2-3b-preview model after adding Groq to the demo

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-261.local>
This commit is contained in:
Salman Paracha 2025-04-13 14:00:16 -07:00 committed by GitHub
parent e7b0de2a72
commit f31aa59fac
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
9 changed files with 35 additions and 16 deletions

View file

@@ -17,17 +17,13 @@ overrides:
prompt_target_intent_matching_threshold: 0.6
llm_providers:
- name: gpt-4o-mini
access_key: $OPENAI_API_KEY
- name: groq
access_key: $GROQ_API_KEY
provider_interface: openai
model: gpt-4o-mini
model: llama-3.2-3b-preview
base_url: https://api.groq.com
default: true
- name: gpt-3.5-turbo-0125
access_key: $OPENAI_API_KEY
provider_interface: openai
model: gpt-3.5-turbo-0125
- name: gpt-4o
access_key: $OPENAI_API_KEY
provider_interface: openai

View file

@@ -19,3 +19,5 @@ services:
- CHAT_COMPLETION_ENDPOINT=http://host.docker.internal:10000/v1
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- ./arch_config.yaml:/app/arch_config.yaml