llm listener split (#155)

This commit is contained in:
Adil Hafeez 2024-10-09 15:47:32 -07:00 committed by GitHub
parent 8b5db45507
commit e81ca8d5cf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 305 additions and 54 deletions

View file

@@ -7,16 +7,12 @@ from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
CHAT_COMPLETION_ENDPOINT = os.getenv("CHAT_COMPLETION_ENDPOINT")
MODEL_NAME = os.getenv("MODEL_NAME", "gpt-3.5-turbo")
ARCH_STATE_HEADER = "x-arch-state"
log.info("CHAT_COMPLETION_ENDPOINT: ", CHAT_COMPLETION_ENDPOINT)
client = OpenAI(
api_key=OPENAI_API_KEY,
api_key="--",
base_url=CHAT_COMPLETION_ENDPOINT,
http_client=DefaultHttpxClient(headers={"accept-encoding": "*"}),
)
@@ -31,8 +27,6 @@ def predict(message, state):
# Custom headers
custom_headers = {
"x-arch-openai-api-key": f"{OPENAI_API_KEY}",
"x-arch-mistral-api-key": f"{MISTRAL_API_KEY}",
"x-arch-deterministic-provider": "openai",
}
@@ -42,7 +36,7 @@ def predict(message, state):
try:
raw_response = client.chat.completions.with_raw_response.create(
model=MODEL_NAME,
model="--",
messages=history,
temperature=1.0,
metadata=metadata,