Add support for other providers - litellm, openrouter

This commit is contained in:
akhisud3195 2025-04-25 23:50:26 +05:30 committed by Ramnique Singh
parent 8c2c21a239
commit 14eee3e0c3
24 changed files with 398 additions and 95 deletions

View file

@@ -1,7 +1,7 @@
from flask import Flask, request, jsonify, Response, stream_with_context
from pydantic import BaseModel, ValidationError
from typing import List
from copilot import UserMessage, AssistantMessage, get_response, openai_client
from copilot import UserMessage, AssistantMessage, get_response
from streaming import get_streaming_response
from lib import AgentContext, PromptContext, ToolContext, ChatContext
import os

24
apps/copilot/client.py Normal file
View file

@@ -0,0 +1,24 @@
"""Build the shared chat-completions client from environment configuration.

Environment variables (loaded from .env via python-dotenv):
    PROVIDER_BASE_URL:      optional base URL of an OpenAI-compatible gateway
                            (e.g. LiteLLM, OpenRouter). Empty means use
                            OpenAI directly.
    PROVIDER_API_KEY:       API key for the chosen provider; falls back to
                            OPENAI_API_KEY so existing OpenAI-only
                            deployments keep working unchanged.
    PROVIDER_DEFAULT_MODEL: model name passed to chat.completions.create
                            (default: "gpt-4.1").

Raises:
    ValueError: at import time, if neither PROVIDER_API_KEY nor
        OPENAI_API_KEY is set.
"""
import os
from openai import OpenAI
import dotenv

dotenv.load_dotenv()

PROVIDER_BASE_URL = os.getenv('PROVIDER_BASE_URL', '')
# Backward-compatible fallback: reuse OPENAI_API_KEY when no provider key is set.
PROVIDER_API_KEY = os.getenv('PROVIDER_API_KEY', os.getenv('OPENAI_API_KEY', ''))
PROVIDER_DEFAULT_MODEL = os.getenv('PROVIDER_DEFAULT_MODEL', 'gpt-4.1')

# Fail fast at import time rather than on the first completion request.
if not PROVIDER_API_KEY:
    raise ValueError("No LLM Provider API key found")

if PROVIDER_BASE_URL:
    # Route through an OpenAI-compatible gateway (LiteLLM, OpenRouter, ...).
    # Fixed stray comma in the original message text.
    print(f"Using provider {PROVIDER_BASE_URL} for completions")
    completions_client = OpenAI(
        base_url=PROVIDER_BASE_URL,
        api_key=PROVIDER_API_KEY,
    )
else:
    # Plain string: the original used an f-string with no placeholders.
    print("Using OpenAI directly for completions")
    completions_client = OpenAI(api_key=PROVIDER_API_KEY)

View file

@@ -4,9 +4,8 @@ from pydantic import BaseModel, ValidationError
from typing import List, Dict, Any, Literal
import json
from lib import AgentContext, PromptContext, ToolContext, ChatContext
openai_client = OpenAI()
MODEL_NAME = "gpt-4.1" # OpenAI model name
from client import PROVIDER_DEFAULT_MODEL
from client import completions_client
class UserMessage(BaseModel):
role: Literal["user"]
@@ -75,8 +74,8 @@ User: {last_message.content}
message.model_dump() for message in messages
]
response = openai_client.chat.completions.create(
model=MODEL_NAME,
response = completions_client.chat.completions.create(
model=PROVIDER_DEFAULT_MODEL,
messages=updated_msgs,
temperature=0.0,
response_format={"type": "json_object"}

View file

@@ -18,6 +18,7 @@ openai==1.61.0
packaging==24.2
pydantic==2.10.3
pydantic_core==2.27.1
python-dotenv
sniffio==1.3.1
tqdm==4.67.1
typing_extensions==4.12.2

View file

@@ -4,9 +4,8 @@ from pydantic import BaseModel, ValidationError
from typing import List, Dict, Any, Literal
import json
from lib import AgentContext, PromptContext, ToolContext, ChatContext
openai_client = OpenAI()
MODEL_NAME = "gpt-4.1" # OpenAI model name
from client import PROVIDER_DEFAULT_MODEL
from client import completions_client
class UserMessage(BaseModel):
role: Literal["user"]
@@ -90,8 +89,8 @@ User: {last_message.content}
message.model_dump() for message in messages
]
return openai_client.chat.completions.create(
model=MODEL_NAME,
return completions_client.chat.completions.create(
model=PROVIDER_DEFAULT_MODEL,
messages=updated_msgs,
temperature=0.0,
stream=True