Integrate Arch-Function-Calling-1.5B model (#85)

* add arch support

* add missing file

* e2e tests

* delete old files and fix response

* fmt
This commit is contained in:
Authored by Adil Hafeez on 2024-09-25 23:30:50 -07:00, committed by GitHub
parent 9ea6bb0d73
commit 3511798fa8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 203 additions and 427 deletions

View file

@@ -1,6 +1,6 @@
import json
import random
from fastapi import FastAPI, Response
from bolt_handler import BoltHandler
from arch_handler import ArchHandler
from common import ChatMessage
import logging
@ -8,15 +8,15 @@ from openai import OpenAI
import os
# NOTE(review): this is a rendered git diff, not runnable source — removed and
# added lines appear side by side with their +/- markers (and the original
# indentation) stripped.
ollama_endpoint = os.getenv("OLLAMA_ENDPOINT", "localhost")
# NOTE(review): duplicated assignment is a diff artifact — the Bolt default is
# the removed (pre-commit) line; the Arch default below is the added line that
# the commit keeps.
ollama_model = os.getenv("OLLAMA_MODEL", "Bolt-Function-Calling-1B:Q4_K_M")
ollama_model = os.getenv("OLLAMA_MODEL", "Arch-Function-Calling-1.5B-Q4_K_M")
# Reuse uvicorn's error logger so these messages show up in the server log.
logger = logging.getLogger('uvicorn.error')
logger.info(f"using model: {ollama_model}")
logger.info(f"using ollama endpoint: {ollama_endpoint}")
app = FastAPI()
# NOTE(review): bolt_handler/arch_handler are the removed pre-commit lines;
# the commit replaces both with the single `handler = ArchHandler()` below.
bolt_handler = BoltHandler()
arch_handler = ArchHandler()
handler = ArchHandler()
# OpenAI-compatible client pointed at the local Ollama server (default port
# 11434); the call is continued past the next hunk header in this view.
client = OpenAI(
base_url='http://{}:11434/v1/'.format(ollama_endpoint),
@ -35,10 +35,6 @@ async def healthz():
# NOTE(review): rendered diff view — the function body below is cut by a hunk
# header (old lines ~38-45, including the `for message in req.messages:` loop
# header implied by the `messages.append` line, are not shown), and removed
# lines appear inline without their `-` markers.
@app.post("/v1/chat/completions")
async def chat_completion(req: ChatMessage, res: Response):
logger.info("starting request")
# NOTE(review): the next four lines are the removed pre-commit branch that
# selected bolt_handler vs arch_handler by model-name prefix; the commit
# replaces them with the module-level `handler = ArchHandler()`.
if ollama_model.startswith("Bolt"):
handler = bolt_handler
else:
handler = arch_handler
# Render the request's tool definitions into a system prompt string.
tools_encoded = handler._format_system(req.tools)
# append system prompt with tools to messages
messages = [{"role": "system", "content": tools_encoded}]
# (hunk header: intervening unchanged lines omitted from this view)
@ -46,5 +42,21 @@ async def chat_completion(req: ChatMessage, res: Response):
messages.append({"role": message.role, "content": message.content})
logger.info(f"request model: {ollama_model}, messages: {json.dumps(messages)}")
# Non-streaming completion against the Ollama OpenAI-compatible endpoint.
resp = client.chat.completions.create(messages=messages, model=ollama_model, stream=False)
# NOTE(review): removed pre-commit log line — superseded by the
# `json.dumps(resp.to_dict())` log near the return.
logger.info(f"response: {resp.to_json()}")
# Parse tool-call payloads out of the model's raw text reply; each entry is
# presumably a {tool_name: arguments} mapping — TODO confirm against
# ArchHandler.extract_tools.
tools = handler.extract_tools(resp.choices[0].message.content)
tool_calls = []
for tool in tools:
for tool_name, tool_args in tool.items():
# Shape each extracted call like an OpenAI tool_call object.
# NOTE(review): random 4-digit ids can collide within a response —
# consider uuid4 if callers key on the id.
tool_calls.append({
"id": f"call_{random.randint(1000, 10000)}",
"type": "function",
"function": {
"name": tool_name,
"arguments": tool_args
}
})
if tools:
# Mirror OpenAI semantics: when tool calls are present, content is null.
resp.choices[0].message.tool_calls = tool_calls
resp.choices[0].message.content = None
logger.info(f"response (tools): {json.dumps(tools)}")
logger.info(f"response: {json.dumps(resp.to_dict())}")
return resp