Merge branch 'main' into dev-v0.6
commit 1b355d8435
2 changed files with 13 additions and 1 deletion

router.py (11 changes)
@@ -7,6 +7,10 @@ license: AGPL
"""
# -------------------------------------------------------------
import orjson, time, asyncio, yaml, ollama, openai, os, re, aiohttp, ssl, random, base64, io, enhance, secrets
try:
    import truststore; truststore.inject_into_ssl()
except ImportError:
    pass
from datetime import datetime, timezone
from pathlib import Path
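The notable addition in this hunk is the guarded truststore import: when the optional truststore package is installed, inject_into_ssl() makes Python's ssl module validate certificates against the operating system's trust store (useful behind corporate TLS interception); when it is missing, the ImportError is swallowed and the default CA bundle stays in effect. The same pattern in isolation, as a runnable sketch:

# Guarded optional dependency: prefer the OS trust store when available.
try:
    import truststore
    truststore.inject_into_ssl()  # patches ssl.SSLContext to use the OS store
    using_os_store = True
except ImportError:
    using_os_store = False  # fall back to Python's default CA bundle

print("OS trust store active:", using_os_store)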
@@ -1873,6 +1877,7 @@ async def create_proxy(request: Request):
        raise HTTPException(status_code=400, detail=f"Invalid JSON: {e}") from e

    status_lists = []

    for endpoint in config.endpoints:
        client = ollama.AsyncClient(host=endpoint)
        create = await client.create(model=model, quantize=quantize, from_=from_, files=files, adapters=adapters, template=template, license=license, system=system, parameters=parameters, messages=messages, stream=False)
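create_proxy fans a single create request out to every configured backend and collects a status per host. A minimal sketch of that fan-out, run concurrently with asyncio.gather; it assumes config.endpoints is a list of Ollama base URLs, and the helper name fan_out_create plus the status-dict shape are illustrative, not the router's actual code:

import asyncio
import ollama

async def fan_out_create(endpoints: list[str], model: str, from_: str) -> list[dict]:
    # Create `model` (derived from `from_`) on every endpoint concurrently.
    async def create_on(endpoint: str) -> dict:
        client = ollama.AsyncClient(host=endpoint)
        try:
            resp = await client.create(model=model, from_=from_, stream=False)
            return {"endpoint": endpoint, "status": getattr(resp, "status", None) or "ok"}
        except Exception as e:
            return {"endpoint": endpoint, "status": f"error: {e}"}
    return list(await asyncio.gather(*(create_on(ep) for ep in endpoints)))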
@@ -1912,6 +1917,7 @@ async def show_proxy(request: Request, model: Optional[str] = None):
    # 2. Endpoint logic
    endpoint = await choose_endpoint(model)
    #await increment_usage(endpoint, model)

    client = ollama.AsyncClient(host=endpoint)

    # 3. Proxy a simple show request
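Unlike the mutation endpoints, show_proxy routes to exactly one backend via choose_endpoint and forwards a single show call. choose_endpoint's policy is not part of this diff; the stand-in below just picks uniformly at random and is purely illustrative:

import random
import ollama

async def show_model(endpoints: list[str], model: str):
    # Stand-in for choose_endpoint: uniform random pick. The real router
    # presumably weighs load as well (note the commented-out increment_usage).
    endpoint = random.choice(endpoints)
    client = ollama.AsyncClient(host=endpoint)
    return await client.show(model)  # proxy a simple show request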
@@ -2045,6 +2051,7 @@ async def copy_proxy(request: Request, source: Optional[str] = None, destination

    # 3. Iterate over all endpoints to copy the model on each endpoint
    status_list = []

    for endpoint in config.endpoints:
        if "/v1" not in endpoint:
            client = ollama.AsyncClient(host=endpoint)
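copy_proxy introduces the filter that recurs in the next hunks: endpoints containing /v1 are OpenAI-compatible and expose no model-management API, so only native Ollama hosts are touched. One pass of that loop as a sketch, with the source and destination parameter names taken from the function signature:

import ollama

async def copy_everywhere(endpoints: list[str], source: str, destination: str) -> list[dict]:
    status_list = []
    for endpoint in endpoints:
        if "/v1" not in endpoint:  # skip OpenAI-compatible endpoints
            client = ollama.AsyncClient(host=endpoint)
            try:
                await client.copy(source, destination)
                status_list.append({"endpoint": endpoint, "status": "success"})
            except Exception as e:
                status_list.append({"endpoint": endpoint, "status": f"error: {e}"})
    return status_list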
@@ -2081,6 +2088,7 @@ async def delete_proxy(request: Request, model: Optional[str] = None):

    # 2. Iterate over all endpoints to delete the model on each endpoint
    status_list = []

    for endpoint in config.endpoints:
        if "/v1" not in endpoint:
            client = ollama.AsyncClient(host=endpoint)
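delete_proxy is the same filtered loop with the verb swapped. The per-host step in isolation, sketched with the ollama client's typed error:

import ollama

async def delete_on_host(endpoint: str, model: str) -> dict:
    # One iteration of the delete fan-out; the router wraps this in the
    # same "/v1"-filtered loop shown above.
    client = ollama.AsyncClient(host=endpoint)
    try:
        await client.delete(model)
        return {"endpoint": endpoint, "status": "deleted"}
    except ollama.ResponseError as e:
        return {"endpoint": endpoint, "status": f"error: {e.error}"}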
@@ -2119,6 +2127,7 @@ async def pull_proxy(request: Request, model: Optional[str] = None):

    # 2. Iterate over all endpoints to pull the model
    status_list = []

    for endpoint in config.endpoints:
        if "/v1" not in endpoint:
            client = ollama.AsyncClient(host=endpoint)
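pull_proxy reuses the loop as well; since pulls are long-running, a streaming variant that reports per-layer progress is often worth having. A sketch using the ollama client's stream=True mode (the event fields follow the Ollama pull progress schema):

import ollama

async def pull_with_progress(endpoint: str, model: str) -> None:
    client = ollama.AsyncClient(host=endpoint)
    # With stream=True the awaited call yields progress events as they arrive.
    async for event in await client.pull(model, stream=True):
        status = getattr(event, "status", "")
        completed, total = getattr(event, "completed", None), getattr(event, "total", None)
        if completed and total:
            print(f"{endpoint}: {status} ({100 * completed // total}%)")
        else:
            print(f"{endpoint}: {status}")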
@@ -2160,6 +2169,7 @@ async def push_proxy(request: Request):

    # 2. Iterate over all endpoints
    status_list = []

    for endpoint in config.endpoints:
        client = ollama.AsyncClient(host=endpoint)
        # 3. Proxy a simple push request
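push_proxy iterates every endpoint without the /v1 guard seen above (any filtering, if present, is outside this hunk). A concurrent sketch of the push fan-out; note that pushing requires a namespaced model name and registry credentials on each host:

import asyncio
import ollama

async def push_everywhere(endpoints: list[str], model: str) -> list[dict]:
    # `model` must be namespaced (e.g. "user/model") for a registry push.
    async def push_on(endpoint: str) -> dict:
        client = ollama.AsyncClient(host=endpoint)
        try:
            await client.push(model, stream=False)
            return {"endpoint": endpoint, "status": "pushed"}
        except Exception as e:
            return {"endpoint": endpoint, "status": f"error: {e}"}
    return list(await asyncio.gather(*(push_on(ep) for ep in endpoints)))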
@@ -2446,6 +2456,7 @@ async def openai_embedding_proxy(request: Request):
    else:
        api_key = "ollama"
    base_url = ep2base(endpoint)

    oclient = openai.AsyncOpenAI(base_url=base_url, default_headers=default_headers, api_key=api_key)

    # 3. Async generator that streams embedding data and decrements the counter
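The embedding path talks the OpenAI protocol, so the endpoint is first converted to an OpenAI-compatible base URL via ep2base. That helper's implementation is not in this diff; the version below is an assumption (append /v1 unless it is already present), paired with one embedding call:

import openai

def ep2base_sketch(endpoint: str) -> str:
    # Assumed ep2base behavior: native Ollama hosts gain a "/v1" suffix,
    # already-OpenAI-compatible endpoints pass through unchanged.
    base = endpoint.rstrip("/")
    return base if base.endswith("/v1") else base + "/v1"

async def embed(endpoint: str, model: str, text: str) -> list[float]:
    api_key = "ollama"  # Ollama ignores the key, but the client requires one
    oclient = openai.AsyncOpenAI(base_url=ep2base_sketch(endpoint), api_key=api_key)
    resp = await oclient.embeddings.create(model=model, input=text)
    return resp.data[0].embedding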