diff --git a/requirements.txt b/requirements.txt index 01e704b..314345e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ multidict==6.6.4 ollama==0.6.0 openai==1.102.0 orjson>=3.11.5 -pillow==11.3.0 +pillow==12.1.1 propcache==0.3.2 pydantic==2.11.7 pydantic-settings==2.10.1 @@ -31,6 +31,7 @@ python-dotenv==1.2.1 PyYAML==6.0.3 sniffio==1.3.1 starlette==0.49.1 tqdm==4.67.1 +truststore==0.10.4 typing-inspection==0.4.1 typing_extensions==4.14.1 diff --git a/router.py b/router.py index dcbe60b..51f5054 100644 --- a/router.py +++ b/router.py @@ -7,6 +7,10 @@ license: AGPL """ # ------------------------------------------------------------- import orjson, time, asyncio, yaml, ollama, openai, os, re, aiohttp, ssl, random, base64, io, enhance, secrets +try: + import truststore; truststore.inject_into_ssl() +except ImportError: + pass from datetime import datetime, timezone from pathlib import Path @@ -1873,6 +1877,7 @@ async def create_proxy(request: Request): raise HTTPException(status_code=400, detail=f"Invalid JSON: {e}") from e status_lists = [] + for endpoint in config.endpoints: client = ollama.AsyncClient(host=endpoint) create = await client.create(model=model, quantize=quantize, from_=from_, files=files, adapters=adapters, template=template, license=license, system=system, parameters=parameters, messages=messages, stream=False) @@ -1912,6 +1917,7 @@ async def show_proxy(request: Request, model: Optional[str] = None): # 2. Endpoint logic endpoint = await choose_endpoint(model) #await increment_usage(endpoint, model) + client = ollama.AsyncClient(host=endpoint) # 3. Proxy a simple show request @@ -2045,6 +2051,7 @@ async def copy_proxy(request: Request, source: Optional[str] = None, destination # 3.
Iterate over all endpoints to copy the model on each endpoint status_list = [] + for endpoint in config.endpoints: if "/v1" not in endpoint: client = ollama.AsyncClient(host=endpoint) @@ -2081,6 +2088,7 @@ async def delete_proxy(request: Request, model: Optional[str] = None): # 2. Iterate over all endpoints to delete the model on each endpoint status_list = [] + for endpoint in config.endpoints: if "/v1" not in endpoint: client = ollama.AsyncClient(host=endpoint) @@ -2119,6 +2127,7 @@ async def pull_proxy(request: Request, model: Optional[str] = None): # 2. Iterate over all endpoints to pull the model status_list = [] + for endpoint in config.endpoints: if "/v1" not in endpoint: client = ollama.AsyncClient(host=endpoint) @@ -2160,6 +2169,7 @@ async def push_proxy(request: Request): # 2. Iterate over all endpoints status_list = [] + for endpoint in config.endpoints: client = ollama.AsyncClient(host=endpoint) # 3. Proxy a simple push request @@ -2446,6 +2456,7 @@ async def openai_embedding_proxy(request: Request): else: api_key = "ollama" base_url = ep2base(endpoint) + oclient = openai.AsyncOpenAI(base_url=base_url, default_headers=default_headers, api_key=api_key) # 3. Async generator that streams embedding data and decrements the counter