mirror of
https://github.com/MODSetter/SurfSense.git
synced 2026-05-17 18:35:19 +02:00
fix: Added API_BASE param for LiteLLM.
This commit is contained in:
parent
9f3d49ab93
commit
934aff2518
7 changed files with 151 additions and 73 deletions
|
|
@@ -30,3 +30,8 @@ LANGSMITH_TRACING=true
|
|||
LANGSMITH_ENDPOINT="https://api.smith.langchain.com"
|
||||
LANGSMITH_API_KEY="lsv2_pt_....."
|
||||
LANGSMITH_PROJECT="surfsense"
|
||||
|
||||
# OPTIONAL: LiteLLM API Base
|
||||
FAST_LLM_API_BASE=""
|
||||
STRATEGIC_LLM_API_BASE=""
|
||||
LONG_CONTEXT_LLM_API_BASE=""
|
||||
|
|
|
|||
1
surfsense_backend/.gitignore
vendored
1
surfsense_backend/.gitignore
vendored
|
|
@@ -5,3 +5,4 @@ data/
|
|||
__pycache__/
|
||||
.flashrank_cache
|
||||
surf_new_backend.egg-info/
|
||||
podcasts/
|
||||
|
|
|
|||
|
|
@@ -3,7 +3,7 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional, Any
|
||||
from typing import List, Optional, Any
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from app.utils.streaming_service import StreamingService
|
||||
|
||||
|
|
@@ -21,7 +21,7 @@ class State:
|
|||
# Streaming service
|
||||
streaming_service: StreamingService
|
||||
|
||||
# Intermediate state - populated during workflow
|
||||
# chat_history: Optional[List[Any]] = field(default=None)
|
||||
# Using field to explicitly mark as part of state
|
||||
answer_outline: Optional[Any] = field(default=None)
|
||||
|
||||
|
|
|
|||
|
|
@@ -46,14 +46,27 @@ class Config:
|
|||
|
||||
# ---------------------------------------------------------------------------
# LiteLLM-backed chat model instances.
# Each model name is read from the environment; an optional companion
# *_API_BASE variable points that model at a custom OpenAI-compatible /
# LiteLLM proxy endpoint. When the API base is unset or empty, ChatLiteLLM
# falls back to the provider's default endpoint.
# ---------------------------------------------------------------------------

# LONG-CONTEXT LLM
LONG_CONTEXT_LLM = os.getenv("LONG_CONTEXT_LLM")
# BUG FIX: this branch previously read FAST_LLM_API_BASE, silently pointing
# the long-context model at the fast model's endpoint. The .env example in
# this same commit defines a dedicated LONG_CONTEXT_LLM_API_BASE — use it.
LONG_CONTEXT_LLM_API_BASE = os.getenv("LONG_CONTEXT_LLM_API_BASE")
if LONG_CONTEXT_LLM_API_BASE:
    long_context_llm_instance = ChatLiteLLM(
        model=LONG_CONTEXT_LLM, api_base=LONG_CONTEXT_LLM_API_BASE
    )
else:
    long_context_llm_instance = ChatLiteLLM(model=LONG_CONTEXT_LLM)

# FAST LLM
FAST_LLM = os.getenv("FAST_LLM")
FAST_LLM_API_BASE = os.getenv("FAST_LLM_API_BASE")
if FAST_LLM_API_BASE:
    fast_llm_instance = ChatLiteLLM(model=FAST_LLM, api_base=FAST_LLM_API_BASE)
else:
    fast_llm_instance = ChatLiteLLM(model=FAST_LLM)

# STRATEGIC LLM
STRATEGIC_LLM = os.getenv("STRATEGIC_LLM")
STRATEGIC_LLM_API_BASE = os.getenv("STRATEGIC_LLM_API_BASE")
if STRATEGIC_LLM_API_BASE:
    strategic_llm_instance = ChatLiteLLM(
        model=STRATEGIC_LLM, api_base=STRATEGIC_LLM_API_BASE
    )
else:
    strategic_llm_instance = ChatLiteLLM(model=STRATEGIC_LLM)

# Chonkie Configuration | Edit this to your needs
EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL")
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue