feat: add global LLM configurations

This commit is contained in:
DESKTOP-RTLN3BA\$punk 2025-11-14 21:53:46 -08:00
parent 48fca3329b
commit d4345f75e5
24 changed files with 878 additions and 158 deletions

View file

@ -11,3 +11,4 @@ celerybeat-schedule*
celerybeat-schedule.*
celerybeat-schedule.dir
celerybeat-schedule.bak
global_llm_config.yaml

View file

@ -0,0 +1,73 @@
"""remove_fk_constraints_for_global_llm_configs
Revision ID: 36
Revises: 35
Create Date: 2025-11-13 23:20:12.912741
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "36"
down_revision: str | None = "35"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """
    Remove foreign key constraints on LLM preference columns to allow global configs (negative IDs).

    Global LLM configs use negative IDs and don't exist in the llm_configs table,
    so the foreign key constraints that pointed at llm_configs.id must be dropped.
    """
    # Drop each per-role FK constraint on user_search_space_preferences,
    # in the same order as the original migration.
    for constraint_name in (
        "user_search_space_preferences_long_context_llm_id_fkey",
        "user_search_space_preferences_fast_llm_id_fkey",
        "user_search_space_preferences_strategic_llm_id_fkey",
    ):
        op.drop_constraint(
            constraint_name,
            "user_search_space_preferences",
            type_="foreignkey",
        )
def downgrade() -> None:
    """
    Re-add foreign key constraints (will fail if any negative IDs exist in the table).
    """
    # Recreate each per-role FK back to llm_configs.id with the original
    # ON DELETE SET NULL semantics, in the same order as before.
    fk_specs = (
        ("user_search_space_preferences_long_context_llm_id_fkey", "long_context_llm_id"),
        ("user_search_space_preferences_fast_llm_id_fkey", "fast_llm_id"),
        ("user_search_space_preferences_strategic_llm_id_fkey", "strategic_llm_id"),
    )
    for constraint_name, column_name in fk_specs:
        op.create_foreign_key(
            constraint_name,
            "user_search_space_preferences",
            "llm_configs",
            [column_name],
            ["id"],
            ondelete="SET NULL",
        )

View file

@ -3,6 +3,7 @@ import shutil
from pathlib import Path
from typing import Any
import yaml
from chonkie import AutoEmbeddings, CodeChunker, RecursiveChunker
from chonkie.embeddings.azure_openai import AzureOpenAIEmbeddings
from chonkie.embeddings.registry import EmbeddingsRegistry
@ -80,6 +81,36 @@ def is_ffmpeg_installed():
return shutil.which("ffmpeg") is not None
def load_global_llm_configs():
    """
    Load global LLM configurations from YAML file.

    Reads BASE_DIR/app/config/global_llm_config.yaml. A missing file, an empty
    file, or a parse error all yield an empty list so application startup never
    fails because of the optional global configs.

    Returns:
        list: List of global LLM config dictionaries, or empty list if unavailable
    """
    global_config_file = BASE_DIR / "app" / "config" / "global_llm_config.yaml"

    if not global_config_file.exists():
        # Global configs are optional; absence is not an error.
        return []

    try:
        with open(global_config_file, encoding="utf-8") as f:
            data = yaml.safe_load(f)
        # safe_load returns None for an empty document (and may return a
        # non-dict for malformed-but-valid YAML); treat both as "no configs"
        # instead of raising AttributeError and logging a spurious warning.
        if not isinstance(data, dict):
            return []
        return data.get("global_llm_configs", [])
    except Exception as e:
        # Best-effort load: a broken YAML file should not break startup.
        print(f"Warning: Failed to load global LLM configs: {e}")
        return []
class Config:
# Check if ffmpeg is installed
if not is_ffmpeg_installed():
@ -122,6 +153,11 @@ class Config:
# LLM instances are now managed per-user through the LLMConfig system
# Legacy environment variables removed in favor of user-specific configurations
# Global LLM Configurations (optional)
# Load from global_llm_config.yaml if available
# These can be used as default options for users
GLOBAL_LLM_CONFIGS = load_global_llm_configs()
# Chonkie Configuration | Edit this to your needs
EMBEDDING_MODEL = os.getenv("EMBEDDING_MODEL")
# Azure OpenAI credentials from environment variables

View file

@ -0,0 +1,80 @@
# Global LLM Configuration
#
# SETUP INSTRUCTIONS:
# 1. Copy this file to global_llm_config.yaml and add your real API keys
# 2. This example file is NOT loaded automatically — global configs are only read from global_llm_config.yaml
#
# NOTE: The example API keys below are placeholders and won't work.
# Replace them with your actual API keys to enable global configurations.
#
# These configurations will be available to all users as a convenient option
# Users can choose to use these global configs or add their own
global_llm_configs:
# Example: OpenAI GPT-4 Turbo
- id: -1
name: "Global GPT-4 Turbo"
provider: "OPENAI"
model_name: "gpt-4-turbo-preview"
api_key: "sk-your-openai-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 4000
# Example: Anthropic Claude 3 Opus
- id: -2
name: "Global Claude 3 Opus"
provider: "ANTHROPIC"
model_name: "claude-3-opus-20240229"
api_key: "sk-ant-your-anthropic-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 4000
# Example: Fast model - GPT-3.5 Turbo
- id: -3
name: "Global GPT-3.5 Turbo"
provider: "OPENAI"
model_name: "gpt-3.5-turbo"
api_key: "sk-your-openai-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.5
max_tokens: 2000
# Example: Chinese LLM - DeepSeek
- id: -4
name: "Global DeepSeek Chat"
provider: "DEEPSEEK"
model_name: "deepseek-chat"
api_key: "your-deepseek-api-key-here"
api_base: "https://api.deepseek.com/v1"
language: "Chinese"
litellm_params:
temperature: 0.7
max_tokens: 4000
# Example: Groq - Fast inference
- id: -5
name: "Global Groq Llama 3"
provider: "GROQ"
model_name: "llama3-70b-8192"
api_key: "your-groq-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 8000
# Notes:
# - Use negative IDs to distinguish global configs from user configs
# - IDs should be unique and sequential (e.g., -1, -2, -3, etc.)
# - The 'api_key' field will not be exposed to users via API
# - Users can select these configs for their long_context, fast, or strategic LLM roles
# - All standard LiteLLM providers are supported

View file

@ -348,15 +348,11 @@ class UserSearchSpacePreference(BaseModel, TimestampMixin):
)
# User-specific LLM preferences for this search space
long_context_llm_id = Column(
Integer, ForeignKey("llm_configs.id", ondelete="SET NULL"), nullable=True
)
fast_llm_id = Column(
Integer, ForeignKey("llm_configs.id", ondelete="SET NULL"), nullable=True
)
strategic_llm_id = Column(
Integer, ForeignKey("llm_configs.id", ondelete="SET NULL"), nullable=True
)
# Note: These can be negative IDs for global configs (from YAML) or positive IDs for custom configs (from DB)
# Foreign keys removed to support global configs with negative IDs
long_context_llm_id = Column(Integer, nullable=True)
fast_llm_id = Column(Integer, nullable=True)
strategic_llm_id = Column(Integer, nullable=True)
# Future RBAC fields can be added here
# role = Column(String(50), nullable=True) # e.g., 'owner', 'editor', 'viewer'
@ -365,13 +361,12 @@ class UserSearchSpacePreference(BaseModel, TimestampMixin):
user = relationship("User", back_populates="search_space_preferences")
search_space = relationship("SearchSpace", back_populates="user_preferences")
long_context_llm = relationship(
"LLMConfig", foreign_keys=[long_context_llm_id], post_update=True
)
fast_llm = relationship("LLMConfig", foreign_keys=[fast_llm_id], post_update=True)
strategic_llm = relationship(
"LLMConfig", foreign_keys=[strategic_llm_id], post_update=True
)
# Note: Relationships removed because foreign keys no longer exist
# Global configs (negative IDs) don't exist in llm_configs table
# Application code manually fetches configs when needed
# long_context_llm = relationship("LLMConfig", foreign_keys=[long_context_llm_id], post_update=True)
# fast_llm = relationship("LLMConfig", foreign_keys=[fast_llm_id], post_update=True)
# strategic_llm = relationship("LLMConfig", foreign_keys=[strategic_llm_id], post_update=True)
class Log(BaseModel, TimestampMixin):

View file

@ -68,9 +68,9 @@ async def handle_chat_data(
selectinload(UserSearchSpacePreference.search_space).selectinload(
SearchSpace.llm_configs
),
selectinload(UserSearchSpacePreference.long_context_llm),
selectinload(UserSearchSpacePreference.fast_llm),
selectinload(UserSearchSpacePreference.strategic_llm),
# Note: Removed selectinload for LLM relationships as they no longer exist
# Global configs (negative IDs) don't have foreign keys
# LLM configs are now fetched manually when needed
)
.filter(
UserSearchSpacePreference.search_space_id == search_space_id,
@ -81,6 +81,8 @@ async def handle_chat_data(
# print("UserSearchSpacePreference:", user_preference)
language = None
llm_configs = [] # Initialize to empty list
if (
user_preference
and user_preference.search_space
@ -88,16 +90,36 @@ async def handle_chat_data(
):
llm_configs = user_preference.search_space.llm_configs
for preferred_llm in [
user_preference.fast_llm,
user_preference.long_context_llm,
user_preference.strategic_llm,
]:
if preferred_llm and getattr(preferred_llm, "language", None):
language = preferred_llm.language
break
# Manually fetch LLM configs since relationships no longer exist
# Check fast_llm, long_context_llm, and strategic_llm IDs
from app.config import config as app_config
if not language:
for llm_id in [
user_preference.fast_llm_id,
user_preference.long_context_llm_id,
user_preference.strategic_llm_id,
]:
if llm_id is not None:
# Check if it's a global config (negative ID)
if llm_id < 0:
# Look in global configs
for global_cfg in app_config.GLOBAL_LLM_CONFIGS:
if global_cfg.get("id") == llm_id:
language = global_cfg.get("language")
if language:
break
else:
# Look in custom configs
for llm_config in llm_configs:
if llm_config.id == llm_id and getattr(
llm_config, "language", None
):
language = llm_config.language
break
if language:
break
if not language and llm_configs:
first_llm_config = llm_configs[0]
language = getattr(first_llm_config, "language", None)

View file

@ -1,9 +1,11 @@
import logging
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import selectinload
from app.config import config
from app.db import (
LLMConfig,
SearchSpace,
@ -16,6 +18,7 @@ from app.services.llm_service import validate_llm_config
from app.users import current_active_user
router = APIRouter()
logger = logging.getLogger(__name__)
# Helper function to check search space access
@ -43,16 +46,11 @@ async def get_or_create_user_preference(
) -> UserSearchSpacePreference:
"""Get or create user preference for a search space"""
result = await session.execute(
select(UserSearchSpacePreference)
.filter(
select(UserSearchSpacePreference).filter(
UserSearchSpacePreference.user_id == user_id,
UserSearchSpacePreference.search_space_id == search_space_id,
)
.options(
selectinload(UserSearchSpacePreference.long_context_llm),
selectinload(UserSearchSpacePreference.fast_llm),
selectinload(UserSearchSpacePreference.strategic_llm),
)
# Removed selectinload options since relationships no longer exist
)
preference = result.scalars().first()
@ -88,6 +86,58 @@ class LLMPreferencesRead(BaseModel):
strategic_llm: LLMConfigRead | None = None
class GlobalLLMConfigRead(BaseModel):
    """Schema for reading global LLM configs (without API key).

    Mirrors the shape of a user LLM config but deliberately omits the api_key
    field so administrator-provisioned credentials are never exposed via the API.
    """

    id: int  # Negative ID — distinguishes global (YAML) configs from DB-backed user configs
    name: str  # Human-readable display name shown in selectors
    provider: str  # Provider enum value, e.g. "OPENAI", "ANTHROPIC"
    custom_provider: str | None = None  # Optional LiteLLM custom provider prefix
    model_name: str  # Provider-specific model identifier
    api_base: str | None = None  # Optional API base URL override
    language: str | None = None  # Preferred response language, if configured
    litellm_params: dict | None = None  # Extra kwargs forwarded to LiteLLM
    is_global: bool = True  # Always True; lets clients tell config origin apart
# Global LLM Config endpoints
@router.get("/global-llm-configs", response_model=list[GlobalLLMConfigRead])
async def get_global_llm_configs(
    user: User = Depends(current_active_user),
):
    """
    Get all available global LLM configurations.

    These are pre-configured by the system administrator and available to all users.
    API keys are not exposed through this endpoint.
    """
    try:
        # Project each raw YAML config onto the safe, key-free response shape.
        return [
            {
                "id": cfg.get("id"),
                "name": cfg.get("name"),
                "provider": cfg.get("provider"),
                "custom_provider": cfg.get("custom_provider"),
                "model_name": cfg.get("model_name"),
                "api_base": cfg.get("api_base"),
                "language": cfg.get("language"),
                "litellm_params": cfg.get("litellm_params", {}),
                "is_global": True,
            }
            for cfg in config.GLOBAL_LLM_CONFIGS
        ]
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch global LLM configs: {e!s}"
        ) from e
@router.post("/llm-configs", response_model=LLMConfigRead)
async def create_llm_config(
llm_config: LLMConfigCreate,
@ -309,13 +359,49 @@ async def get_user_llm_preferences(
session, user.id, search_space_id
)
# Helper function to get config (global or custom)
async def get_config_for_id(config_id):
    """Resolve an LLM config ID to a global (YAML) or custom (DB) config.

    Negative IDs refer to global configs loaded from YAML; positive IDs are
    user-created rows in llm_configs. Returns None when the ID is unset or
    not found in either source. Closes over `session`, `config`, and
    `search_space_id` from the enclosing request handler.
    """
    if config_id is None:
        return None
    # Check if it's a global config (negative ID)
    if config_id < 0:
        for cfg in config.GLOBAL_LLM_CONFIGS:
            if cfg.get("id") == config_id:
                # Return as LLMConfigRead-compatible dict
                return {
                    "id": cfg.get("id"),
                    "name": cfg.get("name"),
                    "provider": cfg.get("provider"),
                    "custom_provider": cfg.get("custom_provider"),
                    "model_name": cfg.get("model_name"),
                    "api_key": "***GLOBAL***",  # Don't expose the actual key
                    "api_base": cfg.get("api_base"),
                    "language": cfg.get("language"),
                    "litellm_params": cfg.get("litellm_params"),
                    "created_at": None,
                    "search_space_id": search_space_id,
                }
        return None
    # It's a custom config, fetch from database
    # NOTE(review): this lookup filters by ID only, not by search_space_id —
    # presumably validated at update time; confirm cross-space reads are intended.
    result = await session.execute(
        select(LLMConfig).filter(LLMConfig.id == config_id)
    )
    return result.scalars().first()
# Get the configs (from DB for custom, or constructed for global)
long_context_llm = await get_config_for_id(preference.long_context_llm_id)
fast_llm = await get_config_for_id(preference.fast_llm_id)
strategic_llm = await get_config_for_id(preference.strategic_llm_id)
return {
"long_context_llm_id": preference.long_context_llm_id,
"fast_llm_id": preference.fast_llm_id,
"strategic_llm_id": preference.strategic_llm_id,
"long_context_llm": preference.long_context_llm,
"fast_llm": preference.fast_llm,
"strategic_llm": preference.strategic_llm,
"long_context_llm": long_context_llm,
"fast_llm": fast_llm,
"strategic_llm": strategic_llm,
}
except HTTPException:
raise
@ -353,29 +439,57 @@ async def update_user_llm_preferences(
for _key, llm_config_id in update_data.items():
if llm_config_id is not None:
# Verify the LLM config belongs to the search space
result = await session.execute(
select(LLMConfig).filter(
LLMConfig.id == llm_config_id,
LLMConfig.search_space_id == search_space_id,
)
)
llm_config = result.scalars().first()
if not llm_config:
raise HTTPException(
status_code=404,
detail=f"LLM configuration {llm_config_id} not found in this search space",
)
# Check if this is a global config (negative ID)
if llm_config_id < 0:
# Validate global config exists
global_config = None
for cfg in config.GLOBAL_LLM_CONFIGS:
if cfg.get("id") == llm_config_id:
global_config = cfg
break
# Collect language for consistency check
languages.add(llm_config.language)
if not global_config:
raise HTTPException(
status_code=404,
detail=f"Global LLM configuration {llm_config_id} not found",
)
# Check if all selected LLM configs have the same language
# Collect language for consistency check (if explicitly set)
lang = global_config.get("language")
if lang and lang.strip(): # Only add non-empty languages
languages.add(lang.strip())
else:
# Verify the LLM config belongs to the search space (custom config)
result = await session.execute(
select(LLMConfig).filter(
LLMConfig.id == llm_config_id,
LLMConfig.search_space_id == search_space_id,
)
)
llm_config = result.scalars().first()
if not llm_config:
raise HTTPException(
status_code=404,
detail=f"LLM configuration {llm_config_id} not found in this search space",
)
# Collect language for consistency check (if explicitly set)
if llm_config.language and llm_config.language.strip():
languages.add(llm_config.language.strip())
# Language consistency check - only warn if there are multiple explicit languages
# Allow mixing configs with and without language settings
if len(languages) > 1:
raise HTTPException(
status_code=400,
detail="All selected LLM configurations must have the same language setting",
# Log warning but allow the operation
logger.warning(
f"Multiple languages detected in LLM selection for search_space {search_space_id}: {languages}. "
"This may affect response quality."
)
# Don't raise an exception - allow users to proceed
# raise HTTPException(
# status_code=400,
# detail="All selected LLM configurations must have the same language setting",
# )
# Update user preferences
for key, value in update_data.items():
@ -384,19 +498,50 @@ async def update_user_llm_preferences(
await session.commit()
await session.refresh(preference)
# Reload relationships
await session.refresh(
preference, ["long_context_llm", "fast_llm", "strategic_llm"]
)
# Helper function to get config (global or custom)
async def get_config_for_id(config_id):
    """Resolve an LLM config ID to a global (YAML) or custom (DB) config.

    Negative IDs are global configs from YAML; positive IDs are DB rows.
    Returns None when the ID is unset or not found in either source.
    """
    if config_id is None:
        return None

    if config_id < 0:
        # Global config: find the matching entry in the YAML-backed list.
        match = next(
            (c for c in config.GLOBAL_LLM_CONFIGS if c.get("id") == config_id),
            None,
        )
        if match is None:
            return None
        # Shape the result like LLMConfigRead, masking the real key.
        return {
            "id": match.get("id"),
            "name": match.get("name"),
            "provider": match.get("provider"),
            "custom_provider": match.get("custom_provider"),
            "model_name": match.get("model_name"),
            "api_key": "***GLOBAL***",  # Don't expose the actual key
            "api_base": match.get("api_base"),
            "language": match.get("language"),
            "litellm_params": match.get("litellm_params"),
            "created_at": None,
            "search_space_id": search_space_id,
        }

    # Custom config: fetch the row from the database by primary key.
    db_result = await session.execute(
        select(LLMConfig).filter(LLMConfig.id == config_id)
    )
    return db_result.scalars().first()
# Get the configs (from DB for custom, or constructed for global)
long_context_llm = await get_config_for_id(preference.long_context_llm_id)
fast_llm = await get_config_for_id(preference.fast_llm_id)
strategic_llm = await get_config_for_id(preference.strategic_llm_id)
# Return updated preferences
return {
"long_context_llm_id": preference.long_context_llm_id,
"fast_llm_id": preference.fast_llm_id,
"strategic_llm_id": preference.strategic_llm_id,
"long_context_llm": preference.long_context_llm,
"fast_llm": preference.fast_llm,
"strategic_llm": preference.strategic_llm,
"long_context_llm": long_context_llm,
"fast_llm": fast_llm,
"strategic_llm": strategic_llm,
}
except HTTPException:
raise

View file

@ -62,7 +62,11 @@ class LLMConfigUpdate(BaseModel):
class LLMConfigRead(LLMConfigBase, IDModel, TimestampModel):
id: int
created_at: datetime
search_space_id: int
created_at: datetime | None = Field(
None, description="Creation timestamp (None for global configs)"
)
search_space_id: int | None = Field(
None, description="Search space ID (None for global configs)"
)
model_config = ConfigDict(from_attributes=True)

View file

@ -6,6 +6,7 @@ from langchain_litellm import ChatLiteLLM
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.config import config
from app.db import LLMConfig, UserSearchSpacePreference
# Configure litellm to automatically drop unsupported parameters
@ -20,6 +21,27 @@ class LLMRole:
STRATEGIC = "strategic"
def get_global_llm_config(llm_config_id: int) -> dict | None:
    """
    Get a global LLM configuration by ID.

    Global configs have negative IDs, so any non-negative ID is rejected
    immediately without scanning the config list.

    Args:
        llm_config_id: The ID of the global config (should be negative)

    Returns:
        dict: Global config dictionary or None if not found
    """
    if llm_config_id >= 0:
        # Non-negative IDs belong to DB-backed user configs, never global ones.
        return None
    return next(
        (cfg for cfg in config.GLOBAL_LLM_CONFIGS if cfg.get("id") == llm_config_id),
        None,
    )
async def validate_llm_config(
provider: str,
model_name: str,
@ -171,7 +193,70 @@ async def get_user_llm_instance(
)
return None
# Get the LLM configuration
# Check if this is a global config (negative ID)
if llm_config_id < 0:
global_config = get_global_llm_config(llm_config_id)
if not global_config:
logger.error(f"Global LLM config {llm_config_id} not found")
return None
# Build model string for global config
if global_config.get("custom_provider"):
model_string = (
f"{global_config['custom_provider']}/{global_config['model_name']}"
)
else:
provider_map = {
"OPENAI": "openai",
"ANTHROPIC": "anthropic",
"GROQ": "groq",
"COHERE": "cohere",
"GOOGLE": "gemini",
"OLLAMA": "ollama",
"MISTRAL": "mistral",
"AZURE_OPENAI": "azure",
"OPENROUTER": "openrouter",
"COMETAPI": "cometapi",
"XAI": "xai",
"BEDROCK": "bedrock",
"AWS_BEDROCK": "bedrock",
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
"REPLICATE": "replicate",
"PERPLEXITY": "perplexity",
"ANYSCALE": "anyscale",
"DEEPINFRA": "deepinfra",
"CEREBRAS": "cerebras",
"SAMBANOVA": "sambanova",
"AI21": "ai21",
"CLOUDFLARE": "cloudflare",
"DATABRICKS": "databricks",
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
}
provider_prefix = provider_map.get(
global_config["provider"], global_config["provider"].lower()
)
model_string = f"{provider_prefix}/{global_config['model_name']}"
# Create ChatLiteLLM instance from global config
litellm_kwargs = {
"model": model_string,
"api_key": global_config["api_key"],
}
if global_config.get("api_base"):
litellm_kwargs["api_base"] = global_config["api_base"]
if global_config.get("litellm_params"):
litellm_kwargs.update(global_config["litellm_params"])
return ChatLiteLLM(**litellm_kwargs)
# Get the LLM configuration from database (user-specific config)
result = await session.execute(
select(LLMConfig).where(
LLMConfig.id == llm_config_id,

View file

@ -282,7 +282,7 @@ export function DashboardClientLayout({
</div>
</div>
</header>
<div className="grow flex-1 overflow-auto">{children}</div>
<div className="grow flex-1 overflow-auto min-h-[calc(100vh-64px)]">{children}</div>
</div>
{/* Only render chat panel on researcher page */}
{isResearcherPage && <ChatPanelContainer />}

View file

@ -12,7 +12,7 @@ import { CompletionStep } from "@/components/onboard/completion-step";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Progress } from "@/components/ui/progress";
import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
const TOTAL_STEPS = 3;
@ -23,6 +23,7 @@ const OnboardPage = () => {
const searchSpaceId = Number(params.search_space_id);
const { llmConfigs, loading: configsLoading, refreshConfigs } = useLLMConfigs(searchSpaceId);
const { globalConfigs, loading: globalConfigsLoading } = useGlobalLLMConfigs();
const {
preferences,
loading: preferencesLoading,
@ -51,7 +52,13 @@ const OnboardPage = () => {
// Redirect to dashboard if onboarding is already complete and user hasn't progressed (fresh page load)
// But only check once to avoid redirect loops
useEffect(() => {
if (!preferencesLoading && !configsLoading && isOnboardingComplete() && !hasUserProgressed) {
if (
!preferencesLoading &&
!configsLoading &&
!globalConfigsLoading &&
isOnboardingComplete() &&
!hasUserProgressed
) {
// Small delay to ensure the check is stable
const timer = setTimeout(() => {
router.push(`/dashboard/${searchSpaceId}`);
@ -61,6 +68,7 @@ const OnboardPage = () => {
}, [
preferencesLoading,
configsLoading,
globalConfigsLoading,
isOnboardingComplete,
hasUserProgressed,
router,
@ -77,7 +85,10 @@ const OnboardPage = () => {
t("all_set"),
];
const canProceedToStep2 = !configsLoading && llmConfigs.length > 0;
// User can proceed to step 2 if they have either custom configs OR global configs available
const canProceedToStep2 =
!configsLoading && !globalConfigsLoading && (llmConfigs.length > 0 || globalConfigs.length > 0);
const canProceedToStep3 =
!preferencesLoading &&
preferences.long_context_llm_id &&
@ -100,7 +111,7 @@ const OnboardPage = () => {
router.push(`/dashboard/${searchSpaceId}/documents`);
};
if (configsLoading || preferencesLoading) {
if (configsLoading || preferencesLoading || globalConfigsLoading) {
return (
<div className="flex flex-col items-center justify-center min-h-screen">
<Card className="w-[350px] bg-background/60 backdrop-blur-sm">

View file

@ -131,7 +131,7 @@ export function AnimatedEmptyState() {
}, [layoutStable, isInView]);
return (
<div ref={ref} className="flex-1 flex items-center justify-center w-full min-h-[400px]">
<div ref={ref} className="flex-1 flex items-center justify-center w-full min-h-fit">
<div className="max-w-4xl mx-auto px-4 py-10 text-center">
<RoughNotationGroup show={shouldShowHighlight}>
<h1 className={headingClassName}>

View file

@ -27,7 +27,7 @@ import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/comp
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import { useDocumentTypes } from "@/hooks/use-document-types";
import type { Document } from "@/hooks/use-documents";
import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useSearchSourceConnectors } from "@/hooks/use-search-source-connectors";
const DocumentSelector = React.memo(
@ -567,19 +567,29 @@ const LLMSelector = React.memo(() => {
const searchSpaceId = Number(search_space_id);
const { llmConfigs, loading: llmLoading, error } = useLLMConfigs(searchSpaceId);
const {
globalConfigs,
loading: globalConfigsLoading,
error: globalConfigsError,
} = useGlobalLLMConfigs();
const {
preferences,
updatePreferences,
loading: preferencesLoading,
} = useLLMPreferences(searchSpaceId);
const isLoading = llmLoading || preferencesLoading;
const isLoading = llmLoading || preferencesLoading || globalConfigsLoading;
// Combine global and custom configs
const allConfigs = React.useMemo(() => {
return [...globalConfigs.map((config) => ({ ...config, is_global: true })), ...llmConfigs];
}, [globalConfigs, llmConfigs]);
// Memoize the selected config to avoid repeated lookups
const selectedConfig = React.useMemo(() => {
if (!preferences.fast_llm_id || !llmConfigs.length) return null;
return llmConfigs.find((config) => config.id === preferences.fast_llm_id) || null;
}, [preferences.fast_llm_id, llmConfigs]);
if (!preferences.fast_llm_id || !allConfigs.length) return null;
return allConfigs.find((config) => config.id === preferences.fast_llm_id) || null;
}, [preferences.fast_llm_id, allConfigs]);
// Memoize the display value for the trigger
const displayValue = React.useMemo(() => {
@ -591,6 +601,7 @@ const LLMSelector = React.memo(() => {
<span className="hidden sm:inline text-muted-foreground text-xs truncate max-w-[60px]">
{selectedConfig.name}
</span>
{selectedConfig.is_global && <span className="text-xs">🌐</span>}
</div>
);
}, [selectedConfig]);
@ -616,7 +627,7 @@ const LLMSelector = React.memo(() => {
}
// Error state
if (error) {
if (error || globalConfigsError) {
return (
<div className="h-8 min-w-[100px] sm:min-w-[120px]">
<Button
@ -655,7 +666,7 @@ const LLMSelector = React.memo(() => {
</div>
</div>
{llmConfigs.length === 0 ? (
{allConfigs.length === 0 ? (
<div className="px-4 py-6 text-center">
<div className="mx-auto w-12 h-12 rounded-full bg-muted flex items-center justify-center mb-3">
<Brain className="h-5 w-5 text-muted-foreground" />
@ -675,32 +686,87 @@ const LLMSelector = React.memo(() => {
</div>
) : (
<div className="py-1">
{llmConfigs.map((config) => (
<SelectItem
key={config.id}
value={config.id.toString()}
className="px-3 py-2 cursor-pointer hover:bg-accent/50 focus:bg-accent"
>
<div className="flex items-center justify-between w-full min-w-0">
<div className="flex items-center gap-3 min-w-0 flex-1">
<div className="flex h-8 w-8 items-center justify-center rounded-md bg-primary/10 flex-shrink-0">
<Brain className="h-4 w-4 text-primary" />
</div>
<div className="min-w-0 flex-1">
<div className="flex items-center gap-2 mb-1">
<span className="font-medium text-sm truncate">{config.name}</span>
<Badge variant="outline" className="text-xs px-1.5 py-0.5 flex-shrink-0">
{config.provider}
</Badge>
</div>
<p className="text-xs text-muted-foreground font-mono truncate">
{config.model_name}
</p>
</div>
</div>
{/* Global Configurations */}
{globalConfigs.length > 0 && (
<>
<div className="px-3 py-1.5 text-xs font-semibold text-muted-foreground">
Global Configurations
</div>
</SelectItem>
))}
{globalConfigs.map((config) => (
<SelectItem
key={config.id}
value={config.id.toString()}
className="px-3 py-2 cursor-pointer hover:bg-accent/50 focus:bg-accent"
>
<div className="flex items-center justify-between w-full min-w-0">
<div className="flex items-center gap-3 min-w-0 flex-1">
<div className="flex h-8 w-8 items-center justify-center rounded-md bg-primary/10 flex-shrink-0">
<Brain className="h-4 w-4 text-primary" />
</div>
<div className="min-w-0 flex-1">
<div className="flex items-center gap-2 mb-1 flex-wrap">
<span className="font-medium text-sm truncate">{config.name}</span>
<Badge
variant="outline"
className="text-xs px-1.5 py-0.5 flex-shrink-0"
>
{config.provider}
</Badge>
<Badge
variant="secondary"
className="text-xs px-1.5 py-0.5 flex-shrink-0"
>
🌐 Global
</Badge>
</div>
<p className="text-xs text-muted-foreground font-mono truncate">
{config.model_name}
</p>
</div>
</div>
</div>
</SelectItem>
))}
</>
)}
{/* Custom Configurations */}
{llmConfigs.length > 0 && (
<>
<div className="px-3 py-1.5 text-xs font-semibold text-muted-foreground">
Your Configurations
</div>
{llmConfigs.map((config) => (
<SelectItem
key={config.id}
value={config.id.toString()}
className="px-3 py-2 cursor-pointer hover:bg-accent/50 focus:bg-accent"
>
<div className="flex items-center justify-between w-full min-w-0">
<div className="flex items-center gap-3 min-w-0 flex-1">
<div className="flex h-8 w-8 items-center justify-center rounded-md bg-primary/10 flex-shrink-0">
<Brain className="h-4 w-4 text-primary" />
</div>
<div className="min-w-0 flex-1">
<div className="flex items-center gap-2 mb-1">
<span className="font-medium text-sm truncate">{config.name}</span>
<Badge
variant="outline"
className="text-xs px-1.5 py-0.5 flex-shrink-0"
>
{config.provider}
</Badge>
</div>
<p className="text-xs text-muted-foreground font-mono truncate">
{config.model_name}
</p>
</div>
</div>
</div>
</SelectItem>
))}
</>
)}
</div>
)}
</SelectContent>
@ -787,7 +853,7 @@ export const ChatInputUI = React.memo(
onTopKChange?: (topK: number) => void;
}) => {
return (
<ChatInput>
<ChatInput className="p-2">
<ChatInput.Form className="flex gap-2">
<ChatInput.Field className="flex-1" />
<ChatInput.Submit />

View file

@ -43,10 +43,10 @@ export default function ChatInterface({
}, [chat_id, search_space_id]);
return (
<LlamaIndexChatSection handler={handler} className="flex h-full">
<LlamaIndexChatSection handler={handler} className="flex h-full max-w-7xl mx-auto">
<div className="flex grow-1 flex-col">
<ChatMessagesUI />
<div className="border-t p-4">
<div className="border-1 rounded-4xl p-2">
<ChatInputUI
onDocumentSelectionChange={onDocumentSelectionChange}
selectedDocuments={selectedDocuments}

View file

@ -22,7 +22,7 @@ export function ChatMessagesUI() {
<LlamaIndexChatMessages.Empty>
<AnimatedEmptyState />
</LlamaIndexChatMessages.Empty>
<LlamaIndexChatMessages.List className="p-4">
<LlamaIndexChatMessages.List className="p-2">
{messages.map((message, index) => (
<ChatMessageUI
key={`Message-${index}`}

View file

@ -79,9 +79,16 @@ export function ChatPanelView(props: ChatPanelViewProps) {
animate={{ opacity: 1 }}
transition={{ duration: 0.3 }}
>
<button
type="button"
<div
role="button"
tabIndex={0}
onClick={handleGeneratePost}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
handleGeneratePost();
}
}}
className={cn(
"relative w-full rounded-2xl p-4 transition-all duration-300 cursor-pointer group overflow-hidden",
"border-2",
@ -147,7 +154,7 @@ export function ChatPanelView(props: ChatPanelViewProps) {
<ConfigModal generatePodcast={generatePodcast} />
</div>
</div>
</button>
</div>
</motion.div>
</div>
) : (

View file

@ -1,6 +1,6 @@
"use client";
import { AlertCircle, Bot, Check, ChevronsUpDown, Plus, Trash2 } from "lucide-react";
import { AlertCircle, Bot, Check, CheckCircle, ChevronsUpDown, Plus, Trash2 } from "lucide-react";
import { motion } from "motion/react";
import { useTranslations } from "next-intl";
import { useState } from "react";
@ -30,7 +30,7 @@ import {
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import { type CreateLLMConfig, useLLMConfigs } from "@/hooks/use-llm-configs";
import { type CreateLLMConfig, useGlobalLLMConfigs, useLLMConfigs } from "@/hooks/use-llm-configs";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
@ -48,6 +48,7 @@ export function AddProviderStep({
}: AddProviderStepProps) {
const t = useTranslations("onboard");
const { llmConfigs, createLLMConfig, deleteLLMConfig } = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
const [isAddingNew, setIsAddingNew] = useState(false);
const [formData, setFormData] = useState<CreateLLMConfig>({
name: "",
@ -117,6 +118,19 @@ export function AddProviderStep({
<AlertDescription>{t("add_provider_instruction")}</AlertDescription>
</Alert>
{/* Global Configs Notice */}
{globalConfigs.length > 0 && (
<Alert className="bg-blue-50 border-blue-200 dark:bg-blue-950 dark:border-blue-800">
<CheckCircle className="h-4 w-4 text-blue-600" />
<AlertDescription className="text-blue-800 dark:text-blue-200">
<strong>{globalConfigs.length} global configuration(s) available!</strong>
<br />
You can skip adding your own LLM provider and use our pre-configured models in the next
step. Or continue here to add your own custom configurations.
</AlertDescription>
</Alert>
)}
{/* Existing Configurations */}
{llmConfigs.length > 0 && (
<div className="space-y-4">

View file

@ -15,7 +15,7 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
interface AssignRolesStepProps {
searchSpaceId: number;
@ -25,8 +25,12 @@ interface AssignRolesStepProps {
export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignRolesStepProps) {
const t = useTranslations("onboard");
const { llmConfigs } = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
const { preferences, updatePreferences } = useLLMPreferences(searchSpaceId);
// Combine global and user-specific configs
const allConfigs = [...globalConfigs, ...llmConfigs];
const ROLE_DESCRIPTIONS = {
long_context: {
icon: Brain,
@ -107,7 +111,7 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR
const isAssignmentComplete =
assignments.long_context_llm_id && assignments.fast_llm_id && assignments.strategic_llm_id;
if (llmConfigs.length === 0) {
if (allConfigs.length === 0) {
return (
<div className="flex flex-col items-center justify-center py-12">
<AlertCircle className="w-16 h-16 text-muted-foreground mb-4" />
@ -130,7 +134,7 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR
{Object.entries(ROLE_DESCRIPTIONS).map(([key, role]) => {
const IconComponent = role.icon;
const currentAssignment = assignments[`${key}_llm_id` as keyof typeof assignments];
const assignedConfig = llmConfigs.find((config) => config.id === currentAssignment);
const assignedConfig = allConfigs.find((config) => config.id === currentAssignment);
return (
<motion.div
@ -171,6 +175,32 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR
<SelectValue placeholder={t("select_llm_config")} />
</SelectTrigger>
<SelectContent>
{globalConfigs.length > 0 && (
<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground">
{t("global_configs") || "Global Configurations"}
</div>
)}
{globalConfigs
.filter((config) => config.id && config.id.toString().trim() !== "")
.map((config) => (
<SelectItem key={config.id} value={config.id.toString()}>
<div className="flex items-center gap-2">
<Badge variant="secondary" className="text-xs">
🌐 Global
</Badge>
<Badge variant="outline" className="text-xs">
{config.provider}
</Badge>
<span>{config.name}</span>
<span className="text-muted-foreground">({config.model_name})</span>
</div>
</SelectItem>
))}
{llmConfigs.length > 0 && globalConfigs.length > 0 && (
<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground border-t mt-1">
{t("your_configs") || "Your Configurations"}
</div>
)}
{llmConfigs
.filter((config) => config.id && config.id.toString().trim() !== "")
.map((config) => (
@ -193,6 +223,11 @@ export function AssignRolesStep({ searchSpaceId, onPreferencesUpdated }: AssignR
<div className="flex items-center gap-2 text-sm">
<Bot className="w-4 h-4" />
<span className="font-medium">{t("assigned")}:</span>
{assignedConfig.is_global && (
<Badge variant="secondary" className="text-xs">
🌐 Global
</Badge>
)}
<Badge variant="secondary">{assignedConfig.provider}</Badge>
<span>{assignedConfig.name}</span>
</div>

View file

@ -4,7 +4,7 @@ import { ArrowRight, Bot, Brain, CheckCircle, Sparkles, Zap } from "lucide-react
import { motion } from "motion/react";
import { Badge } from "@/components/ui/badge";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
const ROLE_ICONS = {
long_context: Brain,
@ -18,12 +18,16 @@ interface CompletionStepProps {
export function CompletionStep({ searchSpaceId }: CompletionStepProps) {
const { llmConfigs } = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
const { preferences } = useLLMPreferences(searchSpaceId);
// Combine global and user-specific configs
const allConfigs = [...globalConfigs, ...llmConfigs];
const assignedConfigs = {
long_context: llmConfigs.find((c) => c.id === preferences.long_context_llm_id),
fast: llmConfigs.find((c) => c.id === preferences.fast_llm_id),
strategic: llmConfigs.find((c) => c.id === preferences.strategic_llm_id),
long_context: allConfigs.find((c) => c.id === preferences.long_context_llm_id),
fast: allConfigs.find((c) => c.id === preferences.fast_llm_id),
strategic: allConfigs.find((c) => c.id === preferences.strategic_llm_id),
};
return (
@ -86,6 +90,11 @@ export function CompletionStep({ searchSpaceId }: CompletionStepProps) {
</div>
</div>
<div className="flex items-center gap-2">
{config.is_global && (
<Badge variant="secondary" className="text-xs">
🌐 Global
</Badge>
)}
<Badge variant="outline">{config.provider}</Badge>
<span className="text-sm text-muted-foreground">{config.model_name}</span>
</div>
@ -115,8 +124,14 @@ export function CompletionStep({ searchSpaceId }: CompletionStepProps) {
</p>
<div className="flex flex-wrap gap-2 text-sm">
<Badge variant="secondary">
{llmConfigs.length} LLM provider{llmConfigs.length > 1 ? "s" : ""} configured
{allConfigs.length} LLM provider{allConfigs.length > 1 ? "s" : ""} available
</Badge>
{globalConfigs.length > 0 && (
<Badge variant="secondary"> {globalConfigs.length} Global config(s)</Badge>
)}
{llmConfigs.length > 0 && (
<Badge variant="secondary"> {llmConfigs.length} Custom config(s)</Badge>
)}
<Badge variant="secondary"> All roles assigned</Badge>
<Badge variant="secondary"> Ready to use</Badge>
</div>

View file

@ -27,7 +27,7 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
const ROLE_DESCRIPTIONS = {
long_context: {
@ -67,6 +67,12 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
error: configsError,
refreshConfigs,
} = useLLMConfigs(searchSpaceId);
const {
globalConfigs,
loading: globalConfigsLoading,
error: globalConfigsError,
refreshGlobalConfigs,
} = useGlobalLLMConfigs();
const {
preferences,
loading: preferencesLoading,
@ -164,12 +170,17 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
const isAssignmentComplete =
assignments.long_context_llm_id && assignments.fast_llm_id && assignments.strategic_llm_id;
const assignedConfigIds = Object.values(assignments).filter((id) => id !== "");
const availableConfigs = llmConfigs.filter(
(config) => config.id && config.id.toString().trim() !== ""
);
const isLoading = configsLoading || preferencesLoading;
const hasError = configsError || preferencesError;
// Combine global and custom configs
const allConfigs = [
...globalConfigs.map((config) => ({ ...config, is_global: true })),
...llmConfigs.filter((config) => config.id && config.id.toString().trim() !== ""),
];
const availableConfigs = allConfigs;
const isLoading = configsLoading || preferencesLoading || globalConfigsLoading;
const hasError = configsError || preferencesError || globalConfigsError;
return (
<div className="space-y-6">
@ -218,7 +229,9 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
{hasError && (
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>{configsError || preferencesError}</AlertDescription>
<AlertDescription>
{configsError || preferencesError || globalConfigsError}
</AlertDescription>
</Alert>
)}
@ -249,6 +262,10 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<div className="space-y-1">
<p className="text-3xl font-bold tracking-tight">{availableConfigs.length}</p>
<p className="text-sm font-medium text-muted-foreground">Available Models</p>
<div className="flex gap-2 text-xs text-muted-foreground">
<span>🌐 {globalConfigs.length} Global</span>
<span> {llmConfigs.length} Custom</span>
</div>
</div>
<div className="flex h-12 w-12 items-center justify-center rounded-lg bg-blue-500/10">
<Bot className="h-6 w-6 text-blue-600" />
@ -422,30 +439,73 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<SelectItem value="unassigned">
<span className="text-muted-foreground">Unassigned</span>
</SelectItem>
{availableConfigs.map((config) => (
<SelectItem key={config.id} value={config.id.toString()}>
<div className="flex items-center gap-2">
<Badge variant="outline" className="text-xs">
{config.provider}
</Badge>
<span>{config.name}</span>
<span className="text-muted-foreground">
({config.model_name})
</span>
{/* Global Configurations */}
{globalConfigs.length > 0 && (
<>
<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground">
Global Configurations
</div>
</SelectItem>
))}
{globalConfigs.map((config) => (
<SelectItem key={config.id} value={config.id.toString()}>
<div className="flex items-center gap-2">
<Badge variant="outline" className="text-xs">
{config.provider}
</Badge>
<span>{config.name}</span>
<span className="text-muted-foreground">
({config.model_name})
</span>
<Badge variant="secondary" className="text-xs">
🌐 Global
</Badge>
</div>
</SelectItem>
))}
</>
)}
{/* Custom Configurations */}
{llmConfigs.length > 0 && (
<>
<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground">
Your Configurations
</div>
{llmConfigs
.filter(
(config) => config.id && config.id.toString().trim() !== ""
)
.map((config) => (
<SelectItem key={config.id} value={config.id.toString()}>
<div className="flex items-center gap-2">
<Badge variant="outline" className="text-xs">
{config.provider}
</Badge>
<span>{config.name}</span>
<span className="text-muted-foreground">
({config.model_name})
</span>
</div>
</SelectItem>
))}
</>
)}
</SelectContent>
</Select>
</div>
{assignedConfig && (
<div className="mt-3 p-3 bg-muted/50 rounded-lg">
<div className="flex items-center gap-2 text-sm">
<div className="flex items-center gap-2 text-sm flex-wrap">
<Bot className="w-4 h-4" />
<span className="font-medium">Assigned:</span>
<Badge variant="secondary">{assignedConfig.provider}</Badge>
<span>{assignedConfig.name}</span>
{assignedConfig.is_global && (
<Badge variant="outline" className="text-xs">
🌐 Global
</Badge>
)}
</div>
<div className="text-xs text-muted-foreground mt-1">
Model: {assignedConfig.model_name}

View file

@ -51,7 +51,12 @@ import {
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import { type CreateLLMConfig, type LLMConfig, useLLMConfigs } from "@/hooks/use-llm-configs";
import {
type CreateLLMConfig,
type LLMConfig,
useGlobalLLMConfigs,
useLLMConfigs,
} from "@/hooks/use-llm-configs";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
@ -69,6 +74,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
deleteLLMConfig,
refreshConfigs,
} = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
const [isAddingNew, setIsAddingNew] = useState(false);
const [editingConfig, setEditingConfig] = useState<LLMConfig | null>(null);
const [showApiKey, setShowApiKey] = useState<Record<number, boolean>>({});
@ -224,6 +230,20 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
</Alert>
)}
{/* Global Configs Info Alert */}
{!loading && !error && globalConfigs.length > 0 && (
<Alert>
<CheckCircle className="h-4 w-4" />
<AlertDescription>
<strong>
{globalConfigs.length} global configuration{globalConfigs.length > 1 ? "s" : ""}
</strong>{" "}
available for use. You can assign them in the LLM Roles tab without adding your own API
keys.
</AlertDescription>
</Alert>
)}
{/* Loading State */}
{loading && (
<Card>
@ -310,8 +330,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<div className="space-y-2 mb-6">
<h3 className="text-xl font-semibold">No Configurations Yet</h3>
<p className="text-muted-foreground max-w-sm">
Get started by adding your first LLM provider configuration to begin using the
system.
Add your own LLM provider configurations.
</p>
</div>
<Button onClick={() => setIsAddingNew(true)} size="lg">
@ -412,12 +431,14 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
{/* Metadata */}
<div className="flex flex-wrap items-center gap-4 pt-4 border-t border-border/50">
<div className="flex items-center gap-2 text-xs text-muted-foreground">
<Clock className="h-3 w-3" />
<span>
Created {new Date(config.created_at).toLocaleDateString()}
</span>
</div>
{config.created_at && (
<div className="flex items-center gap-2 text-xs text-muted-foreground">
<Clock className="h-3 w-3" />
<span>
Created {new Date(config.created_at).toLocaleDateString()}
</span>
</div>
)}
<div className="flex items-center gap-2 text-xs">
<div className="h-2 w-2 rounded-full bg-green-500"></div>
<span className="text-green-600 font-medium">Active</span>

View file

@ -12,8 +12,9 @@ export interface LLMConfig {
api_base?: string;
language?: string;
litellm_params?: Record<string, any>;
created_at: string;
search_space_id: number;
created_at?: string;
search_space_id?: number;
is_global?: boolean;
}
export interface LLMPreferences {
@ -283,3 +284,48 @@ export function useLLMPreferences(searchSpaceId: number | null) {
isOnboardingComplete,
};
}
/**
 * React hook that loads admin-provided global LLM configurations from the
 * backend (`GET /api/v1/global-llm-configs`) and exposes them together with
 * a manual refresh handle.
 *
 * Global configs are shared across users (the migration in this change set
 * gives them negative IDs, distinct from user-created `llm_configs` rows).
 *
 * Returns:
 * - `globalConfigs`: fetched configs; empty until the first load resolves.
 * - `loading`: true while a fetch is in flight.
 * - `error`: message from the most recent failed fetch, or null on success.
 * - `refreshGlobalConfigs`: re-runs the fetch on demand.
 */
export function useGlobalLLMConfigs() {
	const [globalConfigs, setGlobalConfigs] = useState<LLMConfig[]>([]);
	const [loading, setLoading] = useState(true);
	const [error, setError] = useState<string | null>(null);

	const fetchGlobalConfigs = async () => {
		try {
			setLoading(true);
			const response = await fetch(
				`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/global-llm-configs`,
				{
					headers: {
						Authorization: `Bearer ${localStorage.getItem("surfsense_bearer_token")}`,
					},
					method: "GET",
				}
			);

			if (!response.ok) {
				throw new Error("Failed to fetch global LLM configurations");
			}

			const data = await response.json();
			setGlobalConfigs(data);
			// Clear any stale error from a previous failed refresh.
			setError(null);
		} catch (err: unknown) {
			// Narrow `unknown` instead of using `any`: reading `.message` off a
			// non-Error throw (e.g. a rejected string) would otherwise yield
			// undefined silently.
			const message =
				err instanceof Error && err.message
					? err.message
					: "Failed to fetch global LLM configurations";
			setError(message);
			console.error("Error fetching global LLM configurations:", err);
		} finally {
			setLoading(false);
		}
	};

	// Load once on mount; refreshGlobalConfigs covers subsequent reloads.
	useEffect(() => {
		fetchGlobalConfigs();
	}, []);

	return {
		globalConfigs,
		loading,
		error,
		refreshGlobalConfigs: fetchGlobalConfigs,
	};
}

View file

@ -583,7 +583,9 @@
"assigned": "Assigned",
"all_roles_assigned_saved": "All roles assigned and saved!",
"progress": "Progress",
"roles_assigned": "{assigned} of {total} roles assigned"
"roles_assigned": "{assigned} of {total} roles assigned",
"global_configs": "Global Configurations",
"your_configs": "Your Configurations"
},
"model_config": {
"title": "Model Configurations",
@ -598,7 +600,7 @@
"manage_configs": "Manage and configure your LLM providers",
"add_config": "Add Configuration",
"no_configs": "No Configurations Yet",
"no_configs_desc": "Get started by adding your first LLM provider configuration to begin using the system.",
"no_configs_desc": "Add your own LLM provider configurations.",
"add_first_config": "Add First Configuration",
"created": "Created"
},

View file

@ -583,7 +583,9 @@
"assigned": "已分配",
"all_roles_assigned_saved": "所有角色已分配并保存!",
"progress": "进度",
"roles_assigned": "{assigned}/{total} 个角色已分配"
"roles_assigned": "{assigned}/{total} 个角色已分配",
"global_configs": "全局配置",
"your_configs": "您的配置"
},
"model_config": {
"title": "模型配置",
@ -598,7 +600,7 @@
"manage_configs": "管理和配置您的 LLM 提供商",
"add_config": "添加配置",
"no_configs": "暂无配置",
"no_configs_desc": "开始添加您的第一个 LLM 提供商配置以开始使用系统。",
"no_configs_desc": "添加您自己的 LLM 提供商配置。",
"add_first_config": "添加首个配置",
"created": "创建于"
},