Merge remote-tracking branch 'upstream/dev' into pr-611

This commit is contained in:
Anish Sarkar 2025-12-23 15:45:28 +05:30
commit 6f330e7b8d
92 changed files with 5331 additions and 6029 deletions

View file

@ -28,12 +28,15 @@ COPY surfsense_web/package.json surfsense_web/pnpm-lock.yaml* ./
COPY surfsense_web/source.config.ts ./
COPY surfsense_web/content ./content
# Install dependencies
RUN pnpm install --frozen-lockfile
# Install dependencies (skip postinstall which requires all source files)
RUN pnpm install --frozen-lockfile --ignore-scripts
# Copy source
COPY surfsense_web/ ./
# Run fumadocs-mdx postinstall now that source files are available
RUN pnpm fumadocs-mdx
# Build args for frontend
ARG NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000
ARG NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL

View file

@ -0,0 +1,114 @@
"""Add NewLLMConfig table for configurable LLM + prompt settings
Revision ID: 51
Revises: 50
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "51"
down_revision: str | None = "50"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Create the ``new_llm_configs`` table and its indexes, idempotently.

    The table combines LLM model settings (provider, model_name, api_key, ...)
    with prompt configuration (system instructions, citation toggle) per
    search space.
    """
    # Table creation is wrapped in a PL/pgSQL DO block so re-running this
    # migration against a database that already has the table is a no-op.
    op.execute(
        """
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'new_llm_configs'
            ) THEN
                CREATE TABLE new_llm_configs (
                    id SERIAL PRIMARY KEY,
                    created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),

                    -- Basic info
                    name VARCHAR(100) NOT NULL,
                    description VARCHAR(500),

                    -- LLM model configuration (same as llm_configs, excluding language)
                    provider litellmprovider NOT NULL,
                    custom_provider VARCHAR(100),
                    model_name VARCHAR(100) NOT NULL,
                    api_key TEXT NOT NULL,
                    api_base VARCHAR(500),
                    litellm_params JSONB DEFAULT '{}',

                    -- Prompt configuration
                    system_instructions TEXT NOT NULL DEFAULT '',
                    use_default_system_instructions BOOLEAN NOT NULL DEFAULT TRUE,
                    citations_enabled BOOLEAN NOT NULL DEFAULT TRUE,

                    -- Default flag
                    is_default BOOLEAN NOT NULL DEFAULT FALSE,

                    -- Foreign key to search space
                    search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE
                );
            END IF;
        END$$;
        """
    )

    # Index creation, likewise guarded so pre-existing indexes are skipped.
    index_columns = (
        ("ix_new_llm_configs_id", "id"),
        ("ix_new_llm_configs_created_at", "created_at"),
        ("ix_new_llm_configs_name", "name"),
        ("ix_new_llm_configs_search_space_id", "search_space_id"),
    )
    guards = "\n".join(
        f"""
            IF NOT EXISTS (
                SELECT 1 FROM pg_indexes
                WHERE tablename = 'new_llm_configs' AND indexname = '{index_name}'
            ) THEN
                CREATE INDEX {index_name} ON new_llm_configs({column});
            END IF;"""
        for index_name, column in index_columns
    )
    op.execute(f"DO $$\nBEGIN\n{guards}\nEND$$;")
def downgrade() -> None:
    """Drop the ``new_llm_configs`` table and all of its indexes."""
    # Drop indexes first (reverse of the creation order in upgrade()).
    for index_name in (
        "ix_new_llm_configs_search_space_id",
        "ix_new_llm_configs_name",
        "ix_new_llm_configs_created_at",
        "ix_new_llm_configs_id",
    ):
        op.execute(f"DROP INDEX IF EXISTS {index_name}")
    # Then drop the table itself.
    op.execute("DROP TABLE IF EXISTS new_llm_configs")

View file

@ -0,0 +1,130 @@
"""Rename LLM preference columns in searchspaces table
Revision ID: 52
Revises: 51
Create Date: 2024-12-22
This migration renames the LLM preference columns:
- fast_llm_id -> agent_llm_id
- long_context_llm_id -> document_summary_llm_id
- strategic_llm_id is removed (data migrated to document_summary_llm_id)
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "52"
down_revision = "51"
branch_labels = None
depends_on = None
def upgrade():
    """Rename the LLM preference columns on ``searchspaces``.

    fast_llm_id -> agent_llm_id, long_context_llm_id -> document_summary_llm_id,
    and strategic_llm_id is folded into document_summary_llm_id then dropped.
    Every statement is guarded so the migration tolerates databases that are
    already (partly) in the new shape.
    """

    def _when_column_exists(column: str, action: str) -> str:
        # Wrap *action* in a DO block that only runs when *column* is still
        # present on searchspaces.
        return f"""
        DO $$
        BEGIN
            IF EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'searchspaces' AND column_name = '{column}'
            ) THEN
                {action}
            END IF;
        END$$;
        """

    # Preserve strategic_llm_id values by copying them into the soon-to-be-renamed
    # long_context_llm_id column wherever that column is still NULL.
    op.execute(
        _when_column_exists(
            "strategic_llm_id",
            """UPDATE searchspaces
                SET long_context_llm_id = strategic_llm_id
                WHERE long_context_llm_id IS NULL AND strategic_llm_id IS NOT NULL;""",
        )
    )

    # Rename columns (only if they still carry the old names).
    op.execute(
        _when_column_exists(
            "fast_llm_id",
            "ALTER TABLE searchspaces RENAME COLUMN fast_llm_id TO agent_llm_id;",
        )
    )
    op.execute(
        _when_column_exists(
            "long_context_llm_id",
            "ALTER TABLE searchspaces RENAME COLUMN long_context_llm_id TO document_summary_llm_id;",
        )
    )

    # Finally drop strategic_llm_id now that its data has been migrated.
    op.execute(
        _when_column_exists(
            "strategic_llm_id",
            "ALTER TABLE searchspaces DROP COLUMN strategic_llm_id;",
        )
    )
def downgrade():
    """Restore the original LLM preference column names on ``searchspaces``.

    Note: strategic_llm_id is re-created empty — its original values cannot
    be recovered after the upgrade folded them into document_summary_llm_id.
    """
    # Re-add the strategic_llm_id column if it is missing.
    op.execute(
        """
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'searchspaces' AND column_name = 'strategic_llm_id'
            ) THEN
                ALTER TABLE searchspaces ADD COLUMN strategic_llm_id INTEGER;
            END IF;
        END$$;
        """
    )

    # Undo the renames, guarding on the new column names still being present.
    for current_name, original_name in (
        ("agent_llm_id", "fast_llm_id"),
        ("document_summary_llm_id", "long_context_llm_id"),
    ):
        op.execute(
            f"""
            DO $$
            BEGIN
                IF EXISTS (
                    SELECT 1 FROM information_schema.columns
                    WHERE table_name = 'searchspaces' AND column_name = '{current_name}'
                ) THEN
                    ALTER TABLE searchspaces RENAME COLUMN {current_name} TO {original_name};
                END IF;
            END$$;
            """
        )

View file

@ -0,0 +1,244 @@
"""Migrate data from old llm_configs to new_llm_configs and cleanup
Revision ID: 53
Revises: 52
Create Date: 2024-12-22
This migration:
1. Migrates data from old llm_configs table to new_llm_configs (preserving user configs)
2. Drops the old llm_configs table (no longer used)
3. Removes the is_default column from new_llm_configs (roles now determine which config to use)
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "53"
down_revision = "52"
branch_labels = None
depends_on = None
def upgrade():
    """Migrate data out of the legacy ``llm_configs`` table and retire it.

    Steps:
    1. Copy user-created rows from ``llm_configs`` into ``new_llm_configs``
       (skipping name collisions per search space).
    2. Repoint ``searchspaces.agent_llm_id`` / ``document_summary_llm_id``
       at the migrated rows.
    3. Drop the ``is_default`` column from ``new_llm_configs`` (role
       assignments now determine which config to use).
    4. Drop the old ``llm_configs`` table.
    """
    # STEP 1: Migrate data from old llm_configs to new_llm_configs.
    # This preserves any user-created configurations.
    op.execute(
        """
        DO $$
        DECLARE
            migrated_count BIGINT := 0;
        BEGIN
            -- Only migrate if both tables exist
            IF EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'llm_configs'
            ) AND EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'new_llm_configs'
            ) THEN
                -- Insert old configs into new table (skipping duplicates by name+search_space_id)
                INSERT INTO new_llm_configs (
                    name,
                    description,
                    provider,
                    custom_provider,
                    model_name,
                    api_key,
                    api_base,
                    litellm_params,
                    system_instructions,
                    use_default_system_instructions,
                    citations_enabled,
                    is_default,
                    search_space_id,
                    created_at
                )
                SELECT
                    lc.name,
                    NULL as description, -- Old table didn't have description
                    lc.provider,
                    lc.custom_provider,
                    lc.model_name,
                    lc.api_key,
                    lc.api_base,
                    COALESCE(lc.litellm_params, '{}'::jsonb),
                    '' as system_instructions, -- Use defaults
                    TRUE as use_default_system_instructions,
                    TRUE as citations_enabled,
                    FALSE as is_default,
                    lc.search_space_id,
                    COALESCE(lc.created_at, NOW())
                FROM llm_configs lc
                WHERE lc.search_space_id IS NOT NULL
                AND NOT EXISTS (
                    -- Skip if a config with same name already exists in new_llm_configs for this search space
                    SELECT 1 FROM new_llm_configs nlc
                    WHERE nlc.name = lc.name
                    AND nlc.search_space_id = lc.search_space_id
                );

                -- FIX: report the number of rows actually inserted. The previous
                -- version re-counted every llm_configs row with a search_space_id,
                -- which also counted configs that were skipped as duplicates.
                GET DIAGNOSTICS migrated_count = ROW_COUNT;
                RAISE NOTICE 'Migrated % configs from llm_configs to new_llm_configs',
                    migrated_count;
            END IF;
        END$$;
        """
    )

    # STEP 2: Update searchspaces to point to new_llm_configs for their agent LLM.
    # If a search space had an agent_llm_id pointing to old llm_configs, try to
    # find the corresponding (name-matched) config in new_llm_configs.
    op.execute(
        """
        DO $$
        BEGIN
            IF EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'llm_configs'
            ) THEN
                -- Update agent_llm_id to point to migrated config in new_llm_configs
                UPDATE searchspaces ss
                SET agent_llm_id = (
                    SELECT nlc.id
                    FROM new_llm_configs nlc
                    JOIN llm_configs lc ON lc.name = nlc.name AND lc.search_space_id = nlc.search_space_id
                    WHERE lc.id = ss.agent_llm_id
                    AND nlc.search_space_id = ss.id
                    LIMIT 1
                )
                WHERE ss.agent_llm_id IS NOT NULL
                AND ss.agent_llm_id > 0 -- Only positive IDs (not global configs)
                AND EXISTS (
                    SELECT 1 FROM llm_configs lc WHERE lc.id = ss.agent_llm_id
                );

                -- Update document_summary_llm_id similarly
                UPDATE searchspaces ss
                SET document_summary_llm_id = (
                    SELECT nlc.id
                    FROM new_llm_configs nlc
                    JOIN llm_configs lc ON lc.name = nlc.name AND lc.search_space_id = nlc.search_space_id
                    WHERE lc.id = ss.document_summary_llm_id
                    AND nlc.search_space_id = ss.id
                    LIMIT 1
                )
                WHERE ss.document_summary_llm_id IS NOT NULL
                AND ss.document_summary_llm_id > 0 -- Only positive IDs (not global configs)
                AND EXISTS (
                    SELECT 1 FROM llm_configs lc WHERE lc.id = ss.document_summary_llm_id
                );
            END IF;
        END$$;
        """
    )

    # STEP 3: Drop the is_default column from new_llm_configs
    # (role assignments now determine which config to use).
    op.execute(
        """
        DO $$
        BEGIN
            IF EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'new_llm_configs' AND column_name = 'is_default'
            ) THEN
                ALTER TABLE new_llm_configs DROP COLUMN is_default;
            END IF;
        END$$;
        """
    )

    # STEP 4: Drop the old llm_configs table (data has been migrated).
    op.execute("DROP TABLE IF EXISTS llm_configs CASCADE")
def downgrade():
    """Best-effort reversal: recreate ``llm_configs`` and copy configs back.

    The old ``language`` column cannot be recovered, so migrated rows get the
    placeholder value 'English'. The ``is_default`` column is restored on
    ``new_llm_configs`` with its original FALSE default.
    """
    # 1. Recreate the old llm_configs table (with its indexes) if missing.
    recreate_old_table = """
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'llm_configs'
            ) THEN
                CREATE TABLE llm_configs (
                    id SERIAL PRIMARY KEY,
                    name VARCHAR(100) NOT NULL,
                    provider litellmprovider NOT NULL,
                    custom_provider VARCHAR(100),
                    model_name VARCHAR(100) NOT NULL,
                    api_key TEXT NOT NULL,
                    api_base VARCHAR(500),
                    language VARCHAR(50),
                    litellm_params JSONB DEFAULT '{}',
                    search_space_id INTEGER REFERENCES searchspaces(id) ON DELETE CASCADE,
                    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
                    updated_at TIMESTAMP WITH TIME ZONE
                );

                -- Create indexes
                CREATE INDEX IF NOT EXISTS ix_llm_configs_id ON llm_configs(id);
                CREATE INDEX IF NOT EXISTS ix_llm_configs_name ON llm_configs(name);
                CREATE INDEX IF NOT EXISTS ix_llm_configs_created_at ON llm_configs(created_at);
            END IF;
        END$$;
        """

    # 2. Copy rows back from new_llm_configs, skipping name collisions
    #    per search space.
    copy_configs_back = """
        DO $$
        BEGIN
            IF EXISTS (
                SELECT FROM information_schema.tables
                WHERE table_name = 'new_llm_configs'
            ) THEN
                INSERT INTO llm_configs (
                    name,
                    provider,
                    custom_provider,
                    model_name,
                    api_key,
                    api_base,
                    language,
                    litellm_params,
                    search_space_id,
                    created_at
                )
                SELECT
                    nlc.name,
                    nlc.provider,
                    nlc.custom_provider,
                    nlc.model_name,
                    nlc.api_key,
                    nlc.api_base,
                    'English' as language, -- Default language
                    COALESCE(nlc.litellm_params, '{}'::jsonb),
                    nlc.search_space_id,
                    nlc.created_at
                FROM new_llm_configs nlc
                WHERE nlc.search_space_id IS NOT NULL
                AND NOT EXISTS (
                    SELECT 1 FROM llm_configs lc
                    WHERE lc.name = nlc.name
                    AND lc.search_space_id = nlc.search_space_id
                );
            END IF;
        END$$;
        """

    # 3. Restore the is_default column dropped by upgrade().
    restore_is_default = """
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = 'new_llm_configs' AND column_name = 'is_default'
            ) THEN
                ALTER TABLE new_llm_configs ADD COLUMN is_default BOOLEAN NOT NULL DEFAULT FALSE;
            END IF;
        END$$;
        """

    for statement in (recreate_old_table, copy_configs_back, restore_is_default):
        op.execute(statement)

View file

@ -31,56 +31,50 @@ from .system_prompt import (
)
# Tools - registry exports
# Tools - factory exports (for direct use)
# Tools - knowledge base utilities
from .tools import (
BUILTIN_TOOLS,
ToolDefinition,
build_tools,
get_all_tool_names,
get_default_enabled_tools,
get_tool_by_name,
)
# Tools - factory exports (for direct use)
from .tools import (
create_display_image_tool,
create_generate_podcast_tool,
create_link_preview_tool,
create_scrape_webpage_tool,
create_search_knowledge_base_tool,
)
# Tools - knowledge base utilities
from .tools import (
format_documents_for_context,
get_all_tool_names,
get_default_enabled_tools,
get_tool_by_name,
search_knowledge_base_async,
)
__all__ = [
# Agent factory
"create_surfsense_deep_agent",
# Context
"SurfSenseContextSchema",
# LLM config
"create_chat_litellm_from_config",
"load_llm_config_from_yaml",
# Tools registry
"BUILTIN_TOOLS",
# System prompt
"SURFSENSE_CITATION_INSTRUCTIONS",
"SURFSENSE_SYSTEM_PROMPT",
"build_surfsense_system_prompt",
# Tools registry
"BUILTIN_TOOLS",
# Context
"SurfSenseContextSchema",
"ToolDefinition",
"build_surfsense_system_prompt",
"build_tools",
"get_all_tool_names",
"get_default_enabled_tools",
"get_tool_by_name",
# LLM config
"create_chat_litellm_from_config",
# Tool factories
"create_display_image_tool",
"create_generate_podcast_tool",
"create_link_preview_tool",
"create_scrape_webpage_tool",
"create_search_knowledge_base_tool",
# Agent factory
"create_surfsense_deep_agent",
# Knowledge base utilities
"format_documents_for_context",
"get_all_tool_names",
"get_default_enabled_tools",
"get_tool_by_name",
"load_llm_config_from_yaml",
"search_knowledge_base_async",
]

View file

@ -2,7 +2,8 @@
SurfSense deep agent implementation.
This module provides the factory function for creating SurfSense deep agents
with configurable tools via the tools registry.
with configurable tools via the tools registry and configurable prompts
via NewLLMConfig.
"""
from collections.abc import Sequence
@ -14,7 +15,11 @@ from langgraph.types import Checkpointer
from sqlalchemy.ext.asyncio import AsyncSession
from app.agents.new_chat.context import SurfSenseContextSchema
from app.agents.new_chat.system_prompt import build_surfsense_system_prompt
from app.agents.new_chat.llm_config import AgentConfig
from app.agents.new_chat.system_prompt import (
build_configurable_system_prompt,
build_surfsense_system_prompt,
)
from app.agents.new_chat.tools import build_tools
from app.services.connector_service import ConnectorService
@ -29,13 +34,14 @@ def create_surfsense_deep_agent(
db_session: AsyncSession,
connector_service: ConnectorService,
checkpointer: Checkpointer,
agent_config: AgentConfig | None = None,
enabled_tools: list[str] | None = None,
disabled_tools: list[str] | None = None,
additional_tools: Sequence[BaseTool] | None = None,
firecrawl_api_key: str | None = None,
):
"""
Create a SurfSense deep agent with configurable tools.
Create a SurfSense deep agent with configurable tools and prompts.
The agent comes with built-in tools that can be configured:
- search_knowledge_base: Search the user's personal knowledge base
@ -44,6 +50,10 @@ def create_surfsense_deep_agent(
- display_image: Display images in chat
- scrape_webpage: Extract content from webpages
The system prompt can be configured via agent_config:
- Custom system instructions (or use defaults)
- Citation toggle (enable/disable citation requirements)
Args:
llm: ChatLiteLLM instance for the agent's language model
search_space_id: The user's search space ID
@ -51,6 +61,8 @@ def create_surfsense_deep_agent(
connector_service: Initialized connector service for knowledge base search
checkpointer: LangGraph checkpointer for conversation state persistence.
Use AsyncPostgresSaver for production or MemorySaver for testing.
agent_config: Optional AgentConfig from NewLLMConfig for prompt configuration.
If None, uses default system prompt with citations enabled.
enabled_tools: Explicit list of tool names to enable. If None, all default tools
are enabled. Use this to limit which tools are available.
disabled_tools: List of tool names to disable. Applied after enabled_tools.
@ -64,9 +76,21 @@ def create_surfsense_deep_agent(
CompiledStateGraph: The configured deep agent
Examples:
# Create agent with all default tools
# Create agent with all default tools and default prompt
agent = create_surfsense_deep_agent(llm, search_space_id, db_session, ...)
# Create agent with custom prompt configuration
agent = create_surfsense_deep_agent(
llm, search_space_id, db_session, ...,
agent_config=AgentConfig(
provider="OPENAI",
model_name="gpt-4",
api_key="...",
system_instructions="Custom instructions...",
citations_enabled=False,
)
)
# Create agent with only specific tools
agent = create_surfsense_deep_agent(
llm, search_space_id, db_session, ...,
@ -101,11 +125,23 @@ def create_surfsense_deep_agent(
additional_tools=list(additional_tools) if additional_tools else None,
)
# Build system prompt based on agent_config
if agent_config is not None:
# Use configurable prompt with settings from NewLLMConfig
system_prompt = build_configurable_system_prompt(
custom_system_instructions=agent_config.system_instructions,
use_default_system_instructions=agent_config.use_default_system_instructions,
citations_enabled=agent_config.citations_enabled,
)
else:
# Use default prompt (with citations enabled)
system_prompt = build_surfsense_system_prompt()
# Create the deep agent with system prompt and checkpointer
agent = create_deep_agent(
model=llm,
tools=tools,
system_prompt=build_surfsense_system_prompt(),
system_prompt=system_prompt,
context_schema=SurfSenseContextSchema,
checkpointer=checkpointer,
)

View file

@ -1,14 +1,144 @@
"""
LLM configuration utilities for SurfSense agents.
This module provides functions for loading LLM configurations from YAML files
and creating ChatLiteLLM instances from configuration dictionaries.
This module provides functions for loading LLM configurations from:
1. YAML files (global configs with negative IDs)
2. Database NewLLMConfig table (user-created configs with positive IDs)
It also provides utilities for creating ChatLiteLLM instances and
managing prompt configurations.
"""
from dataclasses import dataclass
from pathlib import Path
import yaml
from langchain_litellm import ChatLiteLLM
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
# Provider mapping for LiteLLM model string construction
PROVIDER_MAP = {
"OPENAI": "openai",
"ANTHROPIC": "anthropic",
"GROQ": "groq",
"COHERE": "cohere",
"GOOGLE": "gemini",
"OLLAMA": "ollama",
"MISTRAL": "mistral",
"AZURE_OPENAI": "azure",
"OPENROUTER": "openrouter",
"XAI": "xai",
"BEDROCK": "bedrock",
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"REPLICATE": "replicate",
"PERPLEXITY": "perplexity",
"ANYSCALE": "anyscale",
"DEEPINFRA": "deepinfra",
"CEREBRAS": "cerebras",
"SAMBANOVA": "sambanova",
"AI21": "ai21",
"CLOUDFLARE": "cloudflare",
"DATABRICKS": "databricks",
"COMETAPI": "cometapi",
"HUGGINGFACE": "huggingface",
"CUSTOM": "custom",
}
@dataclass
class AgentConfig:
"""
Complete configuration for the SurfSense agent.
This combines LLM settings with prompt configuration from NewLLMConfig.
"""
# LLM Model Settings
provider: str
model_name: str
api_key: str
api_base: str | None = None
custom_provider: str | None = None
litellm_params: dict | None = None
# Prompt Configuration
system_instructions: str | None = None
use_default_system_instructions: bool = True
citations_enabled: bool = True
# Metadata
config_id: int | None = None
config_name: str | None = None
@classmethod
def from_new_llm_config(cls, config) -> "AgentConfig":
"""
Create an AgentConfig from a NewLLMConfig database model.
Args:
config: NewLLMConfig database model instance
Returns:
AgentConfig instance
"""
return cls(
provider=config.provider.value
if hasattr(config.provider, "value")
else str(config.provider),
model_name=config.model_name,
api_key=config.api_key,
api_base=config.api_base,
custom_provider=config.custom_provider,
litellm_params=config.litellm_params,
system_instructions=config.system_instructions,
use_default_system_instructions=config.use_default_system_instructions,
citations_enabled=config.citations_enabled,
config_id=config.id,
config_name=config.name,
)
@classmethod
def from_yaml_config(cls, yaml_config: dict) -> "AgentConfig":
"""
Create an AgentConfig from a YAML configuration dictionary.
YAML configs now support the same prompt configuration fields as NewLLMConfig:
- system_instructions: Custom system instructions (empty string uses defaults)
- use_default_system_instructions: Whether to use default instructions
- citations_enabled: Whether citations are enabled
Args:
yaml_config: Configuration dictionary from YAML file
Returns:
AgentConfig instance
"""
# Get system instructions from YAML, default to empty string
system_instructions = yaml_config.get("system_instructions", "")
return cls(
provider=yaml_config.get("provider", "").upper(),
model_name=yaml_config.get("model_name", ""),
api_key=yaml_config.get("api_key", ""),
api_base=yaml_config.get("api_base"),
custom_provider=yaml_config.get("custom_provider"),
litellm_params=yaml_config.get("litellm_params"),
# Prompt configuration from YAML (with defaults for backwards compatibility)
system_instructions=system_instructions if system_instructions else None,
use_default_system_instructions=yaml_config.get(
"use_default_system_instructions", True
),
citations_enabled=yaml_config.get("citations_enabled", True),
config_id=yaml_config.get("id"),
config_name=yaml_config.get("name"),
)
def load_llm_config_from_yaml(llm_config_id: int = -1) -> dict | None:
@ -47,9 +177,118 @@ def load_llm_config_from_yaml(llm_config_id: int = -1) -> dict | None:
return None
async def load_new_llm_config_from_db(
    session: AsyncSession,
    config_id: int,
) -> "AgentConfig | None":
    """
    Load a NewLLMConfig row by ID and wrap it as an AgentConfig.

    Args:
        session: AsyncSession for database access
        config_id: The ID of the NewLLMConfig to load

    Returns:
        AgentConfig instance, or None when the row is missing or the
        query fails (a diagnostic is printed in either case).
    """
    # Import here to avoid circular imports
    from app.db import NewLLMConfig

    try:
        query_result = await session.execute(
            select(NewLLMConfig).filter(NewLLMConfig.id == config_id)
        )
        row = query_result.scalars().first()
        if row is None:
            print(f"Error: NewLLMConfig with id {config_id} not found")
            return None
        return AgentConfig.from_new_llm_config(row)
    except Exception as exc:
        # Best-effort loader: callers treat None as "config unavailable".
        print(f"Error loading NewLLMConfig from database: {exc}")
        return None
async def load_agent_llm_config_for_search_space(
    session: AsyncSession,
    search_space_id: int,
) -> "AgentConfig | None":
    """
    Resolve and load the agent LLM configuration chosen by a search space.

    The search space's agent_llm_id selects the config source:
    - positive ID: NewLLMConfig database table
    - negative ID: YAML global configs
    - None: falls back to the first global config (id=-1)

    Args:
        session: AsyncSession for database access
        search_space_id: The search space ID

    Returns:
        AgentConfig instance or None if not found
    """
    # Import here to avoid circular imports
    from app.db import SearchSpace

    try:
        lookup = await session.execute(
            select(SearchSpace).filter(SearchSpace.id == search_space_id)
        )
        space = lookup.scalars().first()
        if space is None:
            print(f"Error: SearchSpace with id {search_space_id} not found")
            return None

        # A missing preference falls back to the first global YAML config.
        chosen_id = -1 if space.agent_llm_id is None else space.agent_llm_id

        # Delegate to the unified loader (handles YAML vs database IDs).
        return await load_agent_config(session, chosen_id, search_space_id)
    except Exception as exc:
        print(f"Error loading agent LLM config for search space {search_space_id}: {exc}")
        return None
async def load_agent_config(
    session: AsyncSession,
    config_id: int,
    search_space_id: int | None = None,
) -> "AgentConfig | None":
    """
    Load an agent configuration from YAML or the database.

    Main entry point for loading configurations:
    - Negative IDs: YAML file (global configs)
    - Non-negative IDs: NewLLMConfig database table

    Args:
        session: AsyncSession for database access
        config_id: The config ID (negative for YAML, positive for database)
        search_space_id: Optional search space ID for context

    Returns:
        AgentConfig instance or None if not found
    """
    if config_id >= 0:
        # Database-backed config (NewLLMConfig).
        return await load_new_llm_config_from_db(session, config_id)
    # Global configs live in the YAML file and carry negative IDs.
    yaml_config = load_llm_config_from_yaml(config_id)
    return AgentConfig.from_yaml_config(yaml_config) if yaml_config else None
def create_chat_litellm_from_config(llm_config: dict) -> ChatLiteLLM | None:
"""
Create a ChatLiteLLM instance from a global LLM config.
Create a ChatLiteLLM instance from a global LLM config dictionary.
Args:
llm_config: LLM configuration dictionary from YAML
@ -57,34 +296,12 @@ def create_chat_litellm_from_config(llm_config: dict) -> ChatLiteLLM | None:
Returns:
ChatLiteLLM instance or None on error
"""
# Provider mapping (same as in llm_service.py)
provider_map = {
"OPENAI": "openai",
"ANTHROPIC": "anthropic",
"GROQ": "groq",
"COHERE": "cohere",
"GOOGLE": "gemini",
"OLLAMA": "ollama",
"MISTRAL": "mistral",
"AZURE_OPENAI": "azure",
"OPENROUTER": "openrouter",
"XAI": "xai",
"BEDROCK": "bedrock",
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
}
# Build the model string
if llm_config.get("custom_provider"):
model_string = f"{llm_config['custom_provider']}/{llm_config['model_name']}"
else:
provider = llm_config.get("provider", "").upper()
provider_prefix = provider_map.get(provider, provider.lower())
provider_prefix = PROVIDER_MAP.get(provider, provider.lower())
model_string = f"{provider_prefix}/{llm_config['model_name']}"
# Create ChatLiteLLM instance with streaming enabled
@ -103,3 +320,42 @@ def create_chat_litellm_from_config(llm_config: dict) -> ChatLiteLLM | None:
litellm_kwargs.update(llm_config["litellm_params"])
return ChatLiteLLM(**litellm_kwargs)
def create_chat_litellm_from_agent_config(
    agent_config: AgentConfig,
) -> ChatLiteLLM | None:
    """
    Build a streaming ChatLiteLLM instance from an AgentConfig.

    Args:
        agent_config: AgentConfig instance

    Returns:
        ChatLiteLLM instance or None on error
    """
    # A custom provider wins; otherwise map the provider name to its
    # LiteLLM prefix (falling back to the lowercased provider name).
    if agent_config.custom_provider:
        prefix = agent_config.custom_provider
    else:
        prefix = PROVIDER_MAP.get(
            agent_config.provider, agent_config.provider.lower()
        )

    litellm_kwargs: dict = {
        "model": f"{prefix}/{agent_config.model_name}",
        "api_key": agent_config.api_key,
        # Streaming is always enabled so tokens reach the client in real time.
        "streaming": True,
    }
    if agent_config.api_base:
        litellm_kwargs["api_base"] = agent_config.api_base
    if agent_config.litellm_params:
        # Caller-supplied LiteLLM params override the defaults above.
        litellm_kwargs.update(agent_config.litellm_params)

    return ChatLiteLLM(**litellm_kwargs)

View file

@ -3,10 +3,16 @@ System prompt building for SurfSense agents.
This module provides functions and constants for building the SurfSense system prompt
with configurable user instructions and citation support.
The prompt is composed of three parts:
1. System Instructions (configurable via NewLLMConfig)
2. Tools Instructions (always included, not configurable)
3. Citation Instructions (toggleable via NewLLMConfig.citations_enabled)
"""
from datetime import UTC, datetime
# Default system instructions - can be overridden via NewLLMConfig.system_instructions
SURFSENSE_SYSTEM_INSTRUCTIONS = """
<system_instruction>
You are SurfSense, a reasoning and acting AI agent designed to answer user questions using the user's personal knowledge base.
@ -219,12 +225,38 @@ However, from your video learning, it's important to note that asyncio is not su
</citation_instructions>
"""
# Anti-citation prompt - used when citations are disabled
# This explicitly tells the model NOT to include citations
SURFSENSE_NO_CITATION_INSTRUCTIONS = """
<citation_instructions>
IMPORTANT: Citations are DISABLED for this configuration.
DO NOT include any citations in your responses. Specifically:
1. Do NOT use the [citation:chunk_id] format anywhere in your response.
2. Do NOT reference document IDs, chunk IDs, or source IDs.
3. Simply provide the information naturally without any citation markers.
4. Write your response as if you're having a normal conversation, incorporating the information from your knowledge seamlessly.
When answering questions based on documents from the knowledge base:
- Present the information directly and confidently
- Do not mention that information comes from specific documents or chunks
- Integrate facts naturally into your response without attribution markers
Your goal is to provide helpful, informative answers in a clean, readable format without any citation notation.
</citation_instructions>
"""
def build_surfsense_system_prompt(
today: datetime | None = None,
) -> str:
"""
Build the SurfSense system prompt.
Build the SurfSense system prompt with default settings.
This is a convenience function that builds the prompt with:
- Default system instructions
- Tools instructions (always included)
- Citation instructions enabled
Args:
today: Optional datetime for today's date (defaults to current UTC date)
@ -241,4 +273,74 @@ def build_surfsense_system_prompt(
)
def build_configurable_system_prompt(
    custom_system_instructions: str | None = None,
    use_default_system_instructions: bool = True,
    citations_enabled: bool = True,
    today: datetime | None = None,
) -> str:
    """
    Build a configurable SurfSense system prompt based on NewLLMConfig settings.

    The prompt is composed of three parts:
    1. System Instructions - either custom or default SURFSENSE_SYSTEM_INSTRUCTIONS
    2. Tools Instructions - always included (SURFSENSE_TOOLS_INSTRUCTIONS)
    3. Citation Instructions - either SURFSENSE_CITATION_INSTRUCTIONS or
       SURFSENSE_NO_CITATION_INSTRUCTIONS

    Args:
        custom_system_instructions: Custom system instructions to use. If empty/None and
            use_default_system_instructions is True, defaults to
            SURFSENSE_SYSTEM_INSTRUCTIONS.
        use_default_system_instructions: Whether to use default instructions when
            custom_system_instructions is empty/None.
        citations_enabled: Whether to include citation instructions (True) or
            anti-citation instructions (False).
        today: Optional datetime for today's date (defaults to current UTC date)

    Returns:
        Complete system prompt string
    """
    resolved_today = (today or datetime.now(UTC)).astimezone(UTC).date().isoformat()

    def _fill_date(template: str) -> str:
        # FIX: use str.replace instead of str.format. User-supplied instructions
        # may contain literal braces (JSON samples, code snippets), which would
        # make str.format raise KeyError/ValueError/IndexError. replace() only
        # touches the exact {resolved_today} placeholder and is a no-op when the
        # placeholder is absent.
        return template.replace("{resolved_today}", resolved_today)

    # Determine system instructions
    if custom_system_instructions and custom_system_instructions.strip():
        # Use custom instructions, injecting the date placeholder if present
        system_instructions = _fill_date(custom_system_instructions)
    elif use_default_system_instructions:
        # Use default instructions
        system_instructions = _fill_date(SURFSENSE_SYSTEM_INSTRUCTIONS)
    else:
        # No system instructions (edge case)
        system_instructions = ""

    # Tools instructions are always included
    tools_instructions = SURFSENSE_TOOLS_INSTRUCTIONS

    # Citation instructions based on toggle
    citation_instructions = (
        SURFSENSE_CITATION_INSTRUCTIONS
        if citations_enabled
        else SURFSENSE_NO_CITATION_INSTRUCTIONS
    )

    return system_instructions + tools_instructions + citation_instructions
def get_default_system_instructions() -> str:
    """
    Return the default system instructions template.

    Intended for pre-populating the UI with the default value when a new
    NewLLMConfig is created; the returned text still contains the
    {resolved_today} placeholder.

    Returns:
        Default system instructions string (with {resolved_today} placeholder)
    """
    template = SURFSENSE_SYSTEM_INSTRUCTIONS
    return template.strip()
SURFSENSE_SYSTEM_PROMPT = build_surfsense_system_prompt()

View file

@ -13,15 +13,6 @@ Available tools:
"""
# Registry exports
from .registry import (
BUILTIN_TOOLS,
ToolDefinition,
build_tools,
get_all_tool_names,
get_default_enabled_tools,
get_tool_by_name,
)
# Tool factory exports (for direct use)
from .display_image import create_display_image_tool
from .knowledge_base import (
@ -31,6 +22,14 @@ from .knowledge_base import (
)
from .link_preview import create_link_preview_tool
from .podcast import create_generate_podcast_tool
from .registry import (
BUILTIN_TOOLS,
ToolDefinition,
build_tools,
get_all_tool_names,
get_default_enabled_tools,
get_tool_by_name,
)
from .scrape_webpage import create_scrape_webpage_tool
__all__ = [
@ -38,9 +37,6 @@ __all__ = [
"BUILTIN_TOOLS",
"ToolDefinition",
"build_tools",
"get_all_tool_names",
"get_default_enabled_tools",
"get_tool_by_name",
# Tool factories
"create_display_image_tool",
"create_generate_podcast_tool",
@ -49,6 +45,8 @@ __all__ = [
"create_search_knowledge_base_tool",
# Knowledge base utilities
"format_documents_for_context",
"get_all_tool_names",
"get_default_enabled_tools",
"get_tool_by_name",
"search_knowledge_base_async",
]

View file

@ -86,7 +86,9 @@ def create_display_image_tool():
ratio = "16:9" # Default
if "unsplash.com" in src or "pexels.com" in src:
ratio = "16:9"
elif "imgur.com" in src or "github.com" in src or "githubusercontent.com" in src:
elif (
"imgur.com" in src or "github.com" in src or "githubusercontent.com" in src
):
ratio = "auto"
return {
@ -101,4 +103,3 @@ def create_display_image_tool():
}
return display_image

View file

@ -605,4 +605,3 @@ def create_search_knowledge_base_tool(
)
return search_knowledge_base

View file

@ -46,13 +46,17 @@ def extract_og_content(html: str, property_name: str) -> str | None:
def extract_twitter_content(html: str, name: str) -> str | None:
"""Extract Twitter Card meta content from HTML."""
pattern = rf'<meta[^>]+name=["\']twitter:{name}["\'][^>]+content=["\']([^"\']+)["\']'
pattern = (
rf'<meta[^>]+name=["\']twitter:{name}["\'][^>]+content=["\']([^"\']+)["\']'
)
match = re.search(pattern, html, re.IGNORECASE)
if match:
return match.group(1)
# Try content before name
pattern = rf'<meta[^>]+content=["\']([^"\']+)["\'][^>]+name=["\']twitter:{name}["\']'
pattern = (
rf'<meta[^>]+content=["\']([^"\']+)["\'][^>]+name=["\']twitter:{name}["\']'
)
match = re.search(pattern, html, re.IGNORECASE)
if match:
return match.group(1)
@ -289,4 +293,3 @@ def create_link_preview_tool():
}
return link_preview

View file

@ -171,4 +171,3 @@ def create_generate_podcast_tool(
}
return generate_podcast

View file

@ -37,11 +37,18 @@ Example of adding a new tool:
),
"""
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any, Callable
from typing import Any
from langchain_core.tools import BaseTool
from .display_image import create_display_image_tool
from .knowledge_base import create_search_knowledge_base_tool
from .link_preview import create_link_preview_tool
from .podcast import create_generate_podcast_tool
from .scrape_webpage import create_scrape_webpage_tool
# =============================================================================
# Tool Definition
# =============================================================================
@ -71,13 +78,6 @@ class ToolDefinition:
# Built-in Tools Registry
# =============================================================================
# Import tool factory functions
from .display_image import create_display_image_tool
from .knowledge_base import create_search_knowledge_base_tool
from .link_preview import create_link_preview_tool
from .podcast import create_generate_podcast_tool
from .scrape_webpage import create_scrape_webpage_tool
# Registry of all built-in tools
# Contributors: Add your new tools here!
BUILTIN_TOOLS: list[ToolDefinition] = [
@ -228,4 +228,3 @@ def build_tools(
tools.extend(additional_tools)
return tools

View file

@ -156,7 +156,9 @@ def create_scrape_webpage_tool(firecrawl_api_key: str | None = None):
if not description and content:
# Use first paragraph as description
first_para = content.split("\n\n")[0] if content else ""
description = first_para[:300] + "..." if len(first_para) > 300 else first_para
description = (
first_para[:300] + "..." if len(first_para) > 300 else first_para
)
# Truncate content if needed
content, was_truncated = truncate_content(content, max_length)
@ -194,4 +196,3 @@ def create_scrape_webpage_tool(firecrawl_api_key: str | None = None):
}
return scrape_webpage

View file

@ -12,7 +12,7 @@ from litellm import aspeech
from app.config import config as app_config
from app.services.kokoro_tts_service import get_kokoro_tts_service
from app.services.llm_service import get_long_context_llm
from app.services.llm_service import get_document_summary_llm
from .configuration import Configuration
from .prompts import get_podcast_generation_prompt
@ -30,10 +30,12 @@ async def create_podcast_transcript(
search_space_id = configuration.search_space_id
user_prompt = configuration.user_prompt
# Get search space's long context LLM
llm = await get_long_context_llm(state.db_session, search_space_id)
# Get search space's document summary LLM
llm = await get_document_summary_llm(state.db_session, search_space_id)
if not llm:
error_message = f"No long context LLM configured for search space {search_space_id}"
error_message = (
f"No document summary LLM configured for search space {search_space_id}"
)
print(error_message)
raise RuntimeError(error_message)

View file

@ -35,12 +35,6 @@ def load_global_llm_configs():
# Try main config file first
global_config_file = BASE_DIR / "app" / "config" / "global_llm_config.yaml"
# Fall back to example file for testing
# if not global_config_file.exists():
# global_config_file = BASE_DIR / "app" / "config" / "global_llm_config.example.yaml"
# if global_config_file.exists():
# print("Info: Using global_llm_config.example.yaml (copy to global_llm_config.yaml for production)")
if not global_config_file.exists():
# No global configs available
return []

View file

@ -9,72 +9,101 @@
#
# These configurations will be available to all users as a convenient option
# Users can choose to use these global configs or add their own
#
# Structure matches NewLLMConfig:
# - LLM model configuration (provider, model_name, api_key, etc.)
# - Prompt configuration (system_instructions, citations_enabled)
global_llm_configs:
# Example: OpenAI GPT-4 Turbo
# Example: OpenAI GPT-4 Turbo with citations enabled
- id: -1
name: "Global GPT-4 Turbo"
description: "OpenAI's GPT-4 Turbo with default prompts and citations"
provider: "OPENAI"
model_name: "gpt-4-turbo-preview"
api_key: "sk-your-openai-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 4000
# Prompt Configuration
system_instructions: "" # Empty = use default SURFSENSE_SYSTEM_INSTRUCTIONS
use_default_system_instructions: true
citations_enabled: true
# Example: Anthropic Claude 3 Opus
- id: -2
name: "Global Claude 3 Opus"
description: "Anthropic's most capable model with citations"
provider: "ANTHROPIC"
model_name: "claude-3-opus-20240229"
api_key: "sk-ant-your-anthropic-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 4000
system_instructions: ""
use_default_system_instructions: true
citations_enabled: true
# Example: Fast model - GPT-3.5 Turbo
# Example: Fast model - GPT-3.5 Turbo (citations disabled for speed)
- id: -3
name: "Global GPT-3.5 Turbo"
name: "Global GPT-3.5 Turbo (Fast)"
description: "Fast responses without citations for quick queries"
provider: "OPENAI"
model_name: "gpt-3.5-turbo"
api_key: "sk-your-openai-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.5
max_tokens: 2000
system_instructions: ""
use_default_system_instructions: true
citations_enabled: false # Disabled for faster responses
# Example: Chinese LLM - DeepSeek
# Example: Chinese LLM - DeepSeek with custom instructions
- id: -4
name: "Global DeepSeek Chat"
name: "Global DeepSeek Chat (Chinese)"
description: "DeepSeek optimized for Chinese language responses"
provider: "DEEPSEEK"
model_name: "deepseek-chat"
api_key: "your-deepseek-api-key-here"
api_base: "https://api.deepseek.com/v1"
language: "Chinese"
litellm_params:
temperature: 0.7
max_tokens: 4000
# Custom system instructions for Chinese responses
system_instructions: |
<system_instruction>
You are SurfSense, a reasoning and acting AI agent designed to answer user questions using the user's personal knowledge base.
Today's date (UTC): {resolved_today}
IMPORTANT: Please respond in Chinese (简体中文) unless the user specifically requests another language.
</system_instruction>
use_default_system_instructions: false
citations_enabled: true
# Example: Groq - Fast inference
- id: -5
name: "Global Groq Llama 3"
description: "Ultra-fast Llama 3 70B via Groq"
provider: "GROQ"
model_name: "llama3-70b-8192"
api_key: "your-groq-api-key-here"
api_base: ""
language: "English"
litellm_params:
temperature: 0.7
max_tokens: 8000
system_instructions: ""
use_default_system_instructions: true
citations_enabled: true
# Notes:
# - Use negative IDs to distinguish global configs from user configs
# - Use negative IDs to distinguish global configs from user configs (NewLLMConfig in DB)
# - IDs should be unique and sequential (e.g., -1, -2, -3, etc.)
# - The 'api_key' field will not be exposed to users via API
# - Users can select these configs for their long_context, fast, or strategic LLM roles
# - system_instructions: Custom prompt or empty string to use defaults
# - use_default_system_instructions: true = use SURFSENSE_SYSTEM_INSTRUCTIONS when system_instructions is empty
# - citations_enabled: true = include citation instructions, false = include anti-citation instructions
# - All standard LiteLLM providers are supported

View file

@ -452,9 +452,10 @@ class SearchSpace(BaseModel, TimestampMixin):
# Search space-level LLM preferences (shared by all members)
# Note: These can be negative IDs for global configs (from YAML) or positive IDs for custom configs (from DB)
long_context_llm_id = Column(Integer, nullable=True)
fast_llm_id = Column(Integer, nullable=True)
strategic_llm_id = Column(Integer, nullable=True)
agent_llm_id = Column(Integer, nullable=True) # For agent/chat operations
document_summary_llm_id = Column(
Integer, nullable=True
) # For document summarization
user_id = Column(
UUID(as_uuid=True), ForeignKey("user.id", ondelete="CASCADE"), nullable=False
@ -491,10 +492,10 @@ class SearchSpace(BaseModel, TimestampMixin):
order_by="SearchSourceConnector.id",
cascade="all, delete-orphan",
)
llm_configs = relationship(
"LLMConfig",
new_llm_configs = relationship(
"NewLLMConfig",
back_populates="search_space",
order_by="LLMConfig.id",
order_by="NewLLMConfig.id",
cascade="all, delete-orphan",
)
@ -553,10 +554,24 @@ class SearchSourceConnector(BaseModel, TimestampMixin):
)
class LLMConfig(BaseModel, TimestampMixin):
__tablename__ = "llm_configs"
class NewLLMConfig(BaseModel, TimestampMixin):
"""
New LLM configuration table that combines model settings with prompt configuration.
This table provides:
- LLM model configuration (provider, model_name, api_key, etc.)
- Configurable system instructions (defaults to SURFSENSE_SYSTEM_INSTRUCTIONS)
- Citation toggle (enable/disable citation instructions)
Note: SURFSENSE_TOOLS_INSTRUCTIONS is always used and not configurable.
"""
__tablename__ = "new_llm_configs"
name = Column(String(100), nullable=False, index=True)
description = Column(String(500), nullable=True)
# === LLM Model Configuration (from original LLMConfig, excluding 'language') ===
# Provider from the enum
provider = Column(SQLAlchemyEnum(LiteLLMProvider), nullable=False)
# Custom provider name when provider is CUSTOM
@ -566,16 +581,29 @@ class LLMConfig(BaseModel, TimestampMixin):
# API Key should be encrypted before storing
api_key = Column(String, nullable=False)
api_base = Column(String(500), nullable=True)
language = Column(String(50), nullable=True, default="English")
# For any other parameters that litellm supports
litellm_params = Column(JSON, nullable=True, default={})
# === Prompt Configuration ===
# Configurable system instructions (defaults to SURFSENSE_SYSTEM_INSTRUCTIONS)
# Users can customize this from the UI
system_instructions = Column(
Text,
nullable=False,
default="", # Empty string means use default SURFSENSE_SYSTEM_INSTRUCTIONS
)
# Whether to use the default system instructions when system_instructions is empty
use_default_system_instructions = Column(Boolean, nullable=False, default=True)
# Citation toggle - when enabled, SURFSENSE_CITATION_INSTRUCTIONS is injected
# When disabled, an anti-citation prompt is injected instead
citations_enabled = Column(Boolean, nullable=False, default=True)
# === Relationships ===
search_space_id = Column(
Integer, ForeignKey("searchspaces.id", ondelete="CASCADE"), nullable=False
)
search_space = relationship("SearchSpace", back_populates="llm_configs")
search_space = relationship("SearchSpace", back_populates="new_llm_configs")
class Log(BaseModel, TimestampMixin):

View file

@ -1,190 +0,0 @@
prompts:
# Developer-focused prompts
- key: ethereum_developer
value: "Imagine you are an experienced Ethereum developer tasked with creating a smart contract for a blockchain messenger. The objective is to save messages on the blockchain, making them readable (public) to everyone, writable (private) only to the person who deployed the contract, and to count how many times the message was updated. Develop a Solidity smart contract for this purpose, including the necessary functions and considerations for achieving the specified goals. Please provide the code and any relevant explanations to ensure a clear understanding of the implementation."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: linux_terminal
value: "I want you to act as a linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. do not write explanations. do not type commands unless I instruct you to do so. when i need to tell you something in english, i will do so by putting text inside curly brackets {like this}."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: javascript_console
value: "I want you to act as a javascript console. I will type commands and you will reply with what the javascript console should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. do not write explanations. do not type commands unless I instruct you to do so. when i need to tell you something in english, i will do so by putting text inside curly brackets {like this}."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: fullstack_developer
value: "I want you to act as a software developer. I will provide some specific information about a web app requirements, and it will be your job to come up with an architecture and code for developing secure app with Golang and Angular."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: regex_generator
value: "I want you to act as a regex generator. Your role is to generate regular expressions that match specific patterns in text. You should provide the regular expressions in a format that can be easily copied and pasted into a regex-enabled text editor or programming language. Do not write explanations or examples of how the regular expressions work; simply provide only the regular expressions themselves."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: senior_frontend_developer
value: "I want you to act as a Senior Frontend developer. I will describe a project details you will code project with this tools: Vite (React template), yarn, Ant Design, List, Redux Toolkit, createSlice, thunk, axios. You should merge files in single index.js file and nothing else. Do not write explanations."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: code_reviewer
value: "I want you to act as a Code reviewer who is experienced developer in the given code language. I will provide you with the code block or methods or code file along with the code language name, and I would like you to review the code and share the feedback, suggestions and alternative recommended approaches. Please write explanations behind the feedback or suggestions or alternative approaches."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: machine_learning_engineer
value: "I want you to act as a machine learning engineer. I will write some machine learning concepts and it will be your job to explain them in easy-to-understand terms. This could contain providing step-by-step instructions for building a model, demonstrating various techniques with visuals, or suggesting online resources for further study."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: sql_terminal
value: "I want you to act as a SQL terminal in front of an example database. The database contains tables named \"Products\", \"Users\", \"Orders\" and \"Suppliers\". I will type queries and you will reply with what the terminal would show. I want you to reply with a table of query results in a single code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so in curly braces {like this)."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: python_interpreter
value: "Act as a Python interpreter. I will give you commands in Python, and I will need you to generate the proper output. Only say the output. But if there is none, say nothing, and don't give me an explanation. If I need to say something, I will do so through comments."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: devops_engineer
value: "You are a Senior DevOps engineer working at a Big Company. Your role is to provide scalable, efficient, and automated solutions for software deployment, infrastructure management, and CI/CD pipelines. Suggest the best DevOps practices, including infrastructure setup, deployment strategies, automation tools, and cost-effective scaling solutions."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
- key: cyber_security_specialist
value: "I want you to act as a cyber security specialist. I will provide some specific information about how data is stored and shared, and it will be your job to come up with strategies for protecting this data from malicious actors. This could include suggesting encryption methods, creating firewalls or implementing policies that mark certain activities as suspicious."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "developer"
# General productivity prompts
- key: english_translator
value: "I want you to act as an English translator, spelling corrector and improver. I will speak to you in any language and you will detect the language, translate it and answer in the corrected and improved version of my text, in English. I want you to replace my simplified A0-level words and sentences with more beautiful and elegant, upper level English words and sentences. Keep the meaning same, but make them more literary. I want you to only reply the correction, the improvements and nothing else, do not write explanations."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: proofreader
value: "I want you act as a proofreader. I will provide you texts and I would like you to review them for any spelling, grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary corrections or suggestions for improve the text."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: note_taking_assistant
value: "I want you to act as a note-taking assistant for a lecture. Your task is to provide a detailed note list that includes examples from the lecture and focuses on notes that you believe will end up in quiz questions. Additionally, please make a separate list for notes that have numbers and data in them and another separated list for the examples that included in this lecture. The notes should be concise and easy to read."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: essay_writer
value: "I want you to act as an essay writer. You will need to research a given topic, formulate a thesis statement, and create a persuasive piece of work that is both informative and engaging."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: career_counselor
value: "I want you to act as a career counselor. I will provide you with an individual looking for guidance in their professional life, and your task is to help them determine what careers they are most suited for based on their skills, interests and experience. You should also conduct research into the various options available, explain the job market trends in different industries and advice on which qualifications would be beneficial for pursuing particular fields."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: life_coach
value: "I want you to act as a life coach. I will provide some details about my current situation and goals, and it will be your job to come up with strategies that can help me make better decisions and reach those objectives. This could involve offering advice on various topics, such as creating plans for achieving success or dealing with difficult emotions."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: motivational_coach
value: "I want you to act as a motivational coach. I will provide you with some information about someone's goals and challenges, and it will be your job to come up with strategies that can help this person achieve their goals. This could involve providing positive affirmations, giving helpful advice or suggesting activities they can do to reach their end goal."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
- key: travel_guide
value: "I want you to act as a travel guide. I will write you my location and you will suggest a place to visit near my location. In some cases, I will also give you the type of places I will visit. You will also suggest me places of similar type that are close to my first location."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "general"
# Creative prompts
- key: storyteller
value: "I want you to act as a storyteller. You will come up with entertaining stories that are engaging, imaginative and captivating for the audience. It can be fairy tales, educational stories or any other type of stories which has the potential to capture people's attention and imagination. Depending on the target audience, you may choose specific themes or topics for your storytelling session e.g., if it's children then you can talk about animals; If it's adults then history-based tales might engage them better etc."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
- key: screenwriter
value: "I want you to act as a screenwriter. You will develop an engaging and creative script for either a feature length film, or a Web Series that can captivate its viewers. Start with coming up with interesting characters, the setting of the story, dialogues between the characters etc. Once your character development is complete - create an exciting storyline filled with twists and turns that keeps the viewers in suspense until the end."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
- key: novelist
value: "I want you to act as a novelist. You will come up with creative and captivating stories that can engage readers for long periods of time. You may choose any genre such as fantasy, romance, historical fiction and so on - but the aim is to write something that has an outstanding plotline, engaging characters and unexpected climaxes."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
- key: poet
value: "I want you to act as a poet. You will create poems that evoke emotions and have the power to stir people's soul. Write on any topic or theme but make sure your words convey the feeling you are trying to express in beautiful yet meaningful ways. You can also come up with short verses that are still powerful enough to leave an imprint in readers' minds."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
- key: rapper
value: "I want you to act as a rapper. You will come up with powerful and meaningful lyrics, beats and rhythm that can 'wow' the audience. Your lyrics should have an intriguing meaning and message which people can relate too. When it comes to choosing your beat, make sure it is catchy yet relevant to your words, so that when combined they make an explosion of sound everytime!"
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
- key: composer
value: "I want you to act as a composer. I will provide the lyrics to a song and you will create music for it. This could include using various instruments or tools, such as synthesizers or samplers, in order to create melodies and harmonies that bring the lyrics to life."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "creative"
# Educational prompts
- key: math_teacher
value: "I want you to act as a math teacher. I will provide some mathematical equations or concepts, and it will be your job to explain them in easy-to-understand terms. This could include providing step-by-step instructions for solving a problem, demonstrating various techniques with visuals or suggesting online resources for further study."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "educational"
- key: philosophy_teacher
value: "I want you to act as a philosophy teacher. I will provide some topics related to the study of philosophy, and it will be your job to explain these concepts in an easy-to-understand manner. This could include providing examples, posing questions or breaking down complex ideas into smaller pieces that are easier to comprehend."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "educational"
- key: historian
value: "I want you to act as a historian. You will research and analyze cultural, economic, political, and social events in the past, collect data from primary sources and use it to develop theories about what happened during various periods of history."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "educational"
- key: debater
value: "I want you to act as a debater. I will provide you with some topics related to current events and your task is to research both sides of the debates, present valid arguments for each side, refute opposing points of view, and draw persuasive conclusions based on evidence. Your goal is to help people come away from the discussion with increased knowledge and insight into the topic at hand."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "educational"
- key: explainer_with_analogies
value: "I want you to act as an explainer who uses analogies to clarify complex topics. When I give you a subject (technical, philosophical or scientific), you'll follow this structure: 1. Ask me 1-2 quick questions to assess my current level of understanding. 2. Based on my answer, create three analogies to explain the topic: one that a 10-year-old would understand, one for a high-school student, and one for a college-level person. 3. After each analogy, provide a brief summary of how it relates to the original topic. 4. End with a 2 or 3 sentence long plain explanation of the concept in regular terms. Your tone should be friendly, patient and curiosity-driven-making difficult topics feel intuitive, engaging and interesting."
author: "awesome-chatgpt-prompts"
link: "https://github.com/f/awesome-chatgpt-prompts"
category: "educational"

View file

@ -11,10 +11,10 @@ from .google_calendar_add_connector_route import (
from .google_gmail_add_connector_route import (
router as google_gmail_add_connector_router,
)
from .llm_config_routes import router as llm_config_router
from .logs_routes import router as logs_router
from .luma_add_connector_route import router as luma_add_connector_router
from .new_chat_routes import router as new_chat_router
from .new_llm_config_routes import router as new_llm_config_router
from .notes_routes import router as notes_router
from .podcasts_routes import router as podcasts_router
from .rbac_routes import router as rbac_router
@ -35,5 +35,5 @@ router.include_router(google_calendar_add_connector_router)
router.include_router(google_gmail_add_connector_router)
router.include_router(airtable_add_connector_router)
router.include_router(luma_add_connector_router)
router.include_router(llm_config_router)
router.include_router(new_llm_config_router) # LLM configs with prompt configuration
router.include_router(logs_router)

View file

@ -1,576 +0,0 @@
import logging
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.config import config
from app.db import (
LLMConfig,
Permission,
SearchSpace,
User,
get_async_session,
)
from app.schemas import LLMConfigCreate, LLMConfigRead, LLMConfigUpdate
from app.services.llm_service import validate_llm_config
from app.users import current_active_user
from app.utils.rbac import check_permission
router = APIRouter()
logger = logging.getLogger(__name__)
class LLMPreferencesUpdate(BaseModel):
    """Request schema for updating a search space's LLM role preferences.

    Each field holds an LLM config id; negative ids refer to global configs
    loaded from YAML, positive ids to user-created configs stored in the DB.
    """
    # Config id for long-context operations (None = not provided)
    long_context_llm_id: int | None = None
    # Config id for fast/low-latency operations (None = not provided)
    fast_llm_id: int | None = None
    # Config id for strategic/reasoning operations (None = not provided)
    strategic_llm_id: int | None = None
class LLMPreferencesRead(BaseModel):
    """Response schema for a search space's LLM role preferences.

    Returns both the raw config ids and, where available, the resolved
    LLMConfigRead objects for each role. Negative ids refer to global
    YAML-defined configs, positive ids to user-created DB configs.
    """
    # Raw config ids per role (None = no preference set)
    long_context_llm_id: int | None = None
    fast_llm_id: int | None = None
    strategic_llm_id: int | None = None
    # Resolved config objects per role (None = not set or not resolvable)
    long_context_llm: LLMConfigRead | None = None
    fast_llm: LLMConfigRead | None = None
    strategic_llm: LLMConfigRead | None = None
class GlobalLLMConfigRead(BaseModel):
    """Response schema for global (admin-provided) LLM configs.

    Deliberately omits the api_key field so provider credentials are never
    exposed through the API.
    """
    # Global config id (negative by convention to distinguish from DB configs)
    id: int
    name: str
    provider: str
    # Provider name used when provider is CUSTOM
    custom_provider: str | None = None
    model_name: str
    api_base: str | None = None
    language: str | None = None
    # Extra parameters passed through to litellm
    litellm_params: dict | None = None
    # Always True; marks the config as global rather than user-created
    is_global: bool = True
# Global LLM Config endpoints
@router.get("/global-llm-configs", response_model=list[GlobalLLMConfigRead])
async def get_global_llm_configs(
    user: User = Depends(current_active_user),
):
    """
    List the administrator-provided global LLM configurations.

    Available to every authenticated user; the stored API keys are
    stripped from the payload before it is returned.
    """
    # Keys copied verbatim from each YAML entry; api_key is deliberately absent.
    exposed_keys = (
        "id",
        "name",
        "provider",
        "custom_provider",
        "model_name",
        "api_base",
        "language",
    )
    try:
        return [
            {
                **{key: cfg.get(key) for key in exposed_keys},
                "litellm_params": cfg.get("litellm_params", {}),
                "is_global": True,
            }
            for cfg in config.GLOBAL_LLM_CONFIGS
        ]
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch global LLM configs: {e!s}"
        ) from e
@router.post("/llm-configs", response_model=LLMConfigRead)
async def create_llm_config(
    llm_config: LLMConfigCreate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Create a new LLM configuration for a search space.
    Requires LLM_CONFIGS_CREATE permission.

    The configuration is validated with a live test call before being
    persisted, so a bad key/model combination is rejected with a 400
    instead of being stored.

    Raises:
        HTTPException 400 if the configuration fails live validation.
        HTTPException 500 on unexpected errors (after rolling back).
    """
    try:
        # Verify user has permission to create LLM configs
        await check_permission(
            session,
            user,
            llm_config.search_space_id,
            Permission.LLM_CONFIGS_CREATE.value,
            "You don't have permission to create LLM configurations in this search space",
        )
        # Validate the LLM configuration by making a test API call
        is_valid, error_message = await validate_llm_config(
            provider=llm_config.provider.value,
            model_name=llm_config.model_name,
            api_key=llm_config.api_key,
            api_base=llm_config.api_base,
            custom_provider=llm_config.custom_provider,
            litellm_params=llm_config.litellm_params,
        )
        if not is_valid:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid LLM configuration: {error_message}",
            )
        # Persist only after validation succeeded; refresh to populate
        # server-generated fields (id, created_at) for the response.
        db_llm_config = LLMConfig(**llm_config.model_dump())
        session.add(db_llm_config)
        await session.commit()
        await session.refresh(db_llm_config)
        return db_llm_config
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        raise HTTPException(
            status_code=500, detail=f"Failed to create LLM configuration: {e!s}"
        ) from e
@router.get("/llm-configs", response_model=list[LLMConfigRead])
async def read_llm_configs(
    search_space_id: int,
    skip: int = 0,
    limit: int = 200,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    List the LLM configurations stored for a search space.

    Supports skip/limit pagination. Requires LLM_CONFIGS_READ permission.
    """
    try:
        # Membership/permission gate; raises HTTPException on failure.
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM configurations in this search space",
        )
        query = (
            select(LLMConfig)
            .filter(LLMConfig.search_space_id == search_space_id)
            .offset(skip)
            .limit(limit)
        )
        rows = await session.execute(query)
        return rows.scalars().all()
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch LLM configurations: {e!s}"
        ) from e
@router.get("/llm-configs/{llm_config_id}", response_model=LLMConfigRead)
async def read_llm_config(
    llm_config_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get a specific LLM configuration by ID.
    Requires LLM_CONFIGS_READ permission.

    Raises:
        HTTPException 404 if no configuration with this ID exists.
        HTTPException 500 on unexpected errors.
    """
    try:
        # Get the LLM config first: the permission check needs its
        # search_space_id, which is only known after the lookup.
        result = await session.execute(
            select(LLMConfig).filter(LLMConfig.id == llm_config_id)
        )
        llm_config = result.scalars().first()
        if not llm_config:
            raise HTTPException(status_code=404, detail="LLM configuration not found")
        # Verify user has permission to read LLM configs
        await check_permission(
            session,
            user,
            llm_config.search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM configurations in this search space",
        )
        return llm_config
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch LLM configuration: {e!s}"
        ) from e
@router.put("/llm-configs/{llm_config_id}", response_model=LLMConfigRead)
async def update_llm_config(
    llm_config_id: int,
    llm_config_update: LLMConfigUpdate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Update an existing LLM configuration.
    Requires LLM_CONFIGS_UPDATE permission.

    Only fields present in the request body are applied; the merged
    (existing + updated) configuration is validated with a live test call
    before anything is written to the database.

    Raises:
        HTTPException 404 if the configuration does not exist.
        HTTPException 400 if the merged configuration fails validation.
        HTTPException 500 on unexpected errors (after rolling back).
    """
    try:
        # Get the LLM config
        result = await session.execute(
            select(LLMConfig).filter(LLMConfig.id == llm_config_id)
        )
        db_llm_config = result.scalars().first()
        if not db_llm_config:
            raise HTTPException(status_code=404, detail="LLM configuration not found")
        # Verify user has permission to update LLM configs
        await check_permission(
            session,
            user,
            db_llm_config.search_space_id,
            Permission.LLM_CONFIGS_UPDATE.value,
            "You don't have permission to update LLM configurations in this search space",
        )
        update_data = llm_config_update.model_dump(exclude_unset=True)
        # Apply updates to a temporary copy for validation
        # NOTE(review): when "provider" is present in update_data it may be an
        # enum member rather than its .value string (the fallback uses
        # db_llm_config.provider.value) — confirm model_dump's enum handling
        # matches what validate_llm_config expects.
        temp_config = {
            "provider": update_data.get("provider", db_llm_config.provider.value),
            "model_name": update_data.get("model_name", db_llm_config.model_name),
            "api_key": update_data.get("api_key", db_llm_config.api_key),
            "api_base": update_data.get("api_base", db_llm_config.api_base),
            "custom_provider": update_data.get(
                "custom_provider", db_llm_config.custom_provider
            ),
            "litellm_params": update_data.get(
                "litellm_params", db_llm_config.litellm_params
            ),
        }
        # Validate the updated configuration
        is_valid, error_message = await validate_llm_config(
            provider=temp_config["provider"],
            model_name=temp_config["model_name"],
            api_key=temp_config["api_key"],
            api_base=temp_config["api_base"],
            custom_provider=temp_config["custom_provider"],
            litellm_params=temp_config["litellm_params"],
        )
        if not is_valid:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid LLM configuration: {error_message}",
            )
        # Apply updates to the database object
        for key, value in update_data.items():
            setattr(db_llm_config, key, value)
        await session.commit()
        await session.refresh(db_llm_config)
        return db_llm_config
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        raise HTTPException(
            status_code=500, detail=f"Failed to update LLM configuration: {e!s}"
        ) from e
@router.delete("/llm-configs/{llm_config_id}", response_model=dict)
async def delete_llm_config(
    llm_config_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Remove an LLM configuration.

    Requires LLM_CONFIGS_DELETE permission on the owning search space.
    """
    try:
        lookup = await session.execute(
            select(LLMConfig).filter(LLMConfig.id == llm_config_id)
        )
        target = lookup.scalars().first()
        if target is None:
            raise HTTPException(status_code=404, detail="LLM configuration not found")
        # Permission is checked against the search space the config belongs to.
        await check_permission(
            session,
            user,
            target.search_space_id,
            Permission.LLM_CONFIGS_DELETE.value,
            "You don't have permission to delete LLM configurations in this search space",
        )
        await session.delete(target)
        await session.commit()
        return {"message": "LLM configuration deleted successfully"}
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        raise HTTPException(
            status_code=500, detail=f"Failed to delete LLM configuration: {e!s}"
        ) from e
# Search Space LLM Preferences endpoints
@router.get(
    "/search-spaces/{search_space_id}/llm-preferences",
    response_model=LLMPreferencesRead,
)
async def get_llm_preferences(
    search_space_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get the LLM preferences for a specific search space.
    LLM preferences are shared by all members of the search space.
    Requires LLM_CONFIGS_READ permission.

    Returns the three role IDs (long-context / fast / strategic) plus a
    resolved config for each: a DB row for positive IDs, a synthesized
    dict (API key masked) for negative/global IDs, None when unassigned.
    """
    try:
        # Verify user has permission to read LLM configs
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM preferences in this search space",
        )
        # Get the search space
        result = await session.execute(
            select(SearchSpace).filter(SearchSpace.id == search_space_id)
        )
        search_space = result.scalars().first()
        if not search_space:
            raise HTTPException(status_code=404, detail="Search space not found")

        # Helper function to get config (global or custom)
        async def get_config_for_id(config_id):
            if config_id is None:
                return None
            # Check if it's a global config (negative ID)
            if config_id < 0:
                for cfg in config.GLOBAL_LLM_CONFIGS:
                    if cfg.get("id") == config_id:
                        # Return as LLMConfigRead-compatible dict
                        return {
                            "id": cfg.get("id"),
                            "name": cfg.get("name"),
                            "provider": cfg.get("provider"),
                            "custom_provider": cfg.get("custom_provider"),
                            "model_name": cfg.get("model_name"),
                            "api_key": "***GLOBAL***",  # Don't expose the actual key
                            "api_base": cfg.get("api_base"),
                            "language": cfg.get("language"),
                            "litellm_params": cfg.get("litellm_params"),
                            "created_at": None,
                            "search_space_id": search_space_id,
                        }
                return None
            # It's a custom config, fetch from database
            result = await session.execute(
                select(LLMConfig).filter(LLMConfig.id == config_id)
            )
            return result.scalars().first()

        # Get the configs (from DB for custom, or constructed for global)
        long_context_llm = await get_config_for_id(search_space.long_context_llm_id)
        fast_llm = await get_config_for_id(search_space.fast_llm_id)
        strategic_llm = await get_config_for_id(search_space.strategic_llm_id)
        return {
            "long_context_llm_id": search_space.long_context_llm_id,
            "fast_llm_id": search_space.fast_llm_id,
            "strategic_llm_id": search_space.strategic_llm_id,
            "long_context_llm": long_context_llm,
            "fast_llm": fast_llm,
            "strategic_llm": strategic_llm,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch LLM preferences: {e!s}"
        ) from e
@router.put(
    "/search-spaces/{search_space_id}/llm-preferences",
    response_model=LLMPreferencesRead,
)
async def update_llm_preferences(
    search_space_id: int,
    preferences: LLMPreferencesUpdate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Update the LLM preferences for a specific search space.
    LLM preferences are shared by all members of the search space.
    Requires SETTINGS_UPDATE permission (only users with settings access can change).

    Each supplied ID is validated before anything is written: negative IDs
    must match a global YAML config, positive IDs must belong to this
    search space. Mixing configs with different explicit languages is
    allowed but logged as a warning.
    """
    try:
        # Verify user has permission to update settings (not just LLM configs)
        # This ensures only users with settings access can change shared LLM preferences
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.SETTINGS_UPDATE.value,
            "You don't have permission to update LLM preferences in this search space",
        )
        # Get the search space
        result = await session.execute(
            select(SearchSpace).filter(SearchSpace.id == search_space_id)
        )
        search_space = result.scalars().first()
        if not search_space:
            raise HTTPException(status_code=404, detail="Search space not found")
        # Validate that all provided LLM config IDs belong to the search space
        update_data = preferences.model_dump(exclude_unset=True)
        # Store language from configs to validate consistency
        languages = set()
        for _key, llm_config_id in update_data.items():
            if llm_config_id is not None:
                # Check if this is a global config (negative ID)
                if llm_config_id < 0:
                    # Validate global config exists
                    global_config = None
                    for cfg in config.GLOBAL_LLM_CONFIGS:
                        if cfg.get("id") == llm_config_id:
                            global_config = cfg
                            break
                    if not global_config:
                        raise HTTPException(
                            status_code=404,
                            detail=f"Global LLM configuration {llm_config_id} not found",
                        )
                    # Collect language for consistency check (if explicitly set)
                    lang = global_config.get("language")
                    if lang and lang.strip():  # Only add non-empty languages
                        languages.add(lang.strip())
                else:
                    # Verify the LLM config belongs to the search space (custom config)
                    result = await session.execute(
                        select(LLMConfig).filter(
                            LLMConfig.id == llm_config_id,
                            LLMConfig.search_space_id == search_space_id,
                        )
                    )
                    llm_config = result.scalars().first()
                    if not llm_config:
                        raise HTTPException(
                            status_code=404,
                            detail=f"LLM configuration {llm_config_id} not found in this search space",
                        )
                    # Collect language for consistency check (if explicitly set)
                    if llm_config.language and llm_config.language.strip():
                        languages.add(llm_config.language.strip())
        # Language consistency check - only warn if there are multiple explicit languages
        # Allow mixing configs with and without language settings
        if len(languages) > 1:
            # Log warning but allow the operation
            logger.warning(
                f"Multiple languages detected in LLM selection for search_space {search_space_id}: {languages}. "
                "This may affect response quality."
            )
        # Update search space LLM preferences
        for key, value in update_data.items():
            setattr(search_space, key, value)
        await session.commit()
        await session.refresh(search_space)

        # Helper function to get config (global or custom)
        async def get_config_for_id(config_id):
            if config_id is None:
                return None
            # Check if it's a global config (negative ID)
            if config_id < 0:
                for cfg in config.GLOBAL_LLM_CONFIGS:
                    if cfg.get("id") == config_id:
                        # Return as LLMConfigRead-compatible dict
                        return {
                            "id": cfg.get("id"),
                            "name": cfg.get("name"),
                            "provider": cfg.get("provider"),
                            "custom_provider": cfg.get("custom_provider"),
                            "model_name": cfg.get("model_name"),
                            "api_key": "***GLOBAL***",  # Don't expose the actual key
                            "api_base": cfg.get("api_base"),
                            "language": cfg.get("language"),
                            "litellm_params": cfg.get("litellm_params"),
                            "created_at": None,
                            "search_space_id": search_space_id,
                        }
                return None
            # It's a custom config, fetch from database
            result = await session.execute(
                select(LLMConfig).filter(LLMConfig.id == config_id)
            )
            return result.scalars().first()

        # Get the configs (from DB for custom, or constructed for global)
        long_context_llm = await get_config_for_id(search_space.long_context_llm_id)
        fast_llm = await get_config_for_id(search_space.fast_llm_id)
        strategic_llm = await get_config_for_id(search_space.strategic_llm_id)
        # Return updated preferences
        return {
            "long_context_llm_id": search_space.long_context_llm_id,
            "fast_llm_id": search_space.fast_llm_id,
            "strategic_llm_id": search_space.strategic_llm_id,
            "long_context_llm": long_context_llm,
            "fast_llm": fast_llm,
            "strategic_llm": strategic_llm,
        }
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        raise HTTPException(
            status_code=500, detail=f"Failed to update LLM preferences: {e!s}"
        ) from e

View file

@ -685,8 +685,16 @@ async def handle_new_chat(
)
search_space = search_space_result.scalars().first()
# TODO: Add new llm config arch then complete this
llm_config_id = -4
if not search_space:
raise HTTPException(status_code=404, detail="Search space not found")
# Use agent_llm_id from search space for chat operations
# Positive IDs load from NewLLMConfig database table
# Negative IDs load from YAML global configs
# Falls back to -1 (first global config) if not configured
llm_config_id = (
search_space.agent_llm_id if search_space.agent_llm_id is not None else -1
)
# Return streaming response
return StreamingResponse(
@ -696,7 +704,6 @@ async def handle_new_chat(
chat_id=request.chat_id,
session=session,
llm_config_id=llm_config_id,
messages=request.messages,
attachments=request.attachments,
mentioned_document_ids=request.mentioned_document_ids,
),

View file

@ -0,0 +1,376 @@
"""
API routes for NewLLMConfig CRUD operations.
NewLLMConfig combines LLM model settings with prompt configuration:
- LLM provider, model, API key, etc.
- Configurable system instructions
- Citation toggle
"""
import logging
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.agents.new_chat.system_prompt import get_default_system_instructions
from app.config import config
from app.db import (
NewLLMConfig,
Permission,
User,
get_async_session,
)
from app.schemas import (
DefaultSystemInstructionsResponse,
GlobalNewLLMConfigRead,
NewLLMConfigCreate,
NewLLMConfigRead,
NewLLMConfigUpdate,
)
from app.services.llm_service import validate_llm_config
from app.users import current_active_user
from app.utils.rbac import check_permission
router = APIRouter()
logger = logging.getLogger(__name__)
# =============================================================================
# Global Configs Routes
# =============================================================================
@router.get("/global-new-llm-configs", response_model=list[GlobalNewLLMConfigRead])
async def get_global_new_llm_configs(
    user: User = Depends(current_active_user),
):
    """
    List the administrator-provided global NewLLMConfig entries.

    Available to every authenticated user. API keys are stripped before the
    payload is returned, and global entries carry negative IDs so they can
    be told apart from user-created (database) configs.
    """
    try:
        safe_configs = []
        for cfg in config.GLOBAL_LLM_CONFIGS:
            entry = {
                key: cfg.get(key)
                for key in (
                    "id",
                    "name",
                    "description",
                    "provider",
                    "custom_provider",
                    "model_name",
                )
            }
            entry["api_base"] = cfg.get("api_base") or None
            entry["litellm_params"] = cfg.get("litellm_params", {})
            # Prompt configuration fields, with the same defaults the
            # database model uses.
            entry["system_instructions"] = cfg.get("system_instructions", "")
            entry["use_default_system_instructions"] = cfg.get(
                "use_default_system_instructions", True
            )
            entry["citations_enabled"] = cfg.get("citations_enabled", True)
            entry["is_global"] = True
            safe_configs.append(entry)
        return safe_configs
    except Exception as e:
        logger.exception("Failed to fetch global NewLLMConfigs")
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch global configurations: {e!s}"
        ) from e
# =============================================================================
# CRUD Routes
# =============================================================================
@router.post("/new-llm-configs", response_model=NewLLMConfigRead)
async def create_new_llm_config(
    config_data: NewLLMConfigCreate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Create a new NewLLMConfig for a search space.
    Requires LLM_CONFIGS_CREATE permission.

    The LLM connection settings are validated with a live test call before
    being persisted.

    Raises:
        HTTPException 400 if the configuration fails live validation.
        HTTPException 500 on unexpected errors (after rolling back).
    """
    try:
        # Verify user has permission
        await check_permission(
            session,
            user,
            config_data.search_space_id,
            Permission.LLM_CONFIGS_CREATE.value,
            "You don't have permission to create LLM configurations in this search space",
        )
        # Validate the LLM configuration by making a test API call
        is_valid, error_message = await validate_llm_config(
            provider=config_data.provider.value,
            model_name=config_data.model_name,
            api_key=config_data.api_key,
            api_base=config_data.api_base,
            custom_provider=config_data.custom_provider,
            litellm_params=config_data.litellm_params,
        )
        if not is_valid:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid LLM configuration: {error_message}",
            )
        # Create the config; refresh populates server-generated fields
        # (id, created_at) for the response.
        db_config = NewLLMConfig(**config_data.model_dump())
        session.add(db_config)
        await session.commit()
        await session.refresh(db_config)
        return db_config
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        logger.exception("Failed to create NewLLMConfig")
        raise HTTPException(
            status_code=500, detail=f"Failed to create configuration: {e!s}"
        ) from e
@router.get("/new-llm-configs", response_model=list[NewLLMConfigRead])
async def list_new_llm_configs(
    search_space_id: int,
    skip: int = 0,
    limit: int = 100,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get all NewLLMConfigs for a search space.
    Requires LLM_CONFIGS_READ permission.

    Results are ordered newest-first and paginated via skip/limit.
    """
    try:
        # Verify user has permission
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM configurations in this search space",
        )
        # Newest configs first so recently created entries surface on page 1.
        result = await session.execute(
            select(NewLLMConfig)
            .filter(NewLLMConfig.search_space_id == search_space_id)
            .order_by(NewLLMConfig.created_at.desc())
            .offset(skip)
            .limit(limit)
        )
        return result.scalars().all()
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("Failed to list NewLLMConfigs")
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch configurations: {e!s}"
        ) from e
@router.get(
    "/new-llm-configs/default-system-instructions",
    response_model=DefaultSystemInstructionsResponse,
)
async def get_default_system_instructions_endpoint(
    user: User = Depends(current_active_user),
):
    """
    Get the default SURFSENSE_SYSTEM_INSTRUCTIONS template.
    Useful for pre-populating the UI when creating a new configuration.
    """
    # Registered before the "/new-llm-configs/{config_id}" route below so
    # "default-system-instructions" is not parsed as an int config_id.
    return DefaultSystemInstructionsResponse(
        default_system_instructions=get_default_system_instructions()
    )
@router.get("/new-llm-configs/{config_id}", response_model=NewLLMConfigRead)
async def get_new_llm_config(
    config_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get a specific NewLLMConfig by ID.
    Requires LLM_CONFIGS_READ permission.

    Raises:
        HTTPException 404 if no configuration with this ID exists.
        HTTPException 500 on unexpected errors.
    """
    try:
        result = await session.execute(
            select(NewLLMConfig).filter(NewLLMConfig.id == config_id)
        )
        # Named db_config (not `config`) so the app-level `config` module
        # imported at the top of this file is not shadowed.
        db_config = result.scalars().first()
        if not db_config:
            raise HTTPException(status_code=404, detail="Configuration not found")
        # Verify user has permission
        await check_permission(
            session,
            user,
            db_config.search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM configurations in this search space",
        )
        return db_config
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("Failed to get NewLLMConfig")
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch configuration: {e!s}"
        ) from e
@router.put("/new-llm-configs/{config_id}", response_model=NewLLMConfigRead)
async def update_new_llm_config(
    config_id: int,
    update_data: NewLLMConfigUpdate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Update an existing NewLLMConfig.
    Requires LLM_CONFIGS_UPDATE permission.

    Only fields present in the request body are applied. When any LLM
    connection setting changes, the merged (existing + updated)
    configuration is validated with a live test call before persisting.

    Raises:
        HTTPException 404 if the configuration does not exist.
        HTTPException 400 if the merged configuration fails validation.
        HTTPException 500 on unexpected errors (after rolling back).
    """
    # Fields whose change triggers re-validation of the LLM connection.
    llm_connection_fields = (
        "provider",
        "model_name",
        "api_key",
        "api_base",
        "custom_provider",
        "litellm_params",
    )
    try:
        result = await session.execute(
            select(NewLLMConfig).filter(NewLLMConfig.id == config_id)
        )
        # Named db_config (not `config`) so the app-level `config` module
        # imported at the top of this file is not shadowed.
        db_config = result.scalars().first()
        if not db_config:
            raise HTTPException(status_code=404, detail="Configuration not found")
        # Verify user has permission
        await check_permission(
            session,
            user,
            db_config.search_space_id,
            Permission.LLM_CONFIGS_UPDATE.value,
            "You don't have permission to update LLM configurations in this search space",
        )
        update_dict = update_data.model_dump(exclude_unset=True)
        # If updating LLM settings, validate the merged configuration
        if any(key in update_dict for key in llm_connection_fields):
            # "provider" may arrive as an enum member or already as a plain
            # string; normalize to the string value either way.
            provider_value = update_dict.get("provider", db_config.provider)
            if hasattr(provider_value, "value"):
                provider_value = provider_value.value
            is_valid, error_message = await validate_llm_config(
                provider=provider_value,
                model_name=update_dict.get("model_name", db_config.model_name),
                api_key=update_dict.get("api_key", db_config.api_key),
                api_base=update_dict.get("api_base", db_config.api_base),
                custom_provider=update_dict.get(
                    "custom_provider", db_config.custom_provider
                ),
                litellm_params=update_dict.get(
                    "litellm_params", db_config.litellm_params
                ),
            )
            if not is_valid:
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid LLM configuration: {error_message}",
                )
        # Apply updates
        for key, value in update_dict.items():
            setattr(db_config, key, value)
        await session.commit()
        await session.refresh(db_config)
        return db_config
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        logger.exception("Failed to update NewLLMConfig")
        raise HTTPException(
            status_code=500, detail=f"Failed to update configuration: {e!s}"
        ) from e
@router.delete("/new-llm-configs/{config_id}", response_model=dict)
async def delete_new_llm_config(
    config_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Delete a NewLLMConfig.
    Requires LLM_CONFIGS_DELETE permission.

    Raises:
        HTTPException 404 if the configuration does not exist.
        HTTPException 500 on unexpected errors (after rolling back).
    """
    try:
        result = await session.execute(
            select(NewLLMConfig).filter(NewLLMConfig.id == config_id)
        )
        # Named db_config (not `config`) so the app-level `config` module
        # imported at the top of this file is not shadowed.
        db_config = result.scalars().first()
        if not db_config:
            raise HTTPException(status_code=404, detail="Configuration not found")
        # Verify user has permission
        await check_permission(
            session,
            user,
            db_config.search_space_id,
            Permission.LLM_CONFIGS_DELETE.value,
            "You don't have permission to delete LLM configurations in this search space",
        )
        await session.delete(db_config)
        await session.commit()
        return {"message": "Configuration deleted successfully", "id": config_id}
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        logger.exception("Failed to delete NewLLMConfig")
        raise HTTPException(
            status_code=500, detail=f"Failed to delete configuration: {e!s}"
        ) from e

View file

@ -1,13 +1,13 @@
import logging
from pathlib import Path
import yaml
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy import func
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.config import config
from app.db import (
NewLLMConfig,
Permission,
SearchSpace,
SearchSpaceMembership,
@ -17,6 +17,8 @@ from app.db import (
get_default_roles_config,
)
from app.schemas import (
LLMPreferencesRead,
LLMPreferencesUpdate,
SearchSpaceCreate,
SearchSpaceRead,
SearchSpaceUpdate,
@ -184,37 +186,6 @@ async def read_search_spaces(
) from e
@router.get("/searchspaces/prompts/community")
async def get_community_prompts():
    """
    Get community-curated prompts for SearchSpace System Instructions.
    This endpoint does not require authentication as it serves public prompts.

    Returns the list under the YAML file's "prompts" key, or an empty list
    when the file holds no usable mapping.

    Raises:
        HTTPException 404 if the bundled YAML file is missing.
        HTTPException 500 if the file cannot be read or parsed.
    """
    try:
        # The prompts file ships with the backend, next to the routes package.
        prompts_file = (
            Path(__file__).parent.parent
            / "prompts"
            / "public_search_space_prompts.yaml"
        )
        if not prompts_file.exists():
            raise HTTPException(
                status_code=404, detail="Community prompts file not found"
            )
        with open(prompts_file, encoding="utf-8") as f:
            data = yaml.safe_load(f)
        # safe_load returns None for an empty file (and a non-dict for
        # malformed content); treat both as "no prompts" instead of letting
        # data.get raise and surface as a 500.
        if not isinstance(data, dict):
            return []
        return data.get("prompts", [])
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to load community prompts: {e!s}"
        ) from e
@router.get("/searchspaces/{search_space_id}", response_model=SearchSpaceRead)
async def read_search_space(
search_space_id: int,
@ -329,3 +300,184 @@ async def delete_search_space(
raise HTTPException(
status_code=500, detail=f"Failed to delete search space: {e!s}"
) from e
# =============================================================================
# LLM Preferences Routes
# =============================================================================
async def _get_llm_config_by_id(
    session: AsyncSession, config_id: int | None
) -> dict | None:
    """
    Resolve an LLM config ID to a plain dict.

    Negative IDs are looked up in the YAML-backed global configs; positive
    IDs in the new_llm_configs table. None in, or no match, yields None.
    """
    if config_id is None:
        return None

    if config_id < 0:
        # Global (YAML) config: scan for the entry with this negative ID.
        match = next(
            (c for c in config.GLOBAL_LLM_CONFIGS if c.get("id") == config_id),
            None,
        )
        if match is None:
            return None
        return {
            "id": match.get("id"),
            "name": match.get("name"),
            "description": match.get("description"),
            "provider": match.get("provider"),
            "custom_provider": match.get("custom_provider"),
            "model_name": match.get("model_name"),
            "api_base": match.get("api_base"),
            "litellm_params": match.get("litellm_params", {}),
            "system_instructions": match.get("system_instructions", ""),
            "use_default_system_instructions": match.get(
                "use_default_system_instructions", True
            ),
            "citations_enabled": match.get("citations_enabled", True),
            "is_global": True,
        }

    # Database config: fetch the row and flatten it into a dict.
    lookup = await session.execute(
        select(NewLLMConfig).filter(NewLLMConfig.id == config_id)
    )
    row = lookup.scalars().first()
    if row is None:
        return None
    return {
        "id": row.id,
        "name": row.name,
        "description": row.description,
        "provider": row.provider.value if row.provider else None,
        "custom_provider": row.custom_provider,
        "model_name": row.model_name,
        "api_key": row.api_key,
        "api_base": row.api_base,
        "litellm_params": row.litellm_params or {},
        "system_instructions": row.system_instructions or "",
        "use_default_system_instructions": row.use_default_system_instructions,
        "citations_enabled": row.citations_enabled,
        "created_at": row.created_at.isoformat() if row.created_at else None,
        "search_space_id": row.search_space_id,
    }
@router.get(
    "/search-spaces/{search_space_id}/llm-preferences",
    response_model=LLMPreferencesRead,
)
async def get_llm_preferences(
    search_space_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get LLM preferences (role assignments) for a search space.
    Requires LLM_CONFIGS_READ permission.

    Returns the agent / document-summary role IDs plus the resolved config
    dict for each (global YAML entry for negative IDs, database row for
    positive IDs, None when unassigned).
    """
    try:
        # Check permission
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.LLM_CONFIGS_READ.value,
            "You don't have permission to view LLM preferences",
        )
        result = await session.execute(
            select(SearchSpace).filter(SearchSpace.id == search_space_id)
        )
        search_space = result.scalars().first()
        if not search_space:
            raise HTTPException(status_code=404, detail="Search space not found")
        # Get full config objects for each role
        agent_llm = await _get_llm_config_by_id(session, search_space.agent_llm_id)
        document_summary_llm = await _get_llm_config_by_id(
            session, search_space.document_summary_llm_id
        )
        return LLMPreferencesRead(
            agent_llm_id=search_space.agent_llm_id,
            document_summary_llm_id=search_space.document_summary_llm_id,
            agent_llm=agent_llm,
            document_summary_llm=document_summary_llm,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("Failed to get LLM preferences")
        raise HTTPException(
            status_code=500, detail=f"Failed to get LLM preferences: {e!s}"
        ) from e
@router.put(
    "/search-spaces/{search_space_id}/llm-preferences",
    response_model=LLMPreferencesRead,
)
async def update_llm_preferences(
    search_space_id: int,
    preferences: LLMPreferencesUpdate,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Update LLM preferences (role assignments) for a search space.
    Requires LLM_CONFIGS_UPDATE permission.

    Only fields present in the request body are applied; the refreshed
    assignments (with resolved config objects) are returned.
    """
    try:
        # Check permission
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.LLM_CONFIGS_UPDATE.value,
            "You don't have permission to update LLM preferences",
        )
        result = await session.execute(
            select(SearchSpace).filter(SearchSpace.id == search_space_id)
        )
        search_space = result.scalars().first()
        if not search_space:
            raise HTTPException(status_code=404, detail="Search space not found")
        # Update preferences
        # NOTE(review): unlike the legacy llm-preferences endpoint, incoming
        # config IDs are not validated here against existing global/database
        # configs — unknown IDs are stored as-is; confirm this is intended.
        update_data = preferences.model_dump(exclude_unset=True)
        for key, value in update_data.items():
            setattr(search_space, key, value)
        await session.commit()
        await session.refresh(search_space)
        # Get full config objects for response
        agent_llm = await _get_llm_config_by_id(session, search_space.agent_llm_id)
        document_summary_llm = await _get_llm_config_by_id(
            session, search_space.document_summary_llm_id
        )
        return LLMPreferencesRead(
            agent_llm_id=search_space.agent_llm_id,
            document_summary_llm_id=search_space.document_summary_llm_id,
            agent_llm=agent_llm,
            document_summary_llm=document_summary_llm,
        )
    except HTTPException:
        raise
    except Exception as e:
        await session.rollback()
        logger.exception("Failed to update LLM preferences")
        raise HTTPException(
            status_code=500, detail=f"Failed to update LLM preferences: {e!s}"
        ) from e

View file

@ -10,7 +10,6 @@ from .documents import (
ExtensionDocumentMetadata,
PaginatedResponse,
)
from .llm_config import LLMConfigBase, LLMConfigCreate, LLMConfigRead, LLMConfigUpdate
from .logs import LogBase, LogCreate, LogFilter, LogRead, LogUpdate
from .new_chat import (
ChatMessage,
@ -26,6 +25,16 @@ from .new_chat import (
ThreadListItem,
ThreadListResponse,
)
from .new_llm_config import (
DefaultSystemInstructionsResponse,
GlobalNewLLMConfigRead,
LLMPreferencesRead,
LLMPreferencesUpdate,
NewLLMConfigCreate,
NewLLMConfigPublic,
NewLLMConfigRead,
NewLLMConfigUpdate,
)
from .podcasts import PodcastBase, PodcastCreate, PodcastRead, PodcastUpdate
from .rbac_schemas import (
InviteAcceptRequest,
@ -67,6 +76,7 @@ __all__ = [
"ChunkCreate",
"ChunkRead",
"ChunkUpdate",
"DefaultSystemInstructionsResponse",
# Document schemas
"DocumentBase",
"DocumentRead",
@ -75,6 +85,7 @@ __all__ = [
"DocumentsCreate",
"ExtensionDocumentContent",
"ExtensionDocumentMetadata",
"GlobalNewLLMConfigRead",
# Base schemas
"IDModel",
# RBAC schemas
@ -84,11 +95,9 @@ __all__ = [
"InviteInfoResponse",
"InviteRead",
"InviteUpdate",
# LLM Config schemas
"LLMConfigBase",
"LLMConfigCreate",
"LLMConfigRead",
"LLMConfigUpdate",
# LLM Preferences schemas
"LLMPreferencesRead",
"LLMPreferencesUpdate",
# Log schemas
"LogBase",
"LogCreate",
@ -106,6 +115,11 @@ __all__ = [
"NewChatThreadRead",
"NewChatThreadUpdate",
"NewChatThreadWithMessages",
# NewLLMConfig schemas
"NewLLMConfigCreate",
"NewLLMConfigPublic",
"NewLLMConfigRead",
"NewLLMConfigUpdate",
"PaginatedResponse",
"PermissionInfo",
"PermissionsListResponse",

View file

@ -1,72 +0,0 @@
from datetime import datetime
from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from app.db import LiteLLMProvider
from .base import IDModel, TimestampModel
class LLMConfigBase(BaseModel):
    """Common fields shared by the LLM configuration create/read schemas."""

    name: str = Field(
        ..., max_length=100, description="User-friendly name for the LLM configuration"
    )
    provider: LiteLLMProvider = Field(..., description="LiteLLM provider type")
    custom_provider: str | None = Field(
        None, max_length=100, description="Custom provider name when provider is CUSTOM"
    )
    model_name: str = Field(
        ..., max_length=100, description="Model name without provider prefix"
    )
    api_key: str = Field(..., description="API key for the provider")
    api_base: str | None = Field(
        None, max_length=500, description="Optional API base URL"
    )
    litellm_params: dict[str, Any] | None = Field(
        default=None, description="Additional LiteLLM parameters"
    )
    # Defaults to English when the caller does not specify a language.
    language: str | None = Field(
        default="English", max_length=50, description="Language for the LLM"
    )
class LLMConfigCreate(LLMConfigBase):
    """Payload for creating an LLM config, bound to a specific search space."""

    search_space_id: int = Field(
        ..., description="Search space ID to associate the LLM config with"
    )
class LLMConfigUpdate(BaseModel):
    """Partial-update payload for an LLM config; every field is optional."""

    name: str | None = Field(
        None, max_length=100, description="User-friendly name for the LLM configuration"
    )
    provider: LiteLLMProvider | None = Field(None, description="LiteLLM provider type")
    custom_provider: str | None = Field(
        None, max_length=100, description="Custom provider name when provider is CUSTOM"
    )
    model_name: str | None = Field(
        None, max_length=100, description="Model name without provider prefix"
    )
    api_key: str | None = Field(None, description="API key for the provider")
    api_base: str | None = Field(
        None, max_length=500, description="Optional API base URL"
    )
    language: str | None = Field(
        None, max_length=50, description="Language for the LLM"
    )
    litellm_params: dict[str, Any] | None = Field(
        None, description="Additional LiteLLM parameters"
    )
class LLMConfigRead(LLMConfigBase, IDModel, TimestampModel):
    """Read schema for an LLM config; timestamp/space are None for global configs."""

    id: int
    created_at: datetime | None = Field(
        None, description="Creation timestamp (None for global configs)"
    )
    search_space_id: int | None = Field(
        None, description="Search space ID (None for global configs)"
    )
    # Allow construction directly from ORM model instances.
    model_config = ConfigDict(from_attributes=True)

View file

@ -0,0 +1,191 @@
"""
Pydantic schemas for the NewLLMConfig API.
NewLLMConfig combines LLM model settings with prompt configuration:
- LLM provider, model, API key, etc.
- Configurable system instructions
- Citation toggle
"""
from datetime import datetime
from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from app.db import LiteLLMProvider
class NewLLMConfigBase(BaseModel):
    """
    Base schema with common fields for NewLLMConfig.

    Combines LLM model settings (provider, model, credentials) with prompt
    configuration (system instructions and citation toggle).
    """

    name: str = Field(
        ..., max_length=100, description="User-friendly name for the configuration"
    )
    description: str | None = Field(
        None, max_length=500, description="Optional description"
    )
    # LLM Model Configuration
    provider: LiteLLMProvider = Field(..., description="LiteLLM provider type")
    custom_provider: str | None = Field(
        None, max_length=100, description="Custom provider name when provider is CUSTOM"
    )
    model_name: str = Field(
        ..., max_length=100, description="Model name without provider prefix"
    )
    api_key: str = Field(..., description="API key for the provider")
    api_base: str | None = Field(
        None, max_length=500, description="Optional API base URL"
    )
    litellm_params: dict[str, Any] | None = Field(
        default=None, description="Additional LiteLLM parameters"
    )
    # Prompt Configuration
    system_instructions: str = Field(
        default="",
        description="Custom system instructions. Empty string uses default SURFSENSE_SYSTEM_INSTRUCTIONS.",
    )
    use_default_system_instructions: bool = Field(
        default=True,
        description="Whether to use default instructions when system_instructions is empty",
    )
    citations_enabled: bool = Field(
        default=True,
        description="Whether to include citation instructions in the system prompt",
    )
class NewLLMConfigCreate(NewLLMConfigBase):
    """Schema for creating a new NewLLMConfig within a specific search space."""

    search_space_id: int = Field(
        ..., description="Search space ID to associate the config with"
    )
class NewLLMConfigUpdate(BaseModel):
    """
    Schema for partially updating an existing NewLLMConfig.

    All fields are optional; omitted fields are left unchanged.
    """

    name: str | None = Field(None, max_length=100)
    description: str | None = Field(None, max_length=500)
    # LLM Model Configuration
    provider: LiteLLMProvider | None = None
    custom_provider: str | None = Field(None, max_length=100)
    model_name: str | None = Field(None, max_length=100)
    api_key: str | None = None
    api_base: str | None = Field(None, max_length=500)
    litellm_params: dict[str, Any] | None = None
    # Prompt Configuration
    system_instructions: str | None = None
    use_default_system_instructions: bool | None = None
    citations_enabled: bool | None = None
class NewLLMConfigRead(NewLLMConfigBase):
    """Schema for reading a NewLLMConfig (includes id, timestamp, and owner space)."""

    id: int
    created_at: datetime
    search_space_id: int
    # Allow construction directly from ORM model instances.
    model_config = ConfigDict(from_attributes=True)
class NewLLMConfigPublic(BaseModel):
    """
    Public schema for NewLLMConfig that hides the API key.

    Used when returning configs in list views or to users who shouldn't see keys.
    """

    id: int
    name: str
    description: str | None = None
    # LLM Model Configuration (no api_key — deliberately excluded)
    provider: LiteLLMProvider
    custom_provider: str | None = None
    model_name: str
    api_base: str | None = None
    litellm_params: dict[str, Any] | None = None
    # Prompt Configuration
    system_instructions: str
    use_default_system_instructions: bool
    citations_enabled: bool
    created_at: datetime
    search_space_id: int
    # Allow construction directly from ORM model instances.
    model_config = ConfigDict(from_attributes=True)
class DefaultSystemInstructionsResponse(BaseModel):
    """Response body for the endpoint that returns the default system instructions."""

    default_system_instructions: str = Field(
        ..., description="The default SURFSENSE_SYSTEM_INSTRUCTIONS template"
    )
class GlobalNewLLMConfigRead(BaseModel):
    """
    Schema for reading global LLM configs from YAML.

    Global configs have negative IDs and no search_space_id.
    API key is hidden for security.
    """

    id: int = Field(..., description="Negative ID for global configs")
    name: str
    description: str | None = None
    # LLM Model Configuration (no api_key — deliberately excluded)
    provider: str  # String because YAML doesn't enforce enum
    custom_provider: str | None = None
    model_name: str
    api_base: str | None = None
    litellm_params: dict[str, Any] | None = None
    # Prompt Configuration (defaults mirror NewLLMConfigBase)
    system_instructions: str = ""
    use_default_system_instructions: bool = True
    citations_enabled: bool = True
    is_global: bool = True  # Always true for global configs
# =============================================================================
# LLM Preferences Schemas (for role assignments)
# =============================================================================
class LLMPreferencesRead(BaseModel):
    """
    Schema for reading LLM preferences (role assignments) for a search space.

    The *_id fields carry the assigned config IDs; the matching *_llm fields
    carry the resolved config objects when available (None otherwise).
    """

    agent_llm_id: int | None = Field(
        None, description="ID of the LLM config to use for agent/chat tasks"
    )
    document_summary_llm_id: int | None = Field(
        None, description="ID of the LLM config to use for document summarization"
    )
    agent_llm: dict[str, Any] | None = Field(
        None, description="Full config for agent LLM"
    )
    document_summary_llm: dict[str, Any] | None = Field(
        None, description="Full config for document summary LLM"
    )
    # Allow construction directly from ORM model instances.
    model_config = ConfigDict(from_attributes=True)
class LLMPreferencesUpdate(BaseModel):
    """Schema for updating LLM preferences; both assignments are optional."""

    agent_llm_id: int | None = Field(
        None, description="ID of the LLM config to use for agent/chat tasks"
    )
    document_summary_llm_id: int | None = Field(
        None, description="ID of the LLM config to use for document summarization"
    )

View file

@ -7,7 +7,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.config import config
from app.db import LLMConfig, SearchSpace
from app.db import NewLLMConfig, SearchSpace
# Configure litellm to automatically drop unsupported parameters
litellm.drop_params = True
@ -16,9 +16,8 @@ logger = logging.getLogger(__name__)
class LLMRole:
LONG_CONTEXT = "long_context"
FAST = "fast"
STRATEGIC = "strategic"
AGENT = "agent" # For agent/chat operations
DOCUMENT_SUMMARY = "document_summary" # For document summarization
def get_global_llm_config(llm_config_id: int) -> dict | None:
@ -155,7 +154,7 @@ async def get_search_space_llm_instance(
Args:
session: Database session
search_space_id: Search Space ID
role: LLM role ('long_context', 'fast', or 'strategic')
role: LLM role ('agent' or 'document_summary')
Returns:
ChatLiteLLM instance or None if not found
@ -173,12 +172,10 @@ async def get_search_space_llm_instance(
# Get the appropriate LLM config ID based on role
llm_config_id = None
if role == LLMRole.LONG_CONTEXT:
llm_config_id = search_space.long_context_llm_id
elif role == LLMRole.FAST:
llm_config_id = search_space.fast_llm_id
elif role == LLMRole.STRATEGIC:
llm_config_id = search_space.strategic_llm_id
if role == LLMRole.AGENT:
llm_config_id = search_space.agent_llm_id
elif role == LLMRole.DOCUMENT_SUMMARY:
llm_config_id = search_space.document_summary_llm_id
else:
logger.error(f"Invalid LLM role: {role}")
return None
@ -250,11 +247,11 @@ async def get_search_space_llm_instance(
return ChatLiteLLM(**litellm_kwargs)
# Get the LLM configuration from database (user-specific config)
# Get the LLM configuration from database (NewLLMConfig)
result = await session.execute(
select(LLMConfig).where(
LLMConfig.id == llm_config_id,
LLMConfig.search_space_id == search_space_id,
select(NewLLMConfig).where(
NewLLMConfig.id == llm_config_id,
NewLLMConfig.search_space_id == search_space_id,
)
)
llm_config = result.scalars().first()
@ -265,11 +262,11 @@ async def get_search_space_llm_instance(
)
return None
# Build the model string for litellm / 构建 LiteLLM 的模型字符串
# Build the model string for litellm
if llm_config.custom_provider:
model_string = f"{llm_config.custom_provider}/{llm_config.model_name}"
else:
# Map provider enum to litellm format / 将提供商枚举映射为 LiteLLM 格式
# Map provider enum to litellm format
provider_map = {
"OPENAI": "openai",
"ANTHROPIC": "anthropic",
@ -283,7 +280,7 @@ async def get_search_space_llm_instance(
"COMETAPI": "cometapi",
"XAI": "xai",
"BEDROCK": "bedrock",
"AWS_BEDROCK": "bedrock", # Legacy support (backward compatibility)
"AWS_BEDROCK": "bedrock",
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
@ -296,7 +293,6 @@ async def get_search_space_llm_instance(
"AI21": "ai21",
"CLOUDFLARE": "cloudflare",
"DATABRICKS": "databricks",
# Chinese LLM providers
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
@ -330,28 +326,19 @@ async def get_search_space_llm_instance(
return None
async def get_long_context_llm(
async def get_agent_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Get the search space's long context LLM instance."""
"""Get the search space's agent LLM instance for chat operations."""
return await get_search_space_llm_instance(session, search_space_id, LLMRole.AGENT)
async def get_document_summary_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Get the search space's document summary LLM instance."""
return await get_search_space_llm_instance(
session, search_space_id, LLMRole.LONG_CONTEXT
)
async def get_fast_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Get the search space's fast LLM instance."""
return await get_search_space_llm_instance(session, search_space_id, LLMRole.FAST)
async def get_strategic_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Get the search space's strategic LLM instance."""
return await get_search_space_llm_instance(
session, search_space_id, LLMRole.STRATEGIC
session, search_space_id, LLMRole.DOCUMENT_SUMMARY
)
@ -366,22 +353,54 @@ async def get_user_llm_instance(
return await get_search_space_llm_instance(session, search_space_id, role)
# Legacy aliases for backward compatibility
async def get_long_context_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_document_summary_llm instead."""
return await get_document_summary_llm(session, search_space_id)
async def get_fast_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_agent_llm instead."""
return await get_agent_llm(session, search_space_id)
async def get_strategic_llm(
session: AsyncSession, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_document_summary_llm instead."""
return await get_document_summary_llm(session, search_space_id)
# User-based legacy aliases (LLM preferences are now per-search-space, not per-user)
async def get_user_long_context_llm(
session: AsyncSession, user_id: str, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_long_context_llm instead."""
return await get_long_context_llm(session, search_space_id)
"""
Deprecated: Use get_document_summary_llm instead.
The user_id parameter is ignored as LLM preferences are now per-search-space.
"""
return await get_document_summary_llm(session, search_space_id)
async def get_user_fast_llm(
session: AsyncSession, user_id: str, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_fast_llm instead."""
return await get_fast_llm(session, search_space_id)
"""
Deprecated: Use get_agent_llm instead.
The user_id parameter is ignored as LLM preferences are now per-search-space.
"""
return await get_agent_llm(session, search_space_id)
async def get_user_strategic_llm(
session: AsyncSession, user_id: str, search_space_id: int
) -> ChatLiteLLM | None:
"""Deprecated: Use get_strategic_llm instead."""
return await get_strategic_llm(session, search_space_id)
"""
Deprecated: Use get_document_summary_llm instead.
The user_id parameter is ignored as LLM preferences are now per-search-space.
"""
return await get_document_summary_llm(session, search_space_id)

View file

@ -4,7 +4,7 @@ from typing import Any
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.llm_service import get_strategic_llm
from app.services.llm_service import get_document_summary_llm
class QueryService:
@ -20,7 +20,7 @@ class QueryService:
chat_history_str: str | None = None,
) -> str:
"""
Reformulate the user query using the search space's strategic LLM to make it more
Reformulate the user query using the search space's document summary LLM to make it more
effective for information retrieval and research purposes.
Args:
@ -36,11 +36,11 @@ class QueryService:
return user_query
try:
# Get the search space's strategic LLM instance
llm = await get_strategic_llm(session, search_space_id)
# Get the search space's document summary LLM instance
llm = await get_document_summary_llm(session, search_space_id)
if not llm:
print(
f"Warning: No strategic LLM configured for search space {search_space_id}. Using original query."
f"Warning: No document summary LLM configured for search space {search_space_id}. Using original query."
)
return user_query

View file

@ -1,191 +0,0 @@
import json
from typing import Any
class StreamingService:
    """
    Builds wire-format frames for the chat streaming protocol.

    Each ``format_*`` method returns one protocol line:
    ``0:<json>`` text chunk, ``3:<json>`` error, ``8:<json>`` annotation
    delta, ``d:<json>`` completion summary. The instance also mirrors
    everything emitted so far in ``message_annotations``.
    """

    def __init__(self):
        # Next id to assign to a terminal-info message (1-based).
        self.terminal_idx = 1
        # Accumulated annotation state, indexed by kind:
        # [0] terminal messages, [1] sources, [2] answer, [3] further questions.
        self.message_annotations = [
            {"type": "TERMINAL_INFO", "content": []},
            {"type": "SOURCES", "content": []},
            {"type": "ANSWER", "content": []},
            {"type": "FURTHER_QUESTIONS", "content": []},
        ]

    def _format_annotations(self) -> str:
        """
        Serialize the FULL annotation state as a single frame.

        DEPRECATED: resends everything on every call; prefer the delta
        formatters below for efficient streaming.
        """
        return f"8:{json.dumps(self.message_annotations)}\n"

    def format_terminal_info_delta(self, text: str, message_type: str = "info") -> str:
        """
        Emit a single terminal message (info, error, success, ...) as a
        delta annotation and advance the terminal message counter.
        """
        entry = {"id": self.terminal_idx, "text": text, "type": message_type}
        self.terminal_idx += 1
        # Mirror into accumulated state, then emit only the new entry.
        self.message_annotations[0]["content"].append(entry)
        frame = {"type": "TERMINAL_INFO", "data": entry}
        return f"8:[{json.dumps(frame)}]\n"

    def format_sources_delta(self, sources: list[dict[str, Any]]) -> str:
        """
        Emit the given source groups as a delta annotation of flat nodes.

        Each node carries the source id/text/url plus metadata taken from
        its enclosing group (type and name).
        """
        # Replace the accumulated sources state wholesale.
        self.message_annotations[1]["content"] = sources
        nodes = [
            {
                "id": str(src.get("id", "")),
                "text": src.get("description", "").strip(),
                "url": src.get("url", ""),
                "metadata": {
                    "title": src.get("title", ""),
                    "source_type": grp.get("type", ""),
                    "group_name": grp.get("name", ""),
                },
            }
            for grp in sources
            for src in grp.get("sources", [])
        ]
        frame = {"type": "sources", "data": {"nodes": nodes}}
        return f"8:[{json.dumps(frame)}]\n"

    def format_answer_delta(self, answer_chunk: str) -> str:
        """Emit one new answer chunk as a delta annotation."""
        # Append to accumulated answer state (normalizing to a list).
        current = self.message_annotations[2]["content"]
        if isinstance(current, list):
            current.append(answer_chunk)
        else:
            self.message_annotations[2]["content"] = [answer_chunk]
        frame = {"type": "ANSWER", "content": [answer_chunk]}
        return f"8:[{json.dumps(frame)}]\n"

    def format_answer_annotation(self, answer_lines: list[str]) -> str:
        """Emit the complete answer as a full replacement annotation."""
        self.message_annotations[2]["content"] = answer_lines
        frame = {"type": "ANSWER", "content": answer_lines}
        return f"8:[{json.dumps(frame)}]\n"

    def format_further_questions_delta(
        self, further_questions: list[dict[str, Any]]
    ) -> str:
        """
        Emit follow-up questions as a delta annotation.

        Only the non-empty question strings are sent on the wire; the raw
        objects are kept in the accumulated state.
        """
        self.message_annotations[3]["content"] = further_questions
        question_texts = [
            item.get("question", "")
            for item in further_questions
            if item.get("question", "") != ""
        ]
        frame = {"type": "FURTHER_QUESTIONS", "data": question_texts}
        return f"8:[{json.dumps(frame)}]\n"

    def format_text_chunk(self, text: str) -> str:
        """Emit a plain text chunk using the text stream part ("0:")."""
        return f"0:{json.dumps(text)}\n"

    def format_error(self, error_message: str) -> str:
        """Emit an error using the error stream part ("3:")."""
        return f"3:{json.dumps(error_message)}\n"

    def format_completion(
        self, prompt_tokens: int = 156, completion_tokens: int = 204
    ) -> str:
        """
        Emit the final completion frame ("d:") with token usage.

        The defaults are placeholder token counts used when real usage
        numbers are not supplied by the caller.
        """
        usage = {
            "promptTokens": prompt_tokens,
            "completionTokens": completion_tokens,
            "totalTokens": prompt_tokens + completion_tokens,
        }
        frame = {"finishReason": "stop", "usage": usage}
        return f"d:{json.dumps(frame)}\n"

View file

@ -3,10 +3,15 @@ Streaming task for the new SurfSense deep agent chat.
This module streams responses from the deep agent using the Vercel AI SDK
Data Stream Protocol (SSE format).
Supports loading LLM configurations from:
- YAML files (negative IDs for global configs)
- NewLLMConfig database table (positive IDs for user-created configs with prompt settings)
"""
import json
from collections.abc import AsyncGenerator
from langchain_core.messages import HumanMessage
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
@ -14,11 +19,14 @@ from sqlalchemy.future import select
from app.agents.new_chat.chat_deepagent import create_surfsense_deep_agent
from app.agents.new_chat.checkpointer import get_checkpointer
from app.agents.new_chat.llm_config import (
AgentConfig,
create_chat_litellm_from_agent_config,
create_chat_litellm_from_config,
load_agent_config,
load_llm_config_from_yaml,
)
from app.db import Document
from app.schemas.new_chat import ChatAttachment, ChatMessage
from app.schemas.new_chat import ChatAttachment
from app.services.connector_service import ConnectorService
from app.services.new_streaming_service import VercelStreamingService
@ -67,7 +75,6 @@ async def stream_new_chat(
chat_id: int,
session: AsyncSession,
llm_config_id: int = -1,
messages: list[ChatMessage] | None = None,
attachments: list[ChatAttachment] | None = None,
mentioned_document_ids: list[int] | None = None,
) -> AsyncGenerator[str, None]:
@ -97,17 +104,40 @@ async def stream_new_chat(
current_text_id: str | None = None
try:
# Load LLM config
llm_config = load_llm_config_from_yaml(llm_config_id=llm_config_id)
if not llm_config:
yield streaming_service.format_error(
f"Failed to load LLM config with id {llm_config_id}"
)
yield streaming_service.format_done()
return
# Load LLM config - supports both YAML (negative IDs) and database (positive IDs)
agent_config: AgentConfig | None = None
if llm_config_id >= 0:
# Positive ID: Load from NewLLMConfig database table
agent_config = await load_agent_config(
session=session,
config_id=llm_config_id,
search_space_id=search_space_id,
)
if not agent_config:
yield streaming_service.format_error(
f"Failed to load NewLLMConfig with id {llm_config_id}"
)
yield streaming_service.format_done()
return
# Create ChatLiteLLM from AgentConfig
llm = create_chat_litellm_from_agent_config(agent_config)
else:
# Negative ID: Load from YAML (global configs)
llm_config = load_llm_config_from_yaml(llm_config_id=llm_config_id)
if not llm_config:
yield streaming_service.format_error(
f"Failed to load LLM config with id {llm_config_id}"
)
yield streaming_service.format_done()
return
# Create ChatLiteLLM from YAML config dict
llm = create_chat_litellm_from_config(llm_config)
# Create AgentConfig from YAML for consistency (uses defaults for prompt settings)
agent_config = AgentConfig.from_yaml_config(llm_config)
# Create ChatLiteLLM instance
llm = create_chat_litellm_from_config(llm_config)
if not llm:
yield streaming_service.format_error("Failed to create LLM instance")
yield streaming_service.format_done()
@ -119,13 +149,14 @@ async def stream_new_chat(
# Get the PostgreSQL checkpointer for persistent conversation memory
checkpointer = await get_checkpointer()
# Create the deep agent with checkpointer with podcast capability
# Create the deep agent with checkpointer and configurable prompts
agent = create_surfsense_deep_agent(
llm=llm,
search_space_id=search_space_id,
db_session=session,
connector_service=connector_service,
checkpointer=checkpointer,
agent_config=agent_config, # Pass prompt configuration
)
# Build input with message history from frontend
@ -223,7 +254,9 @@ async def stream_new_chat(
analyze_step_id = next_thinking_step_id()
last_active_step_id = analyze_step_id
last_active_step_title = "Understanding your request"
last_active_step_items = [f"Processing: {user_query[:80]}{'...' if len(user_query) > 80 else ''}"]
last_active_step_items = [
f"Processing: {user_query[:80]}{'...' if len(user_query) > 80 else ''}"
]
yield streaming_service.format_thinking_step(
step_id=analyze_step_id,
title="Understanding your request",
@ -298,7 +331,9 @@ async def stream_new_chat(
else str(tool_input)
)
last_active_step_title = "Searching knowledge base"
last_active_step_items = [f"Query: {query[:100]}{'...' if len(query) > 100 else ''}"]
last_active_step_items = [
f"Query: {query[:100]}{'...' if len(query) > 100 else ''}"
]
yield streaming_service.format_thinking_step(
step_id=tool_step_id,
title="Searching knowledge base",
@ -312,7 +347,9 @@ async def stream_new_chat(
else str(tool_input)
)
last_active_step_title = "Fetching link preview"
last_active_step_items = [f"URL: {url[:80]}{'...' if len(url) > 80 else ''}"]
last_active_step_items = [
f"URL: {url[:80]}{'...' if len(url) > 80 else ''}"
]
yield streaming_service.format_thinking_step(
step_id=tool_step_id,
title="Fetching link preview",
@ -347,7 +384,9 @@ async def stream_new_chat(
else str(tool_input)
)
last_active_step_title = "Scraping webpage"
last_active_step_items = [f"URL: {url[:80]}{'...' if len(url) > 80 else ''}"]
last_active_step_items = [
f"URL: {url[:80]}{'...' if len(url) > 80 else ''}"
]
yield streaming_service.format_thinking_step(
step_id=tool_step_id,
title="Scraping webpage",
@ -484,7 +523,9 @@ async def stream_new_chat(
tool_call_id = f"call_{run_id[:32]}" if run_id else "call_unknown"
# Get the original tool step ID to update it (not create a new one)
original_step_id = tool_step_ids.get(run_id, f"thinking-unknown-{run_id[:8]}")
original_step_id = tool_step_ids.get(
run_id, f"thinking-unknown-{run_id[:8]}"
)
# Mark the tool thinking step as completed using the SAME step ID
# Also add to completed set so we don't try to complete it again
@ -495,7 +536,9 @@ async def stream_new_chat(
if isinstance(tool_output, dict):
result_len = tool_output.get("result_length", 0)
if result_len > 0:
result_info = f"Found relevant information ({result_len} chars)"
result_info = (
f"Found relevant information ({result_len} chars)"
)
# Include original query in completed items
completed_items = [*last_active_step_items, result_info]
yield streaming_service.format_thinking_step(
@ -584,7 +627,7 @@ async def stream_new_chat(
if isinstance(tool_output, dict)
else "Podcast"
)
if podcast_status == "processing":
completed_items = [
f"Title: {podcast_title}",
@ -609,7 +652,7 @@ async def stream_new_chat(
]
else:
completed_items = last_active_step_items
yield streaming_service.format_thinking_step(
step_id=original_step_id,
title="Generating podcast",
@ -695,7 +738,9 @@ async def stream_new_chat(
)
# Send terminal message
if isinstance(tool_output, dict):
title = tool_output.get("title") or tool_output.get("alt", "Image")
title = tool_output.get("title") or tool_output.get(
"alt", "Image"
)
yield streaming_service.format_terminal_info(
f"Image displayed: {title[:40]}{'...' if len(title) > 40 else ''}",
"success",

View file

@ -1,23 +0,0 @@
"""
Legacy podcast task for old chat system.
NOTE: The old Chat model has been removed. This module is kept for backwards
compatibility but the generate_chat_podcast function will raise an error
if called. Use generate_content_podcast_task in celery_tasks/podcast_tasks.py
for new-chat podcast generation instead.
"""
from app.db import Podcast # noqa: F401 - imported for backwards compatibility
async def generate_chat_podcast(*args, **kwargs):
    """
    Legacy entry point for podcast generation from the old chat system.

    Always raises NotImplementedError: the old Chat model has been removed.
    Callers should use generate_content_podcast_task for new-chat podcast
    generation instead. All positional/keyword arguments are ignored.
    """
    raise NotImplementedError(
        "generate_chat_podcast is deprecated. The old Chat model has been removed. "
        "Use generate_content_podcast_task for podcast generation from new-chat."
    )

View file

@ -3,10 +3,8 @@
import { CTAHomepage } from "@/components/homepage/cta";
import { FeaturesBentoGrid } from "@/components/homepage/features-bento-grid";
import { FeaturesCards } from "@/components/homepage/features-card";
import { Footer } from "@/components/homepage/footer";
import { HeroSection } from "@/components/homepage/hero-section";
import ExternalIntegrations from "@/components/homepage/integrations";
import { Navbar } from "@/components/homepage/navbar";
export default function HomePage() {
return (

View file

@ -5,9 +5,14 @@ import { Loader2 } from "lucide-react";
import { useParams, usePathname, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import type React from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { llmPreferencesAtom } from "@/atoms/llm-config/llm-config-query.atoms";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { toast } from "sonner";
import { myAccessAtom } from "@/atoms/members/members-query.atoms";
import { updateLLMPreferencesMutationAtom } from "@/atoms/new-llm-config/new-llm-config-mutation.atoms";
import {
globalNewLLMConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { activeSearchSpaceIdAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { DashboardBreadcrumb } from "@/components/dashboard-breadcrumb";
import { LanguageSwitcher } from "@/components/LanguageSwitcher";
@ -33,18 +38,24 @@ export function DashboardClientLayout({
const { search_space_id } = useParams();
const setActiveSearchSpaceIdState = useSetAtom(activeSearchSpaceIdAtom);
const { data: preferences = {}, isFetching: loading, error } = useAtomValue(llmPreferencesAtom);
const {
data: preferences = {},
isFetching: loading,
error,
refetch: refetchPreferences,
} = useAtomValue(llmPreferencesAtom);
const { data: globalConfigs = [], isFetching: globalConfigsLoading } =
useAtomValue(globalNewLLMConfigsAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
const isOnboardingComplete = useCallback(() => {
return !!(
preferences.long_context_llm_id &&
preferences.fast_llm_id &&
preferences.strategic_llm_id
);
return !!(preferences.agent_llm_id && preferences.document_summary_llm_id);
}, [preferences]);
const { data: access = null, isLoading: accessLoading } = useAtomValue(myAccessAtom);
const [hasCheckedOnboarding, setHasCheckedOnboarding] = useState(false);
const [isAutoConfiguring, setIsAutoConfiguring] = useState(false);
const hasAttemptedAutoConfig = useRef(false);
// Skip onboarding check if we're already on the onboarding page
const isOnboardingPage = pathname?.includes("/onboard");
@ -89,27 +100,82 @@ export function DashboardClientLayout({
return;
}
// Wait for both preferences and access data to load
if (!loading && !accessLoading && !hasCheckedOnboarding) {
// Wait for all data to load
if (
!loading &&
!accessLoading &&
!globalConfigsLoading &&
!hasCheckedOnboarding &&
!isAutoConfiguring
) {
const onboardingComplete = isOnboardingComplete();
// Only redirect to onboarding if user is the owner and onboarding is not complete
// Invited members (non-owners) should skip onboarding and use existing config
if (!onboardingComplete && isOwner) {
router.push(`/dashboard/${searchSpaceId}/onboard`);
// If onboarding is complete, nothing to do
if (onboardingComplete) {
setHasCheckedOnboarding(true);
return;
}
// Only handle onboarding for owners
if (!isOwner) {
setHasCheckedOnboarding(true);
return;
}
// If global configs available, auto-configure without going to onboard page
if (globalConfigs.length > 0 && !hasAttemptedAutoConfig.current) {
hasAttemptedAutoConfig.current = true;
setIsAutoConfiguring(true);
const autoConfigureWithGlobal = async () => {
try {
const firstGlobalConfig = globalConfigs[0];
await updatePreferences({
search_space_id: Number(searchSpaceId),
data: {
agent_llm_id: firstGlobalConfig.id,
document_summary_llm_id: firstGlobalConfig.id,
},
});
await refetchPreferences();
toast.success("AI configured automatically!", {
description: `Using ${firstGlobalConfig.name}. Customize in Settings.`,
});
setHasCheckedOnboarding(true);
} catch (error) {
console.error("Auto-configuration failed:", error);
// Fall back to onboard page
router.push(`/dashboard/${searchSpaceId}/onboard`);
} finally {
setIsAutoConfiguring(false);
}
};
autoConfigureWithGlobal();
return;
}
// No global configs - redirect to onboard page
router.push(`/dashboard/${searchSpaceId}/onboard`);
setHasCheckedOnboarding(true);
}
}, [
loading,
accessLoading,
globalConfigsLoading,
isOnboardingComplete,
isOnboardingPage,
isOwner,
isAutoConfiguring,
globalConfigs,
router,
searchSpaceId,
hasCheckedOnboarding,
updatePreferences,
refetchPreferences,
]);
// Synchronize active search space and chat IDs with URL
@ -124,14 +190,25 @@ export function DashboardClientLayout({
setActiveSearchSpaceIdState(activeSeacrhSpaceId);
}, [search_space_id, setActiveSearchSpaceIdState]);
// Show loading screen while checking onboarding status (only on first load)
if (!hasCheckedOnboarding && (loading || accessLoading) && !isOnboardingPage) {
// Show loading screen while checking onboarding status or auto-configuring
if (
(!hasCheckedOnboarding &&
(loading || accessLoading || globalConfigsLoading) &&
!isOnboardingPage) ||
isAutoConfiguring
) {
return (
<div className="flex flex-col items-center justify-center min-h-screen space-y-4">
<Card className="w-[350px] bg-background/60 backdrop-blur-sm">
<CardHeader className="pb-2">
<CardTitle className="text-xl font-medium">{t("loading_config")}</CardTitle>
<CardDescription>{t("checking_llm_prefs")}</CardDescription>
<CardTitle className="text-xl font-medium">
{isAutoConfiguring ? "Setting up AI..." : t("loading_config")}
</CardTitle>
<CardDescription>
{isAutoConfiguring
? "Auto-configuring with available settings"
: t("checking_llm_prefs")}
</CardDescription>
</CardHeader>
<CardContent className="flex justify-center py-6">
<Loader2 className="h-12 w-12 text-primary animate-spin" />

View file

@ -12,11 +12,12 @@ import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { toast } from "sonner";
import { mentionedDocumentIdsAtom, mentionedDocumentsAtom, messageDocumentsMapAtom, type MentionedDocumentInfo } from "@/atoms/chat/mentioned-documents.atom";
import { Thread } from "@/components/assistant-ui/thread";
import { ChatHeader } from "@/components/new-chat/chat-header";
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast";
import { LinkPreviewToolUI } from "@/components/tool-ui/link-preview";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { ScrapeWebpageToolUI } from "@/components/tool-ui/scrape-webpage";
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { getBearerToken } from "@/lib/auth-utils";
import { createAttachmentAdapter, extractAttachmentContent } from "@/lib/chat/attachment-adapter";
import {
@ -36,15 +37,15 @@ import {
*/
function extractThinkingSteps(content: unknown): ThinkingStep[] {
if (!Array.isArray(content)) return [];
const thinkingPart = content.find(
(part: unknown) =>
typeof part === "object" &&
part !== null &&
"type" in part &&
(part: unknown) =>
typeof part === "object" &&
part !== null &&
"type" in part &&
(part as { type: string }).type === "thinking-steps"
) as { type: "thinking-steps"; steps: ThinkingStep[] } | undefined;
return thinkingPart?.steps || [];
}
@ -67,7 +68,7 @@ function extractMentionedDocuments(content: unknown): MentionedDocumentInfo[] {
/**
* Convert backend message to assistant-ui ThreadMessageLike format
* Filters out 'thinking-steps' part as it's handled separately
* Filters out 'thinking-steps' part as it's handled separately via messageThinkingSteps
*/
function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
let content: ThreadMessageLike["content"];
@ -77,16 +78,18 @@ function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
} else if (Array.isArray(msg.content)) {
// Filter out custom metadata parts - they're handled separately
const filteredContent = msg.content.filter(
(part: unknown) => {
if (typeof part !== "object" || part === null || !("type" in part)) return true;
const partType = (part as { type: string }).type;
// Filter out thinking-steps and mentioned-documents
return partType !== "thinking-steps" && partType !== "mentioned-documents";
}
(part: unknown) =>
!(
typeof part === "object" &&
part !== null &&
"type" in part &&
(part as { type: string }).type === "thinking-steps"
)
);
content = filteredContent.length > 0
? (filteredContent as ThreadMessageLike["content"])
: [{ type: "text", text: "" }];
content =
filteredContent.length > 0
? (filteredContent as ThreadMessageLike["content"])
: [{ type: "text", text: "" }];
} else {
content = [{ type: "text", text: String(msg.content) }];
}
@ -102,7 +105,12 @@ function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
/**
* Tools that should render custom UI in the chat.
*/
const TOOLS_WITH_UI = new Set(["generate_podcast", "link_preview", "display_image", "scrape_webpage"]);
const TOOLS_WITH_UI = new Set([
"generate_podcast",
"link_preview",
"display_image",
"scrape_webpage",
]);
/**
* Type for thinking step data from the backend
@ -121,10 +129,11 @@ export default function NewChatPage() {
const [threadId, setThreadId] = useState<number | null>(null);
const [messages, setMessages] = useState<ThreadMessageLike[]>([]);
const [isRunning, setIsRunning] = useState(false);
// Store thinking steps per message ID
const [messageThinkingSteps, setMessageThinkingSteps] = useState<
Map<string, ThinkingStep[]>
>(new Map());
// Store thinking steps per message ID - kept separate from content to avoid
// "unsupported part type" errors from assistant-ui
const [messageThinkingSteps, setMessageThinkingSteps] = useState<Map<string, ThinkingStep[]>>(
new Map()
);
const abortControllerRef = useRef<AbortController | null>(null);
// Get mentioned document IDs from the composer
@ -168,7 +177,7 @@ export default function NewChatPage() {
if (response.messages && response.messages.length > 0) {
const loadedMessages = response.messages.map(convertToThreadMessage);
setMessages(loadedMessages);
// Extract and restore thinking steps from persisted messages
const restoredThinkingSteps = new Map<string, ThinkingStep[]>();
// Extract and restore mentioned documents from persisted messages
@ -309,10 +318,10 @@ export default function NewChatPage() {
// Prepare assistant message
const assistantMsgId = `msg-assistant-${Date.now()}`;
const currentThinkingSteps = new Map<string, ThinkingStepData>();
// Ordered content parts to preserve inline tool call positions
// Each part is either a text segment or a tool call
type ContentPart =
type ContentPart =
| { type: "text"; text: string }
| {
type: "tool-call";
@ -322,13 +331,13 @@ export default function NewChatPage() {
result?: unknown;
};
const contentParts: ContentPart[] = [];
// Track the current text segment index (for appending text deltas)
let currentTextPartIndex = -1;
// Map to track tool call indices for updating results
const toolCallIndices = new Map<string, number>();
// Helper to get or create the current text part for appending text
const appendText = (delta: string) => {
if (currentTextPartIndex >= 0 && contentParts[currentTextPartIndex]?.type === "text") {
@ -340,7 +349,7 @@ export default function NewChatPage() {
currentTextPartIndex = contentParts.length - 1;
}
};
// Helper to add a tool call (this "breaks" the current text segment)
const addToolCall = (toolCallId: string, toolName: string, args: Record<string, unknown>) => {
if (TOOLS_WITH_UI.has(toolName)) {
@ -355,9 +364,12 @@ export default function NewChatPage() {
currentTextPartIndex = -1;
}
};
// Helper to update a tool call's args or result
const updateToolCall = (toolCallId: string, update: { args?: Record<string, unknown>; result?: unknown }) => {
const updateToolCall = (
toolCallId: string,
update: { args?: Record<string, unknown>; result?: unknown }
) => {
const index = toolCallIndices.get(toolCallId);
if (index !== undefined && contentParts[index]?.type === "tool-call") {
const tc = contentParts[index] as ContentPart & { type: "tool-call" };
@ -366,7 +378,7 @@ export default function NewChatPage() {
}
};
// Helper to build content for UI (without thinking-steps)
// Helper to build content for UI (without thinking-steps to avoid assistant-ui errors)
const buildContentForUI = (): ThreadMessageLike["content"] => {
// Filter to only include text parts with content and tool-calls with UI
const filtered = contentParts.filter((part) => {
@ -379,10 +391,10 @@ export default function NewChatPage() {
: [{ type: "text", text: "" }];
};
// Helper to build content for persistence (includes thinking-steps)
// Helper to build content for persistence (includes thinking-steps for restoration)
const buildContentForPersistence = (): unknown[] => {
const parts: unknown[] = [];
// Include thinking steps for persistence
if (currentThinkingSteps.size > 0) {
parts.push({
@ -390,7 +402,7 @@ export default function NewChatPage() {
steps: Array.from(currentThinkingSteps.values()),
});
}
// Add content parts (filtered)
for (const part of contentParts) {
if (part.type === "text" && part.text.length > 0) {
@ -399,7 +411,7 @@ export default function NewChatPage() {
parts.push(part);
}
}
return parts.length > 0 ? parts : [{ type: "text", text: "" }];
};
@ -554,13 +566,12 @@ export default function NewChatPage() {
const stepData = parsed.data as ThinkingStepData;
if (stepData?.id) {
currentThinkingSteps.set(stepData.id, stepData);
// Update message-specific thinking steps
// Update thinking steps state for rendering
// The ThinkingStepsScrollHandler in Thread component
// will handle auto-scrolling when this state changes
setMessageThinkingSteps((prev) => {
const newMap = new Map(prev);
newMap.set(
assistantMsgId,
Array.from(currentThinkingSteps.values())
);
newMap.set(assistantMsgId, Array.from(currentThinkingSteps.values()));
return newMap;
});
}
@ -686,8 +697,11 @@ export default function NewChatPage() {
<LinkPreviewToolUI />
<DisplayImageToolUI />
<ScrapeWebpageToolUI />
<div className="h-[calc(100vh-64px)] max-h-[calc(100vh-64px)] overflow-hidden">
<Thread messageThinkingSteps={messageThinkingSteps} />
<div className="flex flex-col h-[calc(100vh-64px)] max-h-[calc(100vh-64px)] overflow-hidden">
<ChatHeader searchSpaceId={searchSpaceId} />
<div className="flex-1 min-h-0 overflow-hidden">
<Thread messageThinkingSteps={messageThinkingSteps} />
</div>
</div>
</AssistantRuntimeProvider>
);

View file

@ -1,312 +1,268 @@
"use client";
import { useAtomValue } from "jotai";
import { FileText, MessageSquare, UserPlus, Users } from "lucide-react";
import { Loader2 } from "lucide-react";
import { motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useEffect, useRef, useState } from "react";
import { toast } from "sonner";
import { updateLLMPreferencesMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
createNewLLMConfigMutationAtom,
updateLLMPreferencesMutationAtom,
} from "@/atoms/new-llm-config/new-llm-config-mutation.atoms";
import {
globalNewLLMConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { OnboardActionCard } from "@/components/onboard/onboard-action-card";
import { OnboardAdvancedSettings } from "@/components/onboard/onboard-advanced-settings";
import { OnboardHeader } from "@/components/onboard/onboard-header";
import { OnboardLLMSetup } from "@/components/onboard/onboard-llm-setup";
import { OnboardLoading } from "@/components/onboard/onboard-loading";
import { OnboardStats } from "@/components/onboard/onboard-stats";
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { Logo } from "@/components/Logo";
import { LLMConfigForm, type LLMConfigFormData } from "@/components/shared/llm-config-form";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { getBearerToken, redirectToLogin } from "@/lib/auth-utils";
const OnboardPage = () => {
const t = useTranslations("onboard");
export default function OnboardPage() {
const router = useRouter();
const params = useParams();
const searchSpaceId = Number(params.search_space_id);
// Queries
const {
data: llmConfigs = [],
isFetching: configsLoading,
refetch: refreshConfigs,
} = useAtomValue(llmConfigsAtom);
const { data: globalConfigs = [], isFetching: globalConfigsLoading } =
useAtomValue(globalLLMConfigsAtom);
const {
data: preferences = {},
isFetching: preferencesLoading,
refetch: refreshPreferences,
} = useAtomValue(llmPreferencesAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
data: globalConfigs = [],
isFetching: globalConfigsLoading,
isSuccess: globalConfigsLoaded,
} = useAtomValue(globalNewLLMConfigsAtom);
const { data: preferences = {}, isFetching: preferencesLoading } =
useAtomValue(llmPreferencesAtom);
// Compute isOnboardingComplete
const isOnboardingComplete = useMemo(() => {
return !!(
preferences.long_context_llm_id &&
preferences.fast_llm_id &&
preferences.strategic_llm_id
);
}, [preferences]);
// Mutations
const { mutateAsync: createConfig, isPending: isCreating } = useAtomValue(
createNewLLMConfigMutationAtom
);
const { mutateAsync: updatePreferences, isPending: isUpdatingPreferences } = useAtomValue(
updateLLMPreferencesMutationAtom
);
// State
const [isAutoConfiguring, setIsAutoConfiguring] = useState(false);
const [autoConfigComplete, setAutoConfigComplete] = useState(false);
const [showAdvancedSettings, setShowAdvancedSettings] = useState(false);
const [showPromptSettings, setShowPromptSettings] = useState(false);
const handleRefreshPreferences = useCallback(async () => {
await refreshPreferences();
}, []);
// Track if we've already attempted auto-configuration
const hasAttemptedAutoConfig = useRef(false);
// Track if onboarding was complete on initial mount
const wasCompleteOnMount = useRef<boolean | null>(null);
const hasCheckedInitialState = useRef(false);
// Check if user is authenticated
// Check authentication
useEffect(() => {
const token = getBearerToken();
if (!token) {
// Save current path and redirect to login
redirectToLogin();
return;
}
}, []);
// Capture onboarding state on first load
// Check if onboarding is already complete
const isOnboardingComplete = preferences.agent_llm_id && preferences.document_summary_llm_id;
// If onboarding is already complete, redirect immediately
useEffect(() => {
if (
!hasCheckedInitialState.current &&
!preferencesLoading &&
!configsLoading &&
!globalConfigsLoading
) {
wasCompleteOnMount.current = isOnboardingComplete;
hasCheckedInitialState.current = true;
if (!preferencesLoading && isOnboardingComplete) {
router.push(`/dashboard/${searchSpaceId}/new-chat`);
}
}, [preferencesLoading, configsLoading, globalConfigsLoading, isOnboardingComplete]);
}, [preferencesLoading, isOnboardingComplete, router, searchSpaceId]);
// Redirect to dashboard if onboarding was already complete
// Auto-configure if global configs are available
useEffect(() => {
if (
wasCompleteOnMount.current === true &&
!preferencesLoading &&
!configsLoading &&
!globalConfigsLoading
) {
const timer = setTimeout(() => {
router.push(`/dashboard/${searchSpaceId}`);
}, 300);
return () => clearTimeout(timer);
}
}, [preferencesLoading, configsLoading, globalConfigsLoading, router, searchSpaceId]);
const autoConfigureWithGlobal = async () => {
if (hasAttemptedAutoConfig.current) return;
if (globalConfigsLoading || preferencesLoading) return;
if (!globalConfigsLoaded) return;
if (isOnboardingComplete) return;
// Auto-configure LLM roles if global configs are available
const autoConfigureLLMs = useCallback(async () => {
if (hasAttemptedAutoConfig.current) return;
if (globalConfigs.length === 0) return;
if (isOnboardingComplete) {
setAutoConfigComplete(true);
return;
}
// Only auto-configure if we have global configs
if (globalConfigs.length > 0) {
hasAttemptedAutoConfig.current = true;
setIsAutoConfiguring(true);
hasAttemptedAutoConfig.current = true;
setIsAutoConfiguring(true);
try {
const firstGlobalConfig = globalConfigs[0];
try {
const allConfigs = [...globalConfigs, ...llmConfigs];
await updatePreferences({
search_space_id: searchSpaceId,
data: {
agent_llm_id: firstGlobalConfig.id,
document_summary_llm_id: firstGlobalConfig.id,
},
});
if (allConfigs.length === 0) {
setIsAutoConfiguring(false);
return;
toast.success("AI configured automatically!", {
description: `Using ${firstGlobalConfig.name}. You can customize this later in Settings.`,
});
// Redirect to new-chat
router.push(`/dashboard/${searchSpaceId}/new-chat`);
} catch (error) {
console.error("Auto-configuration failed:", error);
toast.error("Auto-configuration failed. Please add a configuration manually.");
setIsAutoConfiguring(false);
}
}
};
// Use first available config for all roles
const defaultConfigId = allConfigs[0].id;
autoConfigureWithGlobal();
}, [
globalConfigs,
globalConfigsLoading,
globalConfigsLoaded,
preferencesLoading,
isOnboardingComplete,
updatePreferences,
searchSpaceId,
router,
]);
const newPreferences = {
long_context_llm_id: defaultConfigId,
fast_llm_id: defaultConfigId,
strategic_llm_id: defaultConfigId,
};
// Handle form submission
const handleSubmit = async (formData: LLMConfigFormData) => {
try {
// Create the config
const newConfig = await createConfig(formData);
// Auto-assign to all roles
await updatePreferences({
search_space_id: searchSpaceId,
data: newPreferences,
data: {
agent_llm_id: newConfig.id,
document_summary_llm_id: newConfig.id,
},
});
await refreshPreferences();
setAutoConfigComplete(true);
toast.success("AI models configured automatically!", {
description: "You can customize these in advanced settings.",
toast.success("Configuration created!", {
description: "Redirecting to chat...",
});
// Redirect to new-chat
router.push(`/dashboard/${searchSpaceId}/new-chat`);
} catch (error) {
console.error("Auto-configuration failed:", error);
} finally {
setIsAutoConfiguring(false);
console.error("Failed to create config:", error);
if (error instanceof Error) {
toast.error(error.message || "Failed to create configuration");
}
}
}, [globalConfigs, llmConfigs, isOnboardingComplete, updatePreferences, refreshPreferences]);
};
// Trigger auto-configuration once data is loaded
useEffect(() => {
if (!configsLoading && !globalConfigsLoading && !preferencesLoading) {
autoConfigureLLMs();
}
}, [configsLoading, globalConfigsLoading, preferencesLoading, autoConfigureLLMs]);
const allConfigs = [...globalConfigs, ...llmConfigs];
const isReady = autoConfigComplete || isOnboardingComplete;
const isSubmitting = isCreating || isUpdatingPreferences;
// Loading state
if (configsLoading || preferencesLoading || globalConfigsLoading || isAutoConfiguring) {
if (globalConfigsLoading || preferencesLoading || isAutoConfiguring) {
return (
<OnboardLoading
title={isAutoConfiguring ? "Setting up your AI assistant..." : t("loading_config")}
subtitle={
isAutoConfiguring
? "Auto-configuring optimal settings for you"
: "Please wait while we load your configuration"
}
/>
);
}
// Show LLM setup if no configs available OR if roles are not assigned yet
// This forces users to complete role assignment before seeing the final screen
if (allConfigs.length === 0 || !isOnboardingComplete) {
return (
<OnboardLLMSetup
searchSpaceId={searchSpaceId}
title={t("welcome_title")}
configTitle={
allConfigs.length === 0 ? t("setup_llm_configuration") : t("assign_llm_roles_title")
}
configDescription={
allConfigs.length === 0
? t("configure_providers_and_assign_roles")
: t("complete_role_assignment")
}
onConfigCreated={() => refreshConfigs()}
onConfigDeleted={() => refreshConfigs()}
onPreferencesUpdated={handleRefreshPreferences}
/>
);
}
// Main onboarding view
return (
<div className="min-h-screen bg-background">
<div className="flex items-center justify-center min-h-screen p-4 md:p-8">
<div className="min-h-screen bg-gradient-to-b from-background to-muted/20 flex items-center justify-center">
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ duration: 0.6 }}
className="w-full max-w-5xl"
initial={{ opacity: 0, scale: 0.95 }}
animate={{ opacity: 1, scale: 1 }}
className="text-center space-y-6"
>
<div className="relative">
<div className="absolute inset-0 blur-3xl bg-gradient-to-r from-violet-500/20 to-cyan-500/20 rounded-full" />
<div className="relative flex items-center justify-center w-24 h-24 mx-auto rounded-2xl bg-gradient-to-br from-violet-500 to-purple-600 shadow-2xl shadow-violet-500/25">
<Loader2 className="h-12 w-12 text-white animate-spin" />
</div>
</div>
<div className="space-y-2">
<h2 className="text-2xl font-bold tracking-tight">
{isAutoConfiguring ? "Setting up your AI..." : "Loading..."}
</h2>
<p className="text-muted-foreground">
{isAutoConfiguring
? "Auto-configuring with available settings"
: "Please wait while we check your configuration"}
</p>
</div>
<div className="flex justify-center gap-1">
{[0, 1, 2].map((i) => (
<motion.div
key={i}
className="w-2 h-2 rounded-full bg-violet-500"
animate={{ scale: [1, 1.5, 1], opacity: [0.5, 1, 0.5] }}
transition={{ duration: 1, repeat: Infinity, delay: i * 0.2 }}
/>
))}
</div>
</motion.div>
</div>
);
}
// If global configs exist but auto-config failed, show simple message
if (globalConfigs.length > 0 && !isAutoConfiguring) {
return null; // Will redirect via useEffect
}
// No global configs - show the config form
return (
<div className="min-h-screen bg-gradient-to-b from-background via-background to-muted/30">
<div className="container mx-auto px-4 py-8 md:py-12 max-w-3xl">
<motion.div
initial={{ opacity: 0, y: 20 }}
animate={{ opacity: 1, y: 0 }}
transition={{ duration: 0.5 }}
className="space-y-8"
>
{/* Header */}
<OnboardHeader
title={t("welcome_title")}
subtitle={
isReady ? "You're all set! Choose what you'd like to do next." : t("welcome_subtitle")
}
isReady={isReady}
/>
<div className="text-center space-y-4">
<motion.div
initial={{ scale: 0 }}
animate={{ scale: 1 }}
transition={{ type: "spring", delay: 0.2 }}
className="relative inline-block"
>
<Logo className="w-20 h-20 mx-auto rounded-full" />
</motion.div>
{/* Quick Stats */}
<OnboardStats
globalConfigsCount={globalConfigs.length}
userConfigsCount={llmConfigs.length}
/>
<div className="space-y-2">
<h1 className="text-3xl font-bold tracking-tight">Configure Your AI</h1>
<p className="text-muted-foreground text-lg">
Add your LLM provider to get started with SurfSense
</p>
</div>
</div>
{/* Action Cards */}
{/* Config Form */}
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ delay: 0.6 }}
className="grid grid-cols-1 md:grid-cols-3 gap-6 mb-10"
initial={{ opacity: 0, y: 20 }}
animate={{ opacity: 1, y: 0 }}
transition={{ delay: 0.3 }}
>
<OnboardActionCard
title="Start Chatting"
description="Jump right into the AI researcher and start asking questions"
icon={MessageSquare}
features={[
"AI-powered conversations",
"Research and explore topics",
"Get instant insights",
]}
buttonText="Start Chatting"
onClick={() => router.push(`/dashboard/${searchSpaceId}/researcher`)}
colorScheme="violet"
delay={0.9}
/>
<OnboardActionCard
title="Add Sources"
description="Connect your data sources to start building your knowledge base"
icon={FileText}
features={[
"Connect documents and files",
"Import from various sources",
"Build your knowledge base",
]}
buttonText="Add Sources"
onClick={() => router.push(`/dashboard/${searchSpaceId}/sources/add`)}
colorScheme="blue"
delay={0.8}
/>
<OnboardActionCard
title="Manage Team"
description="Invite team members and collaborate on your search space"
icon={Users}
features={[
"Invite team members",
"Assign roles & permissions",
"Collaborate together",
]}
buttonText="Manage Team"
onClick={() => router.push(`/dashboard/${searchSpaceId}/team`)}
colorScheme="emerald"
delay={0.7}
/>
<Card className="border-2 border-muted shadow-xl overflow-hidden">
<CardHeader className="pb-4">
<CardTitle className="text-xl">LLM Configuration</CardTitle>
</CardHeader>
<CardContent>
<LLMConfigForm
searchSpaceId={searchSpaceId}
onSubmit={handleSubmit}
isSubmitting={isSubmitting}
mode="create"
showAdvanced={true}
submitLabel="Start Using SurfSense"
initialData={{
citations_enabled: true,
use_default_system_instructions: true,
}}
/>
</CardContent>
</Card>
</motion.div>
{/* Advanced Settings */}
<OnboardAdvancedSettings
searchSpaceId={searchSpaceId}
showLLMSettings={showAdvancedSettings}
setShowLLMSettings={setShowAdvancedSettings}
showPromptSettings={showPromptSettings}
setShowPromptSettings={setShowPromptSettings}
onConfigCreated={() => refreshConfigs()}
onConfigDeleted={() => refreshConfigs()}
onPreferencesUpdated={handleRefreshPreferences}
/>
{/* Footer */}
<motion.div
{/* Footer note */}
<motion.p
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
transition={{ delay: 1.1 }}
className="text-center mt-10 text-muted-foreground text-sm"
transition={{ delay: 0.5 }}
className="text-center text-sm text-muted-foreground"
>
<p>
You can always adjust these settings later in{" "}
<button
type="button"
onClick={() => router.push(`/dashboard/${searchSpaceId}/settings`)}
className="text-primary hover:underline underline-offset-2 transition-colors"
>
Settings
</button>
</p>
</motion.div>
You can add more configurations and customize settings anytime in{" "}
<button
type="button"
onClick={() => router.push(`/dashboard/${searchSpaceId}/settings`)}
className="text-violet-500 hover:underline"
>
Settings
</button>
</motion.p>
</motion.div>
</div>
</div>
);
};
export default OnboardPage;
}

View file

@ -30,20 +30,20 @@ interface SettingsNavItem {
const settingsNavItems: SettingsNavItem[] = [
{
id: "models",
label: "Model Configs",
description: "Configure AI models and providers",
label: "Agent Configs",
description: "LLM models with prompts & citations",
icon: Bot,
},
{
id: "roles",
label: "LLM Roles",
description: "Manage language model roles",
label: "Role Assignments",
description: "Assign configs to agent roles",
icon: Brain,
},
{
id: "prompts",
label: "System Instructions",
description: "Customize system prompts",
description: "SearchSpace-wide AI instructions",
icon: MessageSquare,
},
];
@ -236,9 +236,6 @@ function SettingsContent({
<h1 className="text-xl md:text-2xl font-bold tracking-tight truncate">
{activeItem?.label}
</h1>
<p className="text-sm text-muted-foreground mt-0.5 truncate">
{activeItem?.description}
</p>
</div>
</div>
</motion.div>
@ -275,7 +272,7 @@ export default function SettingsPage() {
const [isSidebarOpen, setIsSidebarOpen] = useState(false);
const handleBackToApp = useCallback(() => {
router.push(`/dashboard/${searchSpaceId}/researcher`);
router.push(`/dashboard/${searchSpaceId}/new-chat`);
}, [router, searchSpaceId]);
return (

View file

@ -807,7 +807,6 @@ function RolesTab({
<DropdownMenuItem
onClick={() => {
// TODO: Implement edit role dialog/modal
console.log("Edit role not yet implemented", role);
}}
>
<Edit2 className="h-4 w-4 mr-2" />

View file

@ -1,110 +0,0 @@
import { atomWithMutation } from "jotai-tanstack-query";
import { toast } from "sonner";
import type {
CreateLLMConfigRequest,
DeleteLLMConfigRequest,
GetLLMConfigsResponse,
UpdateLLMConfigRequest,
UpdateLLMConfigResponse,
UpdateLLMPreferencesRequest,
} from "@/contracts/types/llm-config.types";
import { llmConfigApiService } from "@/lib/apis/llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { queryClient } from "@/lib/query-client/client";
import { activeSearchSpaceIdAtom } from "../search-spaces/search-space-query.atoms";
export const createLLMConfigMutationAtom = atomWithMutation((get) => {
const searchSpaceId = get(activeSearchSpaceIdAtom);
return {
mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
enabled: !!searchSpaceId,
mutationFn: async (request: CreateLLMConfigRequest) => {
return llmConfigApiService.createLLMConfig(request);
},
onSuccess: () => {
toast.success("LLM configuration created successfully");
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.all(searchSpaceId!),
});
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.global(),
});
},
};
});
export const updateLLMConfigMutationAtom = atomWithMutation((get) => {
const searchSpaceId = get(activeSearchSpaceIdAtom);
return {
mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
enabled: !!searchSpaceId,
mutationFn: async (request: UpdateLLMConfigRequest) => {
return llmConfigApiService.updateLLMConfig(request);
},
onSuccess: (_: UpdateLLMConfigResponse, request: UpdateLLMConfigRequest) => {
toast.success("LLM configuration updated successfully");
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.all(searchSpaceId!),
});
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.byId(String(request.id)),
});
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.global(),
});
},
};
});
export const deleteLLMConfigMutationAtom = atomWithMutation((get) => {
const searchSpaceId = get(activeSearchSpaceIdAtom);
const authToken = localStorage.getItem("surfsense_bearer_token");
return {
mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
enabled: !!searchSpaceId && !!authToken,
mutationFn: async (request: DeleteLLMConfigRequest) => {
return llmConfigApiService.deleteLLMConfig(request);
},
onSuccess: (_, request: DeleteLLMConfigRequest) => {
toast.success("LLM configuration deleted successfully");
queryClient.setQueryData(
cacheKeys.llmConfigs.all(searchSpaceId!),
(oldData: GetLLMConfigsResponse | undefined) => {
if (!oldData) return oldData;
return oldData.filter((config) => config.id !== request.id);
}
);
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.byId(String(request.id)),
});
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.global(),
});
},
};
});
export const updateLLMPreferencesMutationAtom = atomWithMutation((get) => {
const searchSpaceId = get(activeSearchSpaceIdAtom);
return {
mutationKey: cacheKeys.llmConfigs.preferences(searchSpaceId!),
enabled: !!searchSpaceId,
mutationFn: async (request: UpdateLLMPreferencesRequest) => {
return llmConfigApiService.updateLLMPreferences(request);
},
onSuccess: () => {
toast.success("LLM preferences updated successfully");
queryClient.invalidateQueries({
queryKey: cacheKeys.llmConfigs.preferences(searchSpaceId!),
});
},
};
});

View file

@ -1,46 +0,0 @@
import { atomWithQuery } from "jotai-tanstack-query";
import { llmConfigApiService } from "@/lib/apis/llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { activeSearchSpaceIdAtom } from "../search-spaces/search-space-query.atoms";
/**
 * Query atom exposing every LLM config scoped to the active search space.
 * Disabled until a search space is selected; results are cached for 5 minutes.
 */
export const llmConfigsAtom = atomWithQuery((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		queryKey: cacheKeys.llmConfigs.all(activeSpaceId!),
		enabled: Boolean(activeSpaceId),
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: () =>
			llmConfigApiService.getLLMConfigs({
				queryParams: {
					search_space_id: activeSpaceId!,
				},
			}),
	};
});
/**
 * Query atom for server-defined global LLM configs. Not tied to a search
 * space, so it is always enabled; cached longer than per-space configs.
 */
export const globalLLMConfigsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.llmConfigs.global(),
	staleTime: 10 * 60 * 1000, // 10 minutes
	queryFn: () => llmConfigApiService.getGlobalLLMConfigs(),
}));
/**
 * Query atom for the LLM preferences (role assignments) of the active
 * search space.
 */
export const llmPreferencesAtom = atomWithQuery((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	return {
		// Use the raw id (not String(...)): the preferences mutation invalidates
		// `cacheKeys.llmConfigs.preferences(searchSpaceId!)`, and wrapping the id
		// in String() here would yield a different query key for numeric ids,
		// so the invalidation would never match this query.
		queryKey: cacheKeys.llmConfigs.preferences(searchSpaceId!),
		enabled: !!searchSpaceId,
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: async () => {
			return llmConfigApiService.getLLMPreferences({
				search_space_id: Number(searchSpaceId),
			});
		},
	};
});

View file

@ -0,0 +1,116 @@
import { atomWithMutation } from "jotai-tanstack-query";
import { toast } from "sonner";
import type {
CreateNewLLMConfigRequest,
DeleteNewLLMConfigRequest,
GetNewLLMConfigsResponse,
UpdateLLMPreferencesRequest,
UpdateNewLLMConfigRequest,
UpdateNewLLMConfigResponse,
} from "@/contracts/types/new-llm-config.types";
import { newLLMConfigApiService } from "@/lib/apis/new-llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { queryClient } from "@/lib/query-client/client";
import { activeSearchSpaceIdAtom } from "../search-spaces/search-space-query.atoms";
/**
 * Mutation atom for creating a new NewLLMConfig in the active search space.
 * On success, refetches the config list so the new entry appears immediately.
 */
export const createNewLLMConfigMutationAtom = atomWithMutation((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		mutationKey: ["new-llm-configs", "create"],
		enabled: Boolean(activeSpaceId),
		mutationFn: (request: CreateNewLLMConfigRequest) =>
			newLLMConfigApiService.createConfig(request),
		onSuccess: () => {
			toast.success("Configuration created successfully");
			queryClient.invalidateQueries({
				queryKey: cacheKeys.newLLMConfigs.all(Number(activeSpaceId)),
			});
		},
		onError: (error: Error) => {
			toast.error(error.message || "Failed to create configuration");
		},
	};
});
/**
 * Mutation atom for updating an existing NewLLMConfig. On success, refetches
 * both the list for the active search space and the individual config entry.
 */
export const updateNewLLMConfigMutationAtom = atomWithMutation((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		mutationKey: ["new-llm-configs", "update"],
		enabled: Boolean(activeSpaceId),
		mutationFn: (request: UpdateNewLLMConfigRequest) =>
			newLLMConfigApiService.updateConfig(request),
		onSuccess: (_: UpdateNewLLMConfigResponse, request: UpdateNewLLMConfigRequest) => {
			toast.success("Configuration updated successfully");
			queryClient.invalidateQueries({
				queryKey: cacheKeys.newLLMConfigs.all(Number(activeSpaceId)),
			});
			queryClient.invalidateQueries({
				queryKey: cacheKeys.newLLMConfigs.byId(request.id),
			});
		},
		onError: (error: Error) => {
			toast.error(error.message || "Failed to update configuration");
		},
	};
});
/**
* Mutation atom for deleting a NewLLMConfig
*/
export const deleteNewLLMConfigMutationAtom = atomWithMutation((get) => {
const searchSpaceId = get(activeSearchSpaceIdAtom);
return {
mutationKey: ["new-llm-configs", "delete"],
enabled: !!searchSpaceId,
mutationFn: async (request: DeleteNewLLMConfigRequest) => {
return newLLMConfigApiService.deleteConfig(request);
},
onSuccess: (_, request: DeleteNewLLMConfigRequest) => {
toast.success("Configuration deleted successfully");
queryClient.setQueryData(
cacheKeys.newLLMConfigs.all(Number(searchSpaceId)),
(oldData: GetNewLLMConfigsResponse | undefined) => {
if (!oldData) return oldData;
return oldData.filter((config) => config.id !== request.id);
}
);
},
onError: (error: Error) => {
toast.error(error.message || "Failed to delete configuration");
},
};
});
/**
 * Mutation atom for updating LLM preferences (role assignments) in the
 * active search space. Refetches the preferences cache on success.
 */
export const updateLLMPreferencesMutationAtom = atomWithMutation((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		mutationKey: ["llm-preferences", "update"],
		enabled: Boolean(activeSpaceId),
		mutationFn: (request: UpdateLLMPreferencesRequest) =>
			newLLMConfigApiService.updateLLMPreferences(request),
		onSuccess: () => {
			queryClient.invalidateQueries({
				queryKey: cacheKeys.newLLMConfigs.preferences(Number(activeSpaceId)),
			});
		},
		onError: (error: Error) => {
			toast.error(error.message || "Failed to update LLM preferences");
		},
	};
});

View file

@ -0,0 +1,64 @@
import { atomWithQuery } from "jotai-tanstack-query";
import { newLLMConfigApiService } from "@/lib/apis/new-llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { activeSearchSpaceIdAtom } from "../search-spaces/search-space-query.atoms";
/**
 * Query atom for fetching all NewLLMConfigs belonging to the active search
 * space. Disabled until a search space is selected.
 */
export const newLLMConfigsAtom = atomWithQuery((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		queryKey: cacheKeys.newLLMConfigs.all(Number(activeSpaceId)),
		enabled: Boolean(activeSpaceId),
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: () =>
			newLLMConfigApiService.getConfigs({
				search_space_id: Number(activeSpaceId),
			}),
	};
});
/**
 * Query atom for global NewLLMConfigs (from YAML, negative IDs). These are
 * not space-scoped and rarely change, hence the longer stale time.
 */
export const globalNewLLMConfigsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.newLLMConfigs.global(),
	staleTime: 10 * 60 * 1000, // 10 minutes - global configs rarely change
	queryFn: () => newLLMConfigApiService.getGlobalConfigs(),
}));
/**
 * Query atom for the LLM preferences (role assignments) of the active search
 * space. Disabled until a search space is selected.
 */
export const llmPreferencesAtom = atomWithQuery((get) => {
	const activeSpaceId = get(activeSearchSpaceIdAtom);

	return {
		queryKey: cacheKeys.newLLMConfigs.preferences(Number(activeSpaceId)),
		enabled: Boolean(activeSpaceId),
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: () => newLLMConfigApiService.getLLMPreferences(Number(activeSpaceId)),
	};
});
/**
 * Query atom for the default system-instructions template. Cached for an
 * hour since the template rarely changes.
 */
export const defaultSystemInstructionsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.newLLMConfigs.defaultInstructions(),
	staleTime: 60 * 60 * 1000, // 1 hour - this rarely changes
	queryFn: () => newLLMConfigApiService.getDefaultSystemInstructions(),
}));

View file

@ -25,13 +25,3 @@ export const searchSpacesAtom = atomWithQuery((get) => {
},
};
});
/**
 * Query atom for community prompts. Not space-scoped; cached for 30 minutes.
 */
export const communityPromptsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.searchSpaces.communityPrompts,
	staleTime: 30 * 60 * 1000,
	queryFn: () => searchSpacesApiService.getCommunityPrompts(),
}));

View file

@ -7,8 +7,11 @@ import {
MessagePrimitive,
ThreadPrimitive,
useAssistantState,
useThreadViewport,
} from "@assistant-ui/react";
import { useAtomValue } from "jotai";
import {
AlertCircle,
ArrowDownIcon,
ArrowUpIcon,
Brain,
@ -40,7 +43,14 @@ import { documentTypeCountsAtom } from "@/atoms/documents/document-query.atoms";
import { useSearchSourceConnectors } from "@/hooks/use-search-source-connectors";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import { getDocumentTypeLabel } from "@/app/dashboard/[search_space_id]/documents/(manage)/components/DocumentTypeIcon";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { documentTypeCountsAtom } from "@/atoms/documents/document-query.atoms";
import {
globalNewLLMConfigsAtom,
llmPreferencesAtom,
newLLMConfigsAtom,
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { activeSearchSpaceIdAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import {
ComposerAddAttachment,
ComposerAttachments,
@ -57,11 +67,13 @@ import {
ChainOfThoughtTrigger,
} from "@/components/prompt-kit/chain-of-thought";
import { DocumentsDataTable, type DocumentsDataTableRef } from "@/components/new-chat/DocumentsDataTable";
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { Button } from "@/components/ui/button";
import type { Document } from "@/contracts/types/document.types";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import { useSearchSourceConnectors } from "@/hooks/use-search-source-connectors";
import { cn } from "@/lib/utils";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
/**
* Props for the Thread component
@ -78,35 +90,38 @@ const ThinkingStepsContext = createContext<Map<string, ThinkingStep[]>>(new Map(
*/
function getStepIcon(status: "pending" | "in_progress" | "completed", title: string) {
	// Active/finished states take precedence over any title-based icon.
	switch (status) {
		case "in_progress":
			return <Loader2 className="size-4 animate-spin text-primary" />;
		case "completed":
			return <CheckCircle2 className="size-4 text-emerald-500" />;
		default:
			break;
	}

	// Pending steps get an icon hinting at what the step is doing.
	const lowered = title.toLowerCase();
	const mentions = (...words: string[]) => words.some((word) => lowered.includes(word));

	if (mentions("search", "knowledge")) {
		return <Search className="size-4 text-muted-foreground" />;
	}
	if (mentions("analy", "understand")) {
		return <Brain className="size-4 text-muted-foreground" />;
	}
	return <Sparkles className="size-4 text-muted-foreground" />;
}
/**
* Chain of thought display component with smart expand/collapse behavior
*/
const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolean }> = ({ steps, isThreadRunning = true }) => {
const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolean }> = ({
steps,
isThreadRunning = true,
}) => {
// Track which steps the user has manually toggled (overrides auto behavior)
const [manualOverrides, setManualOverrides] = useState<Record<string, boolean>>({});
// Track previous step statuses to detect changes
const prevStatusesRef = useRef<Record<string, string>>({});
// Derive effective status: if thread stopped and step is in_progress, treat as completed
const getEffectiveStatus = (step: ThinkingStep): "pending" | "in_progress" | "completed" => {
if (step.status === "in_progress" && !isThreadRunning) {
@ -114,24 +129,24 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
}
return step.status;
};
// Check if any step is effectively in progress
const hasInProgressStep = steps.some(step => getEffectiveStatus(step) === "in_progress");
const hasInProgressStep = steps.some((step) => getEffectiveStatus(step) === "in_progress");
// Find the last completed step index (using effective status)
const lastCompletedIndex = steps
.map((s, i) => getEffectiveStatus(s) === "completed" ? i : -1)
.filter(i => i !== -1)
.map((s, i) => (getEffectiveStatus(s) === "completed" ? i : -1))
.filter((i) => i !== -1)
.pop();
// Clear manual overrides when a step's status changes
useEffect(() => {
const currentStatuses: Record<string, string> = {};
steps.forEach(step => {
steps.forEach((step) => {
currentStatuses[step.id] = step.status;
// If status changed, clear any manual override for this step
if (prevStatusesRef.current[step.id] && prevStatusesRef.current[step.id] !== step.status) {
setManualOverrides(prev => {
setManualOverrides((prev) => {
const next = { ...prev };
delete next[step.id];
return next;
@ -140,9 +155,9 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
});
prevStatusesRef.current = currentStatuses;
}, [steps]);
if (steps.length === 0) return null;
const getStepOpenState = (step: ThinkingStep, index: number): boolean => {
const effectiveStatus = getEffectiveStatus(step);
// If user has manually toggled, respect that
@ -160,14 +175,14 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
// Default: collapsed
return false;
};
const handleToggle = (stepId: string, currentOpen: boolean) => {
setManualOverrides(prev => ({
setManualOverrides((prev) => ({
...prev,
[stepId]: !currentOpen,
}));
};
return (
<div className="mx-auto w-full max-w-(--thread-max-width) px-2 py-2">
<ChainOfThought>
@ -176,8 +191,8 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
const icon = getStepIcon(effectiveStatus, step.title);
const isOpen = getStepOpenState(step, index);
return (
<ChainOfThoughtStep
key={step.id}
<ChainOfThoughtStep
key={step.id}
open={isOpen}
onOpenChange={() => handleToggle(step.id, isOpen)}
>
@ -194,9 +209,7 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
{step.items && step.items.length > 0 && (
<ChainOfThoughtContent>
{step.items.map((item, idx) => (
<ChainOfThoughtItem key={`${step.id}-item-${idx}`}>
{item}
</ChainOfThoughtItem>
<ChainOfThoughtItem key={`${step.id}-item-${idx}`}>{item}</ChainOfThoughtItem>
))}
</ChainOfThoughtContent>
)}
@ -208,6 +221,56 @@ const ThinkingStepsDisplay: FC<{ steps: ThinkingStep[]; isThreadRunning?: boolea
);
};
/**
 * Invisible helper that keeps the thread viewport pinned to the bottom while
 * thinking steps stream in. It watches the whole steps map and scrolls when
 * any step is added, changes status, or gains items.
 */
const ThinkingStepsScrollHandler: FC = () => {
	const stepsByMessage = useContext(ThinkingStepsContext);
	const viewport = useThreadViewport();
	const isRunning = useAssistantState(({ thread }) => thread.isRunning);

	// Serialized snapshot of the last-seen steps state, used for change detection.
	const lastSnapshotRef = useRef<string>("");

	useEffect(() => {
		// Only auto-scroll while the assistant is streaming.
		if (!isRunning) {
			lastSnapshotRef.current = "";
			return;
		}

		// Flatten the map into one string; any new step, status flip, or item
		// addition produces a different snapshot.
		let snapshot = "";
		stepsByMessage.forEach((steps, msgId) => {
			for (const step of steps) {
				snapshot += `${msgId}:${step.id}:${step.status}:${step.items?.length || 0};`;
			}
		});

		if (snapshot === "" || snapshot === lastSnapshotRef.current) return;
		lastSnapshotRef.current = snapshot;

		const tryScroll = () => {
			try {
				viewport.scrollToBottom();
			} catch {
				// Viewport may not be mounted yet — ignore.
			}
		};
		// Fire after the current frame and once more later to catch async DOM work.
		requestAnimationFrame(tryScroll);
		setTimeout(tryScroll, 100);
	}, [stepsByMessage, viewport, isRunning]);

	return null; // Renders nothing; exists purely for its scroll side effect.
};
export const Thread: FC<ThreadProps> = ({ messageThinkingSteps = new Map() }) => {
return (
<ThinkingStepsContext.Provider value={messageThinkingSteps}>
@ -221,6 +284,9 @@ export const Thread: FC<ThreadProps> = ({ messageThinkingSteps = new Map() }) =>
turnAnchor="top"
className="aui-thread-viewport relative flex flex-1 flex-col overflow-x-auto overflow-y-scroll scroll-smooth px-4 pt-4"
>
{/* Auto-scroll handler for thinking steps - must be inside Viewport */}
<ThinkingStepsScrollHandler />
<AssistantIf condition={({ thread }) => thread.isEmpty}>
<ThreadWelcome />
</AssistantIf>
@ -263,49 +329,24 @@ const ThreadScrollToBottom: FC = () => {
const getTimeBasedGreeting = (userEmail?: string): string => {
const hour = new Date().getHours();
// Extract first name from email if available
const firstName = userEmail
? userEmail.split("@")[0].split(".")[0].charAt(0).toUpperCase() +
userEmail.split("@")[0].split(".")[0].slice(1)
? userEmail.split("@")[0].split(".")[0].charAt(0).toUpperCase() +
userEmail.split("@")[0].split(".")[0].slice(1)
: null;
// Array of greeting variations for each time period
const morningGreetings = [
"Good morning",
"Rise and shine",
"Morning",
"Hey there",
];
const afternoonGreetings = [
"Good afternoon",
"Afternoon",
"Hey there",
"Hi there",
];
const eveningGreetings = [
"Good evening",
"Evening",
"Hey there",
"Hi there",
];
const nightGreetings = [
"Good night",
"Evening",
"Hey there",
"Winding down",
];
const lateNightGreetings = [
"Still up",
"Night owl mode",
"The night is young",
"Hi there",
];
const morningGreetings = ["Good morning", "Rise and shine", "Morning", "Hey there"];
const afternoonGreetings = ["Good afternoon", "Afternoon", "Hey there", "Hi there"];
const eveningGreetings = ["Good evening", "Evening", "Hey there", "Hi there"];
const nightGreetings = ["Good night", "Evening", "Hey there", "Winding down"];
const lateNightGreetings = ["Still up", "Night owl mode", "The night is young", "Hi there"];
// Select a random greeting based on time
let greeting: string;
if (hour < 5) {
@ -321,12 +362,12 @@ const getTimeBasedGreeting = (userEmail?: string): string => {
// Night: 10 PM to midnight
greeting = nightGreetings[Math.floor(Math.random() * nightGreetings.length)];
}
// Add personalization with first name if available
if (firstName) {
return `${greeting}, ${firstName}!`;
}
return `${greeting}!`;
};
@ -335,14 +376,14 @@ const ThreadWelcome: FC = () => {
// Memoize greeting so it doesn't change on re-renders (only on user change)
const greeting = useMemo(() => getTimeBasedGreeting(user?.email), [user?.email]);
return (
<div className="aui-thread-welcome-root mx-auto flex w-full max-w-(--thread-max-width) grow flex-col items-center px-4 relative">
{/* Greeting positioned above the composer - fixed position */}
<div className="aui-thread-welcome-message absolute bottom-[calc(50%+5rem)] left-0 right-0 flex flex-col items-center text-center z-10">
<h1 className="aui-thread-welcome-message-inner fade-in slide-in-from-bottom-2 animate-in text-5xl delay-100 duration-500 ease-out fill-mode-both">
{greeting}
</h1>
<h1 className="aui-thread-welcome-message-inner fade-in slide-in-from-bottom-2 animate-in text-5xl delay-100 duration-500 ease-out fill-mode-both">
{greeting}
</h1>
</div>
{/* Composer - top edge fixed, expands downward only */}
<div className="fade-in slide-in-from-bottom-3 animate-in delay-200 duration-500 ease-out fill-mode-both w-full flex items-start justify-center absolute top-[calc(50%-3.5rem)] left-0 right-0">
@ -490,6 +531,23 @@ const Composer: FC = () => {
setMentionedDocuments((prev) => prev.filter((doc) => doc.id !== docId));
};
// Check if a model is configured - needed to disable input
const { data: userConfigs } = useAtomValue(newLLMConfigsAtom);
const { data: globalConfigs } = useAtomValue(globalNewLLMConfigsAtom);
const { data: preferences } = useAtomValue(llmPreferencesAtom);
const hasModelConfigured = useMemo(() => {
if (!preferences) return false;
const agentLlmId = preferences.agent_llm_id;
if (agentLlmId === null || agentLlmId === undefined) return false;
// Check if the configured model actually exists
if (agentLlmId < 0) {
return globalConfigs?.some((c) => c.id === agentLlmId) ?? false;
}
return userConfigs?.some((c) => c.id === agentLlmId) ?? false;
}, [preferences, globalConfigs, userConfigs]);
return (
<ComposerPrimitive.Root className="aui-composer-root relative flex w-full flex-col">
<ComposerPrimitive.AttachmentDropzone className="aui-composer-attachment-dropzone flex w-full flex-col rounded-2xl border-input bg-muted px-1 pt-2 outline-none transition-shadow data-[dragging=true]:border-ring data-[dragging=true]:border-dashed data-[dragging=true]:bg-accent/50">
@ -571,22 +629,26 @@ const Composer: FC = () => {
const ConnectorIndicator: FC = () => {
const searchSpaceId = useAtomValue(activeSearchSpaceIdAtom);
const { connectors, isLoading: connectorsLoading } = useSearchSourceConnectors(false, searchSpaceId ? Number(searchSpaceId) : undefined);
const { data: documentTypeCounts, isLoading: documentTypesLoading } = useAtomValue(documentTypeCountsAtom);
const { connectors, isLoading: connectorsLoading } = useSearchSourceConnectors(
false,
searchSpaceId ? Number(searchSpaceId) : undefined
);
const { data: documentTypeCounts, isLoading: documentTypesLoading } =
useAtomValue(documentTypeCountsAtom);
const [isOpen, setIsOpen] = useState(false);
const closeTimeoutRef = useRef<NodeJS.Timeout | null>(null);
const isLoading = connectorsLoading || documentTypesLoading;
// Get document types that have documents in the search space
const activeDocumentTypes = documentTypeCounts
const activeDocumentTypes = documentTypeCounts
? Object.entries(documentTypeCounts).filter(([_, count]) => count > 0)
: [];
const hasConnectors = connectors.length > 0;
const hasSources = hasConnectors || activeDocumentTypes.length > 0;
const totalSourceCount = connectors.length + activeDocumentTypes.length;
const handleMouseEnter = useCallback(() => {
// Clear any pending close timeout
if (closeTimeoutRef.current) {
@ -595,16 +657,16 @@ const ConnectorIndicator: FC = () => {
}
setIsOpen(true);
}, []);
const handleMouseLeave = useCallback(() => {
// Delay closing by 150ms for better UX
closeTimeoutRef.current = setTimeout(() => {
setIsOpen(false);
}, 150);
}, []);
if (!searchSpaceId) return null;
return (
<Popover open={isOpen} onOpenChange={setIsOpen}>
<PopoverTrigger asChild>
@ -618,7 +680,9 @@ const ConnectorIndicator: FC = () => {
"data-[state=open]:bg-transparent data-[state=open]:shadow-none data-[state=open]:ring-0",
"text-muted-foreground"
)}
aria-label={hasSources ? `View ${totalSourceCount} connected sources` : "Add your first connector"}
aria-label={
hasSources ? `View ${totalSourceCount} connected sources` : "Add your first connector"
}
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
>
@ -640,9 +704,9 @@ const ConnectorIndicator: FC = () => {
)}
</button>
</PopoverTrigger>
<PopoverContent
side="bottom"
align="start"
<PopoverContent
side="bottom"
align="start"
className="w-64 p-3"
onMouseEnter={handleMouseEnter}
onMouseLeave={handleMouseLeave}
@ -650,9 +714,7 @@ const ConnectorIndicator: FC = () => {
{hasSources ? (
<div className="space-y-3">
<div className="flex items-center justify-between">
<p className="text-xs font-medium text-muted-foreground">
Connected Sources
</p>
<p className="text-xs font-medium text-muted-foreground">Connected Sources</p>
<span className="text-xs font-medium bg-muted px-1.5 py-0.5 rounded">
{totalSourceCount}
</span>
@ -681,11 +743,11 @@ const ConnectorIndicator: FC = () => {
</div>
<div className="pt-1 border-t border-border/50">
<Link
href={`/dashboard/${searchSpaceId}/connectors`}
href={`/dashboard/${searchSpaceId}/connectors/add`}
className="inline-flex items-center gap-1.5 text-xs text-muted-foreground hover:text-foreground transition-colors"
>
<Plug2 className="size-3" />
Manage connectors
<Plus className="size-3" />
Add more sources
<ChevronRightIcon className="size-3" />
</Link>
</div>
@ -728,7 +790,24 @@ const ComposerAction: FC = () => {
return text.length === 0;
});
const isSendDisabled = hasProcessingAttachments || isComposerEmpty;
// Check if a model is configured
const { data: userConfigs } = useAtomValue(newLLMConfigsAtom);
const { data: globalConfigs } = useAtomValue(globalNewLLMConfigsAtom);
const { data: preferences } = useAtomValue(llmPreferencesAtom);
const hasModelConfigured = useMemo(() => {
if (!preferences) return false;
const agentLlmId = preferences.agent_llm_id;
if (agentLlmId === null || agentLlmId === undefined) return false;
// Check if the configured model actually exists
if (agentLlmId < 0) {
return globalConfigs?.some((c) => c.id === agentLlmId) ?? false;
}
return userConfigs?.some((c) => c.id === agentLlmId) ?? false;
}, [preferences, globalConfigs, userConfigs]);
const isSendDisabled = hasProcessingAttachments || isComposerEmpty || !hasModelConfigured;
return (
<div className="aui-composer-action-wrapper relative mx-2 mb-2 flex items-center justify-between">
@ -745,15 +824,25 @@ const ComposerAction: FC = () => {
</div>
)}
{/* Show warning when no model is configured */}
{!hasModelConfigured && !hasProcessingAttachments && (
<div className="flex items-center gap-1.5 text-amber-600 dark:text-amber-400 text-xs">
<AlertCircle className="size-3" />
<span>Select a model</span>
</div>
)}
<AssistantIf condition={({ thread }) => !thread.isRunning}>
<ComposerPrimitive.Send asChild disabled={isSendDisabled}>
<TooltipIconButton
tooltip={
hasProcessingAttachments
? "Wait for attachments to process"
: isComposerEmpty
? "Enter a message to send"
: "Send message"
!hasModelConfigured
? "Please select a model from the header to start chatting"
: hasProcessingAttachments
? "Wait for attachments to process"
: isComposerEmpty
? "Enter a message to send"
: "Send message"
}
side="bottom"
type="submit"
@ -798,25 +887,34 @@ const MessageError: FC = () => {
);
};
const AssistantMessageInner: FC = () => {
/**
 * Renders the chain-of-thought steps for the current assistant message,
 * looked up from ThinkingStepsContext by message id. Renders nothing when
 * the message has no recorded steps.
 */
const ThinkingStepsPart: FC = () => {
	const stepsByMessage = useContext(ThinkingStepsContext);
	const messageId = useAssistantState(({ message }) => message?.id);
	// Needed so the in-progress spinner stops when the run is cancelled.
	const isThreadRunning = useAssistantState(({ thread }) => thread.isRunning);

	const steps = stepsByMessage.get(messageId) || [];
	if (steps.length === 0) return null;

	return (
		<div className="mb-3">
			<ThinkingStepsDisplay steps={steps} isThreadRunning={isThreadRunning} />
		</div>
	);
};
const AssistantMessageInner: FC = () => {
return (
<>
{/* Show thinking steps BEFORE the text response */}
{thinkingSteps.length > 0 && (
<div className="mb-3">
<ThinkingStepsDisplay steps={thinkingSteps} isThreadRunning={isThreadRunning} />
</div>
)}
{/* Render thinking steps from message content - this ensures proper scroll tracking */}
<ThinkingStepsPart />
<div className="aui-assistant-message-content wrap-break-word px-2 text-foreground leading-relaxed">
<MessagePrimitive.Parts
components={{

View file

@ -4,9 +4,7 @@ import {
IconBrandLinkedin,
IconBrandTwitter,
} from "@tabler/icons-react";
import Image from "next/image";
import Link from "next/link";
import React from "react";
import { Logo } from "@/components/Logo";
export function FooterNew() {

View file

@ -1,97 +0,0 @@
"use client";
import {
IconBrandDiscord,
IconBrandGithub,
IconBrandLinkedin,
IconBrandTwitter,
} from "@tabler/icons-react";
import Link from "next/link";
import type React from "react";
import { cn } from "@/lib/utils";
export function Footer() {
	// Legal pages rendered in the footer navigation.
	const pages = [
		{ title: "Privacy", href: "/privacy" },
		{ title: "Terms", href: "/terms" },
	];

	// Social destinations paired with their brand icons.
	const socials = [
		{ href: "https://x.com/mod_setter", Icon: IconBrandTwitter },
		{ href: "https://www.linkedin.com/in/rohan-verma-sde/", Icon: IconBrandLinkedin },
		{ href: "https://github.com/MODSetter", Icon: IconBrandGithub },
		{ href: "https://discord.gg/ejRNvftDp9", Icon: IconBrandDiscord },
	];

	return (
		<div className="border-t border-neutral-100 dark:border-white/[0.1] px-8 py-20 w-full relative overflow-hidden">
			<div className="max-w-7xl mx-auto text-sm text-neutral-500 justify-between items-start md:px-8">
				<div className="flex flex-col items-center justify-center w-full relative">
					<div className="mr-0 md:mr-4 md:flex mb-4">
						<div className="flex items-center">
							<span className="font-medium text-black dark:text-white ml-2">SurfSense</span>
						</div>
					</div>

					<ul className="transition-colors flex sm:flex-row flex-col hover:text-text-neutral-800 text-neutral-600 dark:text-neutral-300 list-none gap-4">
						{pages.map((page) => (
							<li key={`pages-${page.title}`} className="list-none">
								<Link className="transition-colors hover:text-text-neutral-800" href={page.href}>
									{page.title}
								</Link>
							</li>
						))}
					</ul>
					<GridLineHorizontal className="max-w-7xl mx-auto mt-8" />
				</div>

				<div className="flex sm:flex-row flex-col justify-between mt-8 items-center w-full">
					<p className="text-neutral-500 dark:text-neutral-400 mb-8 sm:mb-0">
						&copy; SurfSense 2025
					</p>
					<div className="flex gap-4">
						{socials.map(({ href, Icon }) => (
							<Link key={href} href={href}>
								<Icon className="h-6 w-6 text-neutral-500 dark:text-neutral-300" />
							</Link>
						))}
					</div>
				</div>
			</div>
		</div>
	);
}
const GridLineHorizontal = ({ className, offset }: { className?: string; offset?: string }) => {
	// CSS custom properties drive the dashed-line gradient; `offset` widens the
	// line past its container (default 200px; -100px keeps the line inside).
	const lineVars = {
		"--background": "#ffffff",
		"--color": "rgba(0, 0, 0, 0.2)",
		"--height": "1px",
		"--width": "5px",
		"--fade-stop": "90%",
		"--offset": offset || "200px",
		"--color-dark": "rgba(255, 255, 255, 0.2)",
		maskComposite: "exclude",
	} as React.CSSProperties;

	const lineClasses = cn(
		"w-[calc(100%+var(--offset))] h-[var(--height)]",
		"bg-[linear-gradient(to_right,var(--color),var(--color)_50%,transparent_0,transparent)]",
		"[background-size:var(--width)_var(--height)]",
		"[mask:linear-gradient(to_left,var(--background)_var(--fade-stop),transparent),_linear-gradient(to_right,var(--background)_var(--fade-stop),transparent),_linear-gradient(black,black)]",
		"[mask-composite:exclude]",
		"z-30",
		"dark:bg-[linear-gradient(to_right,var(--color-dark),var(--color-dark)_50%,transparent_0,transparent)]",
		className
	);

	return <div style={lineVars} className={lineClasses}></div>;
};

View file

@ -0,0 +1,66 @@
"use client";
import { useCallback, useState } from "react";
import type {
GlobalNewLLMConfig,
NewLLMConfigPublic,
} from "@/contracts/types/new-llm-config.types";
import { ModelConfigSidebar } from "./model-config-sidebar";
import { ModelSelector } from "./model-selector";
interface ChatHeaderProps {
	/** Id of the search space this chat header operates on; forwarded to the config sidebar. */
	searchSpaceId: number;
}
/**
 * Chat header bar hosting the model selector, plus the sliding sidebar used
 * to view, edit, or create model configurations.
 */
export function ChatHeader({ searchSpaceId }: ChatHeaderProps) {
	const [sidebarOpen, setSidebarOpen] = useState(false);
	const [selectedConfig, setSelectedConfig] = useState<
		NewLLMConfigPublic | GlobalNewLLMConfig | null
	>(null);
	const [isGlobal, setIsGlobal] = useState(false);
	const [sidebarMode, setSidebarMode] = useState<"create" | "edit" | "view">("view");

	// Open the sidebar for an existing config; global configs open read-only.
	const handleEditConfig = useCallback(
		(config: NewLLMConfigPublic | GlobalNewLLMConfig, global: boolean) => {
			setSelectedConfig(config);
			setIsGlobal(global);
			setSidebarMode(global ? "view" : "edit");
			setSidebarOpen(true);
		},
		[]
	);

	// Open the sidebar with an empty form for a brand-new user config.
	const handleAddNew = useCallback(() => {
		setSelectedConfig(null);
		setIsGlobal(false);
		setSidebarMode("create");
		setSidebarOpen(true);
	}, []);

	// Clear the selection whenever the sidebar closes.
	const handleSidebarClose = useCallback((open: boolean) => {
		setSidebarOpen(open);
		if (!open) setSelectedConfig(null);
	}, []);

	return (
		<>
			{/* Header Bar */}
			<div className="flex items-center justify-between px-4 py-2 border-b border-border/30 bg-background/80 backdrop-blur-sm">
				<ModelSelector onEdit={handleEditConfig} onAddNew={handleAddNew} />
			</div>

			{/* Config Sidebar */}
			<ModelConfigSidebar
				open={sidebarOpen}
				onOpenChange={handleSidebarClose}
				config={selectedConfig}
				isGlobal={isGlobal}
				searchSpaceId={searchSpaceId}
				mode={sidebarMode}
			/>
		</>
	);
}

View file

@ -0,0 +1,369 @@
"use client";
import { useAtomValue } from "jotai";
import { AlertCircle, Bot, ChevronRight, Globe, User, X } from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useCallback, useEffect, useState } from "react";
import { toast } from "sonner";
import {
createNewLLMConfigMutationAtom,
updateLLMPreferencesMutationAtom,
updateNewLLMConfigMutationAtom,
} from "@/atoms/new-llm-config/new-llm-config-mutation.atoms";
import { LLMConfigForm, type LLMConfigFormData } from "@/components/shared/llm-config-form";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import type {
GlobalNewLLMConfig,
NewLLMConfigPublic,
} from "@/contracts/types/new-llm-config.types";
import { cn } from "@/lib/utils";
interface ModelConfigSidebarProps {
	/** Whether the sidebar is currently visible. */
	open: boolean;
	/** Called when the sidebar requests an open/close state change. */
	onOpenChange: (open: boolean) => void;
	/** Config being viewed/edited, or null when creating a new one. */
	config: NewLLMConfigPublic | GlobalNewLLMConfig | null;
	/** True when the config is a global one (opened read-only elsewhere in this file). */
	isGlobal: boolean;
	/** Search space the config belongs to / is created in. */
	searchSpaceId: number;
	/** Sidebar behavior: create a new config, edit an existing one, or view read-only. */
	mode: "create" | "edit" | "view";
}
/**
 * Right-hand slide-in panel for managing a single LLM configuration.
 *
 * Renders one of three states depending on `mode` and `isGlobal`:
 * - "create": a blank LLMConfigForm; on successful creation the new config is
 *   immediately assigned as the search space's agent LLM.
 * - global config (read-only): a detail view with a "Use This Model" action
 *   that only updates preferences (global configs cannot be edited here).
 * - user config: an editable LLMConfigForm that updates the config in place.
 *
 * Closes on Escape, backdrop click, or the explicit close button.
 */
export function ModelConfigSidebar({
	open,
	onOpenChange,
	config,
	isGlobal,
	searchSpaceId,
	mode,
}: ModelConfigSidebarProps) {
	// Shared busy flag for create/update/use-model actions.
	const [isSubmitting, setIsSubmitting] = useState(false);

	// Mutations - use mutateAsync from the atom value
	const { mutateAsync: createConfig } = useAtomValue(createNewLLMConfigMutationAtom);
	const { mutateAsync: updateConfig } = useAtomValue(updateNewLLMConfigMutationAtom);
	const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);

	// Handle escape key: close the panel while it is open.
	useEffect(() => {
		const handleEscape = (e: KeyboardEvent) => {
			if (e.key === "Escape" && open) {
				onOpenChange(false);
			}
		};
		window.addEventListener("keydown", handleEscape);
		return () => window.removeEventListener("keydown", handleEscape);
	}, [open, onOpenChange]);

	// Get title based on mode (and read-only state for globals).
	const getTitle = () => {
		if (mode === "create") return "Add New Configuration";
		if (isGlobal) return "View Global Configuration";
		return "Edit Configuration";
	};

	// Handle form submit for both "create" and user-config "edit" flows.
	// Global configs never reach here (they render a read-only view instead).
	const handleSubmit = useCallback(
		async (data: LLMConfigFormData) => {
			setIsSubmitting(true);
			try {
				if (mode === "create") {
					// Create new config
					const result = await createConfig({
						...data,
						search_space_id: searchSpaceId,
					});
					// Assign the new config to the agent role
					if (result?.id) {
						await updatePreferences({
							search_space_id: searchSpaceId,
							data: {
								agent_llm_id: result.id,
							},
						});
					}
					toast.success("Configuration created and assigned!");
					onOpenChange(false);
				} else if (!isGlobal && config) {
					// Update existing user config
					await updateConfig({
						id: config.id,
						data: {
							name: data.name,
							description: data.description,
							provider: data.provider,
							custom_provider: data.custom_provider,
							model_name: data.model_name,
							api_key: data.api_key,
							api_base: data.api_base,
							litellm_params: data.litellm_params,
							system_instructions: data.system_instructions,
							use_default_system_instructions: data.use_default_system_instructions,
							citations_enabled: data.citations_enabled,
						},
					});
					toast.success("Configuration updated!");
					onOpenChange(false);
				}
			} catch (error) {
				console.error("Failed to save configuration:", error);
				toast.error("Failed to save configuration");
			} finally {
				setIsSubmitting(false);
			}
		},
		[
			mode,
			isGlobal,
			config,
			searchSpaceId,
			createConfig,
			updateConfig,
			updatePreferences,
			onOpenChange,
		]
	);

	// Handle "Use this model" for global configs: only preferences change.
	const handleUseGlobalConfig = useCallback(async () => {
		if (!config || !isGlobal) return;
		setIsSubmitting(true);
		try {
			await updatePreferences({
				search_space_id: searchSpaceId,
				data: {
					agent_llm_id: config.id,
				},
			});
			toast.success(`Now using ${config.name}`);
			onOpenChange(false);
		} catch (error) {
			console.error("Failed to set model:", error);
			toast.error("Failed to set model");
		} finally {
			setIsSubmitting(false);
		}
	}, [config, isGlobal, searchSpaceId, updatePreferences, onOpenChange]);

	return (
		<AnimatePresence>
			{open && (
				<>
					{/* Backdrop: clicking it requests close */}
					<motion.div
						initial={{ opacity: 0 }}
						animate={{ opacity: 1 }}
						exit={{ opacity: 0 }}
						transition={{ duration: 0.2 }}
						className="fixed inset-0 z-40 bg-black/20 backdrop-blur-sm"
						onClick={() => onOpenChange(false)}
					/>
					{/* Sidebar Panel: springs in from the right edge */}
					<motion.div
						initial={{ x: "100%", opacity: 0 }}
						animate={{ x: 0, opacity: 1 }}
						exit={{ x: "100%", opacity: 0 }}
						transition={{
							type: "spring",
							damping: 30,
							stiffness: 300,
						}}
						className={cn(
							"fixed right-0 top-0 z-50 h-full w-full sm:w-[480px] lg:w-[540px]",
							"bg-background border-l border-border/50 shadow-2xl",
							"flex flex-col"
						)}
					>
						{/* Header: title, ownership badge, model name, close button */}
						<div className="flex items-center justify-between px-6 py-4 border-b border-border/50 bg-muted/20">
							<div className="flex items-center gap-3">
								<div className="flex items-center justify-center size-10 rounded-xl bg-primary/10">
									<Bot className="size-5 text-primary" />
								</div>
								<div>
									<h2 className="text-lg font-semibold">{getTitle()}</h2>
									<div className="flex items-center gap-2 mt-0.5">
										{isGlobal ? (
											<Badge variant="secondary" className="gap-1 text-xs">
												<Globe className="size-3" />
												Global
											</Badge>
										) : mode !== "create" ? (
											<Badge variant="outline" className="gap-1 text-xs">
												<User className="size-3" />
												Custom
											</Badge>
										) : null}
										{config && (
											<span className="text-xs text-muted-foreground">{config.model_name}</span>
										)}
									</div>
								</div>
							</div>
							<Button
								variant="ghost"
								size="icon"
								onClick={() => onOpenChange(false)}
								className="rounded-xl hover:bg-destructive/10 hover:text-destructive"
							>
								<X className="size-5" />
							</Button>
						</div>
						{/* Content - use overflow-y-auto instead of ScrollArea for better compatibility */}
						<div className="flex-1 overflow-y-auto">
							<div className="p-6">
								{/* Global config notice */}
								{isGlobal && mode !== "create" && (
									<Alert className="mb-6 border-amber-500/30 bg-amber-500/5">
										<AlertCircle className="size-4 text-amber-500" />
										<AlertDescription className="text-sm text-amber-700 dark:text-amber-400">
											Global configurations are read-only. To customize settings, create a new
											configuration based on this template.
										</AlertDescription>
									</Alert>
								)}
								{/* Form: create form, read-only global view, or edit form */}
								{mode === "create" ? (
									<LLMConfigForm
										searchSpaceId={searchSpaceId}
										onSubmit={handleSubmit}
										onCancel={() => onOpenChange(false)}
										isSubmitting={isSubmitting}
										mode="create"
										submitLabel="Create & Use"
									/>
								) : isGlobal && config ? (
									// Read-only view for global configs
									<div className="space-y-6">
										{/* Config Details */}
										<div className="space-y-4">
											<div className="grid gap-4 sm:grid-cols-2">
												<div className="space-y-1.5">
													<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
														Configuration Name
													</label>
													<p className="text-sm font-medium">{config.name}</p>
												</div>
												{config.description && (
													<div className="space-y-1.5">
														<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
															Description
														</label>
														<p className="text-sm text-muted-foreground">{config.description}</p>
													</div>
												)}
											</div>
											<div className="h-px bg-border/50" />
											<div className="grid gap-4 sm:grid-cols-2">
												<div className="space-y-1.5">
													<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
														Provider
													</label>
													<p className="text-sm font-medium">{config.provider}</p>
												</div>
												<div className="space-y-1.5">
													<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
														Model
													</label>
													<p className="text-sm font-medium font-mono">{config.model_name}</p>
												</div>
											</div>
											<div className="h-px bg-border/50" />
											<div className="grid gap-4 sm:grid-cols-2">
												<div className="space-y-2">
													<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
														Citations
													</label>
													<Badge
														variant={config.citations_enabled ? "default" : "secondary"}
														className="w-fit"
													>
														{config.citations_enabled ? "Enabled" : "Disabled"}
													</Badge>
												</div>
											</div>
											{config.system_instructions && (
												<>
													<div className="h-px bg-border/50" />
													<div className="space-y-1.5">
														<label className="text-xs font-medium text-muted-foreground uppercase tracking-wider">
															System Instructions
														</label>
														<div className="p-3 rounded-lg bg-muted/50 border border-border/50">
															<p className="text-xs font-mono text-muted-foreground whitespace-pre-wrap line-clamp-10">
																{config.system_instructions}
															</p>
														</div>
													</div>
												</>
											)}
										</div>
										{/* Action Buttons */}
										<div className="flex gap-3 pt-4 border-t border-border/50">
											<Button
												variant="outline"
												className="flex-1"
												onClick={() => onOpenChange(false)}
											>
												Close
											</Button>
											<Button
												className="flex-1 gap-2"
												onClick={handleUseGlobalConfig}
												disabled={isSubmitting}
											>
												{isSubmitting ? (
													<>Loading...</>
												) : (
													<>
														<ChevronRight className="size-4" />
														Use This Model
													</>
												)}
											</Button>
										</div>
									</div>
								) : config ? (
									// Edit form for user configs
									<LLMConfigForm
										searchSpaceId={searchSpaceId}
										initialData={{
											name: config.name,
											description: config.description,
											provider: config.provider,
											custom_provider: config.custom_provider,
											model_name: config.model_name,
											api_key: config.api_key,
											api_base: config.api_base,
											litellm_params: config.litellm_params,
											system_instructions: config.system_instructions,
											use_default_system_instructions: config.use_default_system_instructions,
											citations_enabled: config.citations_enabled,
											search_space_id: searchSpaceId,
										}}
										onSubmit={handleSubmit}
										onCancel={() => onOpenChange(false)}
										isSubmitting={isSubmitting}
										mode="edit"
										submitLabel="Save Changes"
									/>
								) : null}
							</div>
						</div>
					</motion.div>
				</>
			)}
		</AnimatePresence>
	);
}

View file

@ -0,0 +1,384 @@
"use client";
import { useAtomValue } from "jotai";
import {
Bot,
Check,
ChevronDown,
Cloud,
Edit3,
Globe,
Loader2,
Plus,
Settings2,
Sparkles,
User,
Zap,
} from "lucide-react";
import { useCallback, useMemo, useState } from "react";
import { toast } from "sonner";
import { updateLLMPreferencesMutationAtom } from "@/atoms/new-llm-config/new-llm-config-mutation.atoms";
import {
globalNewLLMConfigsAtom,
llmPreferencesAtom,
newLLMConfigsAtom,
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { activeSearchSpaceIdAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import {
Command,
CommandEmpty,
CommandGroup,
CommandInput,
CommandItem,
CommandList,
CommandSeparator,
} from "@/components/ui/command";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import type {
GlobalNewLLMConfig,
NewLLMConfigPublic,
} from "@/contracts/types/new-llm-config.types";
import { cn } from "@/lib/utils";
// Provider icons mapping
// Visual accent per known LLM provider: icon component + tailwind color class.
const PROVIDER_ICON_STYLES: Record<string, readonly [typeof Bot, string]> = {
	OPENAI: [Sparkles, "text-emerald-500"],
	ANTHROPIC: [Bot, "text-amber-600"],
	GOOGLE: [Cloud, "text-blue-500"],
	GROQ: [Zap, "text-orange-500"],
	OLLAMA: [Settings2, "text-gray-500"],
	XAI: [Bot, "text-violet-500"],
};

/**
 * Resolve a provider name (case-insensitive) to its small badge icon.
 * Unknown or empty providers fall back to a muted generic bot icon.
 */
const getProviderIcon = (provider: string) => {
	const iconClass = "size-4";
	const entry = PROVIDER_ICON_STYLES[provider?.toUpperCase() ?? ""];
	if (!entry) {
		return <Bot className={cn(iconClass, "text-muted-foreground")} />;
	}
	const [Icon, color] = entry;
	return <Icon className={cn(iconClass, color)} />;
};
/** Props for the model-selector combobox shown in the chat header. */
interface ModelSelectorProps {
	// Opens the sidebar to edit/view `config`; `isGlobal` marks read-only globals.
	onEdit: (config: NewLLMConfigPublic | GlobalNewLLMConfig, isGlobal: boolean) => void;
	// Opens the sidebar in "create" mode.
	onAddNew: () => void;
	// Extra classes merged onto the trigger button.
	className?: string;
}
/**
 * Popover combobox for picking the active agent LLM configuration.
 *
 * Lists global configs and the user's own configs (both filterable by a
 * client-side search over name/model/provider), marks the currently selected
 * one, and switches the search space's `agent_llm_id` preference on select.
 * Per-row edit buttons and an "Add New Configuration" footer delegate to the
 * parent via `onEdit` / `onAddNew`.
 */
export function ModelSelector({ onEdit, onAddNew, className }: ModelSelectorProps) {
	const [open, setOpen] = useState(false);
	const [searchQuery, setSearchQuery] = useState("");
	// True while the preference update is in flight (shows a blocking overlay).
	const [isSwitching, setIsSwitching] = useState(false);

	// Fetch configs
	const { data: userConfigs, isLoading: userConfigsLoading } = useAtomValue(newLLMConfigsAtom);
	const { data: globalConfigs, isLoading: globalConfigsLoading } =
		useAtomValue(globalNewLLMConfigsAtom);
	const { data: preferences, isLoading: preferencesLoading } = useAtomValue(llmPreferencesAtom);
	const searchSpaceId = useAtomValue(activeSearchSpaceIdAtom);
	const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
	const isLoading = userConfigsLoading || globalConfigsLoading || preferencesLoading;

	// Get current agent LLM config from preferences.
	const currentConfig = useMemo(() => {
		if (!preferences) return null;
		const agentLlmId = preferences.agent_llm_id;
		if (agentLlmId === null || agentLlmId === undefined) return null;
		// Check if it's a global config (negative ID)
		if (agentLlmId < 0) {
			return globalConfigs?.find((c) => c.id === agentLlmId) ?? null;
		}
		// Otherwise, check user configs
		return userConfigs?.find((c) => c.id === agentLlmId) ?? null;
	}, [preferences, globalConfigs, userConfigs]);

	// Filter configs based on search (case-insensitive substring match).
	const filteredGlobalConfigs = useMemo(() => {
		if (!globalConfigs) return [];
		if (!searchQuery) return globalConfigs;
		const query = searchQuery.toLowerCase();
		return globalConfigs.filter(
			(c) =>
				c.name.toLowerCase().includes(query) ||
				c.model_name.toLowerCase().includes(query) ||
				c.provider.toLowerCase().includes(query)
		);
	}, [globalConfigs, searchQuery]);

	const filteredUserConfigs = useMemo(() => {
		if (!userConfigs) return [];
		if (!searchQuery) return userConfigs;
		const query = searchQuery.toLowerCase();
		return userConfigs.filter(
			(c) =>
				c.name.toLowerCase().includes(query) ||
				c.model_name.toLowerCase().includes(query) ||
				c.provider.toLowerCase().includes(query)
		);
	}, [userConfigs, searchQuery]);

	// Switch the agent LLM preference to the chosen config.
	const handleSelectConfig = useCallback(
		async (config: NewLLMConfigPublic | GlobalNewLLMConfig) => {
			// If already selected, just close
			if (currentConfig?.id === config.id) {
				setOpen(false);
				return;
			}
			if (!searchSpaceId) {
				toast.error("No search space selected");
				return;
			}
			setIsSwitching(true);
			try {
				await updatePreferences({
					search_space_id: Number(searchSpaceId),
					data: {
						agent_llm_id: config.id,
					},
				});
				toast.success(`Switched to ${config.name}`);
				setOpen(false);
			} catch (error) {
				console.error("Failed to switch model:", error);
				toast.error("Failed to switch model");
			} finally {
				setIsSwitching(false);
			}
		},
		[currentConfig, searchSpaceId, updatePreferences]
	);

	// Edit-button click inside a row: stop row selection, hand off to parent.
	const handleEditConfig = useCallback(
		(e: React.MouseEvent, config: NewLLMConfigPublic | GlobalNewLLMConfig, isGlobal: boolean) => {
			e.stopPropagation();
			onEdit(config, isGlobal);
			setOpen(false);
		},
		[onEdit]
	);

	return (
		<Popover open={open} onOpenChange={setOpen}>
			<PopoverTrigger asChild>
				<Button
					variant="ghost"
					size="sm"
					role="combobox"
					aria-expanded={open}
					className={cn(
						"h-9 gap-2 px-3 rounded-xl border border-border/50 bg-background/50 backdrop-blur-sm",
						"hover:bg-muted/80 hover:border-border transition-all duration-200",
						"text-sm font-medium text-foreground",
						className
					)}
				>
					{isLoading ? (
						<>
							<Loader2 className="size-4 animate-spin text-muted-foreground" />
							<span className="text-muted-foreground">Loading...</span>
						</>
					) : currentConfig ? (
						<>
							{getProviderIcon(currentConfig.provider)}
							<span className="max-w-[150px] truncate">{currentConfig.name}</span>
							<Badge variant="secondary" className="ml-1 text-[10px] px-1.5 py-0 h-4 bg-muted/80">
								{currentConfig.model_name.split("/").pop()?.slice(0, 15) ||
									currentConfig.model_name.slice(0, 15)}
							</Badge>
						</>
					) : (
						<>
							<Bot className="size-4 text-muted-foreground" />
							<span className="text-muted-foreground">Select Model</span>
						</>
					)}
					<ChevronDown className="size-3.5 text-muted-foreground ml-1 shrink-0" />
				</Button>
			</PopoverTrigger>
			<PopoverContent
				className="w-[360px] p-0 rounded-xl shadow-lg border-border/50"
				align="start"
				sideOffset={8}
			>
				{/* shouldFilter={false}: filtering is done manually via the memos above */}
				<Command shouldFilter={false} className="rounded-xl relative">
					{/* Switching overlay */}
					{isSwitching && (
						<div className="absolute inset-0 z-10 flex items-center justify-center bg-background/80 backdrop-blur-sm rounded-xl">
							<div className="flex items-center gap-2 text-sm text-muted-foreground">
								<Loader2 className="size-4 animate-spin" />
								<span>Switching model...</span>
							</div>
						</div>
					)}
					<div className="flex items-center gap-2 border-b px-3 py-2 bg-muted/30">
						<Bot className="size-4 text-muted-foreground" />
						<CommandInput
							placeholder="Search models..."
							value={searchQuery}
							onValueChange={setSearchQuery}
							className="h-8 border-0 bg-transparent focus:ring-0 placeholder:text-muted-foreground/60"
							disabled={isSwitching}
						/>
					</div>
					<CommandList className="max-h-[400px] overflow-y-auto">
						<CommandEmpty className="py-8 text-center">
							<div className="flex flex-col items-center gap-2">
								<Bot className="size-8 text-muted-foreground/40" />
								<p className="text-sm text-muted-foreground">No models found</p>
								<p className="text-xs text-muted-foreground/60">Try a different search term</p>
							</div>
						</CommandEmpty>
						{/* Global Configs Section */}
						{filteredGlobalConfigs.length > 0 && (
							<CommandGroup>
								<div className="flex items-center gap-2 px-3 py-2 text-xs font-semibold text-muted-foreground uppercase tracking-wider">
									<Globe className="size-3.5" />
									Global Models
								</div>
								{filteredGlobalConfigs.map((config) => {
									const isSelected = currentConfig?.id === config.id;
									return (
										<CommandItem
											key={`global-${config.id}`}
											value={`global-${config.id}`}
											onSelect={() => handleSelectConfig(config)}
											className={cn(
												"mx-2 rounded-lg mb-1 cursor-pointer",
												"aria-selected:bg-accent/50",
												isSelected && "bg-accent/80"
											)}
										>
											<div className="flex items-center justify-between w-full gap-2">
												<div className="flex items-center gap-3 min-w-0 flex-1">
													<div className="shrink-0">{getProviderIcon(config.provider)}</div>
													<div className="min-w-0 flex-1">
														<div className="flex items-center gap-2">
															<span className="font-medium truncate">{config.name}</span>
															{isSelected && <Check className="size-3.5 text-primary shrink-0" />}
														</div>
														<div className="flex items-center gap-1.5 mt-0.5">
															<span className="text-xs text-muted-foreground truncate">
																{config.model_name}
															</span>
															{config.citations_enabled && (
																<Badge
																	variant="outline"
																	className="text-[9px] px-1 py-0 h-3.5 bg-primary/10 text-primary border-primary/20"
																>
																	Citations
																</Badge>
															)}
														</div>
													</div>
												</div>
												<Button
													variant="ghost"
													size="icon"
													className="size-7 shrink-0 rounded-md hover:bg-muted"
													onClick={(e) => handleEditConfig(e, config, true)}
												>
													<Edit3 className="size-3.5 text-muted-foreground" />
												</Button>
											</div>
										</CommandItem>
									);
								})}
							</CommandGroup>
						)}
						{filteredGlobalConfigs.length > 0 && filteredUserConfigs.length > 0 && (
							<CommandSeparator className="my-1" />
						)}
						{/* User Configs Section */}
						{filteredUserConfigs.length > 0 && (
							<CommandGroup>
								<div className="flex items-center gap-2 px-3 py-2 text-xs font-semibold text-muted-foreground uppercase tracking-wider">
									<User className="size-3.5" />
									Your Configurations
								</div>
								{filteredUserConfigs.map((config) => {
									const isSelected = currentConfig?.id === config.id;
									return (
										<CommandItem
											key={`user-${config.id}`}
											value={`user-${config.id}`}
											onSelect={() => handleSelectConfig(config)}
											className={cn(
												"mx-2 rounded-lg mb-1 cursor-pointer",
												"aria-selected:bg-accent/50",
												isSelected && "bg-accent/80"
											)}
										>
											<div className="flex items-center justify-between w-full gap-2">
												<div className="flex items-center gap-3 min-w-0 flex-1">
													<div className="shrink-0">{getProviderIcon(config.provider)}</div>
													<div className="min-w-0 flex-1">
														<div className="flex items-center gap-2">
															<span className="font-medium truncate">{config.name}</span>
															{isSelected && <Check className="size-3.5 text-primary shrink-0" />}
														</div>
														<div className="flex items-center gap-1.5 mt-0.5">
															<span className="text-xs text-muted-foreground truncate">
																{config.model_name}
															</span>
															{config.citations_enabled && (
																<Badge
																	variant="outline"
																	className="text-[9px] px-1 py-0 h-3.5 bg-primary/10 text-primary border-primary/20"
																>
																	Citations
																</Badge>
															)}
														</div>
													</div>
												</div>
												<Button
													variant="ghost"
													size="icon"
													className="size-7 shrink-0 rounded-md hover:bg-muted"
													onClick={(e) => handleEditConfig(e, config, false)}
												>
													<Edit3 className="size-3.5 text-muted-foreground" />
												</Button>
											</div>
										</CommandItem>
									);
								})}
							</CommandGroup>
						)}
						{/* Add New Config Button */}
						<div className="p-2 border-t border-border/50 bg-muted/20">
							<Button
								variant="ghost"
								size="sm"
								className="w-full justify-start gap-2 h-9 rounded-lg hover:bg-accent/50"
								onClick={() => {
									setOpen(false);
									onAddNew();
								}}
							>
								<Plus className="size-4 text-primary" />
								<span className="text-sm font-medium">Add New Configuration</span>
							</Button>
						</div>
					</CommandList>
				</Command>
			</PopoverContent>
		</Popover>
	);
}

View file

@ -1,8 +0,0 @@
// Barrel file: public re-exports for the onboarding component directory.
export { OnboardActionCard } from "./onboard-action-card";
export { OnboardAdvancedSettings } from "./onboard-advanced-settings";
export { OnboardHeader } from "./onboard-header";
export { OnboardLLMSetup } from "./onboard-llm-setup";
export { OnboardLoading } from "./onboard-loading";
export { OnboardStats } from "./onboard-stats";
export { SetupLLMStep } from "./setup-llm-step";
export { SetupPromptStep } from "./setup-prompt-step";

View file

@ -1,114 +0,0 @@
"use client";
import { ArrowRight, CheckCircle, type LucideIcon } from "lucide-react";
import { motion } from "motion/react";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { cn } from "@/lib/utils";
/** Props for a single onboarding call-to-action card. */
interface OnboardActionCardProps {
	// Card heading.
	title: string;
	// Supporting copy under the heading.
	description: string;
	// Icon rendered in the colored badge at the top of the card.
	icon: LucideIcon;
	// Bullet list of feature strings shown with check marks.
	features: string[];
	// Label of the card's action button.
	buttonText: string;
	// Fired when the card (anywhere on it) is clicked.
	onClick: () => void;
	// Which palette from `colorSchemes` to apply.
	colorScheme: "emerald" | "blue" | "violet";
	// Entrance-animation delay in seconds (staggering sibling cards).
	delay?: number;
}
// Tailwind class bundles per color scheme, keyed by OnboardActionCardProps.colorScheme.
// Each entry styles the icon badge, feature check marks, button, and hover border.
const colorSchemes = {
	emerald: {
		iconBg: "bg-emerald-500/10 dark:bg-emerald-500/20",
		iconRing: "ring-emerald-500/20 dark:ring-emerald-500/30",
		iconColor: "text-emerald-600 dark:text-emerald-400",
		checkColor: "text-emerald-500",
		buttonBg: "bg-emerald-600 hover:bg-emerald-500",
		hoverBorder: "hover:border-emerald-500/50",
	},
	blue: {
		iconBg: "bg-blue-500/10 dark:bg-blue-500/20",
		iconRing: "ring-blue-500/20 dark:ring-blue-500/30",
		iconColor: "text-blue-600 dark:text-blue-400",
		checkColor: "text-blue-500",
		buttonBg: "bg-blue-600 hover:bg-blue-500",
		hoverBorder: "hover:border-blue-500/50",
	},
	violet: {
		iconBg: "bg-violet-500/10 dark:bg-violet-500/20",
		iconRing: "ring-violet-500/20 dark:ring-violet-500/30",
		iconColor: "text-violet-600 dark:text-violet-400",
		checkColor: "text-violet-500",
		buttonBg: "bg-violet-600 hover:bg-violet-500",
		hoverBorder: "hover:border-violet-500/50",
	},
};
/**
 * Animated call-to-action card used on the onboarding screen.
 *
 * The whole card is clickable (fires `onClick`); the button inside has no
 * handler of its own and relies on the card's click bubbling. Entrance and
 * hover animations come from motion; colors from `colorSchemes[colorScheme]`.
 */
export function OnboardActionCard({
	title,
	description,
	icon: Icon,
	features,
	buttonText,
	onClick,
	colorScheme,
	delay = 0,
}: OnboardActionCardProps) {
	const colors = colorSchemes[colorScheme];
	return (
		<motion.div
			initial={{ opacity: 0, y: 30 }}
			animate={{ opacity: 1, y: 0 }}
			transition={{ delay, type: "spring", stiffness: 200 }}
			whileHover={{ y: -6, transition: { duration: 0.2 } }}
		>
			<Card
				className={cn(
					"h-full cursor-pointer group relative overflow-hidden transition-all duration-300",
					"border bg-card hover:shadow-lg",
					colors.hoverBorder
				)}
				onClick={onClick}
			>
				<CardHeader className="relative pb-4">
					{/* Icon badge: wiggles on hover via the keyframed rotate */}
					<motion.div
						className={cn(
							"w-14 h-14 rounded-2xl flex items-center justify-center mb-4 ring-1 transition-all duration-300",
							colors.iconBg,
							colors.iconRing,
							"group-hover:scale-110"
						)}
						whileHover={{ rotate: [0, -5, 5, 0] }}
						transition={{ duration: 0.5 }}
					>
						<Icon className={cn("w-7 h-7", colors.iconColor)} />
					</motion.div>
					<CardTitle className="text-xl">{title}</CardTitle>
					<CardDescription>{description}</CardDescription>
				</CardHeader>
				<CardContent className="relative space-y-4">
					{/* Feature bullets; index keys are fine for this static list */}
					<div className="space-y-2.5 text-sm text-muted-foreground">
						{features.map((feature, index) => (
							<div key={index} className="flex items-center gap-2.5">
								<CheckCircle className={cn("w-4 h-4", colors.checkColor)} />
								<span>{feature}</span>
							</div>
						))}
					</div>
					<Button
						className={cn(
							"w-full text-white border-0 transition-all duration-300",
							colors.buttonBg
						)}
					>
						{buttonText}
						<ArrowRight className="w-4 h-4 ml-2 group-hover:translate-x-1 transition-transform" />
					</Button>
				</CardContent>
			</Card>
		</motion.div>
	);
}

View file

@ -1,144 +0,0 @@
"use client";
import { ChevronDown, MessageSquare, Settings2 } from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { SetupLLMStep } from "@/components/onboard/setup-llm-step";
import { SetupPromptStep } from "@/components/onboard/setup-prompt-step";
import { Card, CardContent } from "@/components/ui/card";
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@/components/ui/collapsible";
import { cn } from "@/lib/utils";
/** Props for the collapsible "advanced settings" section of onboarding. */
interface OnboardAdvancedSettingsProps {
	// Search space the LLM/prompt settings apply to.
	searchSpaceId: number;
	// Controlled open state for the LLM configuration collapsible.
	showLLMSettings: boolean;
	setShowLLMSettings: (show: boolean) => void;
	// Controlled open state for the prompt/response settings collapsible.
	showPromptSettings: boolean;
	setShowPromptSettings: (show: boolean) => void;
	// Callbacks forwarded to SetupLLMStep.
	onConfigCreated: () => void;
	onConfigDeleted: () => void;
	onPreferencesUpdated: () => Promise<void>;
}
/**
 * Two controlled collapsible panels on the onboarding screen:
 * LLM model configuration (SetupLLMStep) and AI response/prompt settings
 * (SetupPromptStep). Open state is lifted to the parent via props so the
 * page can open/close sections programmatically.
 */
export function OnboardAdvancedSettings({
	searchSpaceId,
	showLLMSettings,
	setShowLLMSettings,
	showPromptSettings,
	setShowPromptSettings,
	onConfigCreated,
	onConfigDeleted,
	onPreferencesUpdated,
}: OnboardAdvancedSettingsProps) {
	return (
		<motion.div
			initial={{ opacity: 0, y: 20 }}
			animate={{ opacity: 1, y: 0 }}
			transition={{ delay: 1 }}
			className="space-y-4"
		>
			{/* LLM Configuration */}
			<Collapsible open={showLLMSettings} onOpenChange={setShowLLMSettings}>
				<CollapsibleTrigger asChild>
					<Card className="hover:bg-muted/50 transition-colors cursor-pointer">
						<CardContent className="py-4">
							<div className="flex items-center justify-between">
								<div className="flex items-center gap-3">
									<div className="p-2 rounded-xl bg-fuchsia-500/10 dark:bg-fuchsia-500/20 border border-fuchsia-500/20">
										<Settings2 className="w-5 h-5 text-fuchsia-600 dark:text-fuchsia-400" />
									</div>
									<div>
										<h3 className="font-semibold">LLM Configuration</h3>
										<p className="text-sm text-muted-foreground">
											Customize AI models and role assignments
										</p>
									</div>
								</div>
								{/* Chevron flips when the section is open */}
								<motion.div
									animate={{ rotate: showLLMSettings ? 180 : 0 }}
									transition={{ duration: 0.2 }}
								>
									<ChevronDown className="w-5 h-5 text-muted-foreground" />
								</motion.div>
							</div>
						</CardContent>
					</Card>
				</CollapsibleTrigger>
				<CollapsibleContent>
					<AnimatePresence>
						{showLLMSettings && (
							<motion.div
								initial={{ opacity: 0, height: 0 }}
								animate={{ opacity: 1, height: "auto" }}
								exit={{ opacity: 0, height: 0 }}
								transition={{ duration: 0.3 }}
							>
								<Card className="mt-2">
									<CardContent className="pt-6">
										<SetupLLMStep
											searchSpaceId={searchSpaceId}
											onConfigCreated={onConfigCreated}
											onConfigDeleted={onConfigDeleted}
											onPreferencesUpdated={onPreferencesUpdated}
										/>
									</CardContent>
								</Card>
							</motion.div>
						)}
					</AnimatePresence>
				</CollapsibleContent>
			</Collapsible>
			{/* Prompt Configuration */}
			<Collapsible open={showPromptSettings} onOpenChange={setShowPromptSettings}>
				<CollapsibleTrigger asChild>
					<Card className="hover:bg-muted/50 transition-colors cursor-pointer">
						<CardContent className="py-4">
							<div className="flex items-center justify-between">
								<div className="flex items-center gap-3">
									<div className="p-2 rounded-xl bg-cyan-500/10 dark:bg-cyan-500/20 border border-cyan-500/20">
										<MessageSquare className="w-5 h-5 text-cyan-600 dark:text-cyan-400" />
									</div>
									<div>
										<h3 className="font-semibold">AI Response Settings</h3>
										<p className="text-sm text-muted-foreground">
											Configure citations and custom instructions (Optional)
										</p>
									</div>
								</div>
								<motion.div
									animate={{ rotate: showPromptSettings ? 180 : 0 }}
									transition={{ duration: 0.2 }}
								>
									<ChevronDown className="w-5 h-5 text-muted-foreground" />
								</motion.div>
							</div>
						</CardContent>
					</Card>
				</CollapsibleTrigger>
				<CollapsibleContent>
					<AnimatePresence>
						{showPromptSettings && (
							<motion.div
								initial={{ opacity: 0, height: 0 }}
								animate={{ opacity: 1, height: "auto" }}
								exit={{ opacity: 0, height: 0 }}
								transition={{ duration: 0.3 }}
							>
								<Card className="mt-2">
									<CardContent className="pt-6">
										<SetupPromptStep
											searchSpaceId={searchSpaceId}
											onComplete={() => setShowPromptSettings(false)}
										/>
									</CardContent>
								</Card>
							</motion.div>
						)}
					</AnimatePresence>
				</CollapsibleContent>
			</Collapsible>
		</motion.div>
	);
}

View file

@ -1,56 +0,0 @@
"use client";
import { CheckCircle } from "lucide-react";
import { motion } from "motion/react";
import { Logo } from "@/components/Logo";
import { Badge } from "@/components/ui/badge";
/** Props for the onboarding page hero header. */
interface OnboardHeaderProps {
	// Main heading text.
	title: string;
	// Sub-heading shown below the title.
	subtitle: string;
	// When true, shows the "AI Configuration Complete" badge.
	isReady?: boolean;
}
/**
 * Centered hero header for the onboarding page: animated logo, title,
 * subtitle, and an optional "ready" badge once AI configuration is complete.
 * Purely presentational; all animation is entrance-only.
 */
export function OnboardHeader({ title, subtitle, isReady }: OnboardHeaderProps) {
	return (
		<motion.div
			initial={{ opacity: 0, y: -20 }}
			animate={{ opacity: 1, y: 0 }}
			transition={{ duration: 0.5, delay: 0.1 }}
			className="text-center mb-10"
		>
			{/* Logo pops in with a spring */}
			<motion.div
				initial={{ scale: 0 }}
				animate={{ scale: 1 }}
				transition={{ type: "spring", stiffness: 200, delay: 0.2 }}
				className="inline-flex items-center justify-center mb-6"
			>
				<Logo className="w-20 h-20 rounded-2xl shadow-lg" />
			</motion.div>
			<motion.div
				initial={{ opacity: 0, y: 10 }}
				animate={{ opacity: 1, y: 0 }}
				transition={{ delay: 0.3 }}
				className="space-y-2"
			>
				<h1 className="text-4xl md:text-5xl font-bold text-foreground">{title}</h1>
				<p className="text-muted-foreground text-lg md:text-xl max-w-2xl mx-auto">{subtitle}</p>
			</motion.div>
			{isReady && (
				<motion.div
					initial={{ opacity: 0, scale: 0.8 }}
					animate={{ opacity: 1, scale: 1 }}
					transition={{ delay: 0.4, type: "spring" }}
					className="mt-4"
				>
					<Badge className="px-4 py-2 text-sm bg-emerald-500/10 border-emerald-500/30 text-emerald-600 dark:text-emerald-400">
						<CheckCircle className="w-4 h-4 mr-2" />
						AI Configuration Complete
					</Badge>
				</motion.div>
			)}
		</motion.div>
	);
}

View file

@ -1,93 +0,0 @@
"use client";
import { Bot } from "lucide-react";
import { motion } from "motion/react";
import { Logo } from "@/components/Logo";
import { SetupLLMStep } from "@/components/onboard/setup-llm-step";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
/** Props for the full-screen first-run LLM setup view. */
interface OnboardLLMSetupProps {
	// Search space the configuration is created in.
	searchSpaceId: number;
	// Page heading.
	title: string;
	// Card heading and description inside the setup card.
	configTitle: string;
	configDescription: string;
	// Callbacks forwarded to SetupLLMStep.
	onConfigCreated: () => void;
	onConfigDeleted: () => void;
	onPreferencesUpdated: () => Promise<void>;
}
/**
 * Full-screen first-run view that hosts SetupLLMStep inside a centered card,
 * with an animated logo/title header. Shown when the user has no LLM
 * configuration yet; all real work happens in SetupLLMStep.
 */
export function OnboardLLMSetup({
	searchSpaceId,
	title,
	configTitle,
	configDescription,
	onConfigCreated,
	onConfigDeleted,
	onPreferencesUpdated,
}: OnboardLLMSetupProps) {
	return (
		<div className="min-h-screen bg-background flex items-center justify-center p-4">
			<motion.div
				initial={{ opacity: 0, y: 20 }}
				animate={{ opacity: 1, y: 0 }}
				transition={{ duration: 0.5 }}
				className="w-full max-w-4xl"
			>
				{/* Header */}
				<div className="text-center mb-8">
					<motion.div
						initial={{ scale: 0 }}
						animate={{ scale: 1 }}
						transition={{ type: "spring", stiffness: 200, delay: 0.1 }}
						className="inline-flex items-center justify-center mb-6"
					>
						<Logo className="w-16 h-16 rounded-2xl shadow-lg" />
					</motion.div>
					<motion.h1
						initial={{ opacity: 0, y: 10 }}
						animate={{ opacity: 1, y: 0 }}
						transition={{ delay: 0.2 }}
						className="text-4xl font-bold text-foreground mb-3"
					>
						{title}
					</motion.h1>
					<motion.p
						initial={{ opacity: 0, y: 10 }}
						animate={{ opacity: 1, y: 0 }}
						transition={{ delay: 0.3 }}
						className="text-muted-foreground text-lg"
					>
						Configure your AI model to get started
					</motion.p>
				</div>
				{/* LLM Setup Card */}
				<motion.div
					initial={{ opacity: 0, y: 20 }}
					animate={{ opacity: 1, y: 0 }}
					transition={{ delay: 0.4 }}
				>
					<Card className="shadow-lg">
						<CardHeader className="text-center border-b pb-6">
							<div className="flex items-center justify-center gap-3 mb-2">
								<div className="p-2 rounded-xl bg-primary/10 border border-primary/20">
									<Bot className="w-6 h-6 text-primary" />
								</div>
								<CardTitle className="text-2xl">{configTitle}</CardTitle>
							</div>
							<CardDescription>{configDescription}</CardDescription>
						</CardHeader>
						<CardContent className="pt-6">
							<SetupLLMStep
								searchSpaceId={searchSpaceId}
								onConfigCreated={onConfigCreated}
								onConfigDeleted={onConfigDeleted}
								onPreferencesUpdated={onPreferencesUpdated}
							/>
						</CardContent>
					</Card>
				</motion.div>
			</motion.div>
		</div>
	);
}

View file

@ -1,47 +0,0 @@
"use client";
import { Wand2 } from "lucide-react";
import { motion } from "motion/react";
/** Props for the full-screen onboarding loading state. */
interface OnboardLoadingProps {
	// Heading shown under the spinning wand.
	title: string;
	// Supporting copy under the heading.
	subtitle: string;
}
/**
 * Full-screen loading placeholder for onboarding: a continuously rotating
 * wand icon, the given title/subtitle, and three staggered pulsing dots.
 * Purely presentational.
 */
export function OnboardLoading({ title, subtitle }: OnboardLoadingProps) {
	return (
		<div className="min-h-screen bg-background flex items-center justify-center p-4">
			<motion.div
				initial={{ opacity: 0, scale: 0.9 }}
				animate={{ opacity: 1, scale: 1 }}
				transition={{ duration: 0.5 }}
				className="text-center"
			>
				<div className="relative mb-8 flex justify-center">
					{/* Infinite linear rotation = spinner */}
					<motion.div
						animate={{ rotate: 360 }}
						transition={{ duration: 2, repeat: Infinity, ease: "linear" }}
					>
						<Wand2 className="w-16 h-16 text-primary" />
					</motion.div>
				</div>
				<h2 className="text-2xl font-bold text-foreground mb-2">{title}</h2>
				<p className="text-muted-foreground">{subtitle}</p>
				{/* Three dots pulsing with a 0.2s stagger */}
				<div className="mt-6 flex justify-center gap-1.5">
					{[0, 1, 2].map((i) => (
						<motion.div
							key={i}
							className="w-2 h-2 rounded-full bg-primary"
							animate={{ scale: [1, 1.5, 1], opacity: [0.5, 1, 0.5] }}
							transition={{
								duration: 1,
								repeat: Infinity,
								delay: i * 0.2,
							}}
						/>
					))}
				</div>
			</motion.div>
		</div>
	);
}

View file

@ -1,38 +0,0 @@
"use client";
import { Bot, Brain, Sparkles } from "lucide-react";
import { motion } from "motion/react";
import { Badge } from "@/components/ui/badge";
/** Props for the onboarding summary badges row. */
interface OnboardStatsProps {
	// Number of available global (shared) LLM configs.
	globalConfigsCount: number;
	// Number of the user's own LLM configs.
	userConfigsCount: number;
}
/**
 * Row of summary badges on the onboarding screen: global model count and
 * custom config count (each hidden when zero, with naive pluralization),
 * plus a static "All Roles Assigned" badge.
 */
export function OnboardStats({ globalConfigsCount, userConfigsCount }: OnboardStatsProps) {
	return (
		<motion.div
			initial={{ opacity: 0, y: 20 }}
			animate={{ opacity: 1, y: 0 }}
			transition={{ delay: 0.5 }}
			className="flex flex-wrap justify-center gap-3 mb-10"
		>
			{globalConfigsCount > 0 && (
				<Badge variant="secondary" className="px-3 py-1.5">
					<Sparkles className="w-3 h-3 mr-1.5 text-violet-500" />
					{globalConfigsCount} Global Model{globalConfigsCount > 1 ? "s" : ""}
				</Badge>
			)}
			{userConfigsCount > 0 && (
				<Badge variant="secondary" className="px-3 py-1.5">
					<Bot className="w-3 h-3 mr-1.5 text-blue-500" />
					{userConfigsCount} Custom Config{userConfigsCount > 1 ? "s" : ""}
				</Badge>
			)}
			<Badge variant="secondary" className="px-3 py-1.5">
				<Brain className="w-3 h-3 mr-1.5 text-fuchsia-500" />
				All Roles Assigned
			</Badge>
		</motion.div>
	);
}

View file

@ -1,813 +0,0 @@
"use client";
import { useAtomValue } from "jotai";
import {
AlertCircle,
Bot,
Brain,
Check,
CheckCircle,
ChevronDown,
ChevronsUpDown,
ChevronUp,
Plus,
Trash2,
Zap,
} from "lucide-react";
import { motion } from "motion/react";
import { useTranslations } from "next-intl";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import {
createLLMConfigMutationAtom,
deleteLLMConfigMutationAtom,
updateLLMPreferencesMutationAtom,
} from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import {
Command,
CommandEmpty,
CommandGroup,
CommandInput,
CommandItem,
CommandList,
} from "@/components/ui/command";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { Separator } from "@/components/ui/separator";
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import { type CreateLLMConfigRequest, LLMConfig } from "@/contracts/types/llm-config.types";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
/** Props for the onboarding "set up LLM" step. */
interface SetupLLMStepProps {
	// Search space the new LLM configs and role preferences belong to.
	searchSpaceId: number;
	// Fired after a config is successfully created.
	onConfigCreated?: () => void;
	// Fired after a config is deleted.
	onConfigDeleted?: () => void;
	// Awaited after role preferences are auto-saved (all three roles assigned).
	onPreferencesUpdated?: () => Promise<void>;
}
// Static metadata for the three LLM roles a config can be assigned to.
// `key` is the preference field written to the backend; the *Key fields are
// i18n message keys resolved via useTranslations("onboard"); `color` styles
// the role's icon chip in both light and dark themes.
const ROLE_DESCRIPTIONS = {
	// Large-context summarization/ingestion role.
	long_context: {
		icon: Brain,
		key: "long_context_llm_id" as const,
		titleKey: "long_context_llm_title",
		descKey: "long_context_llm_desc",
		examplesKey: "long_context_llm_examples",
		color:
			"bg-blue-100 text-blue-800 border-blue-200 dark:bg-blue-950 dark:text-blue-200 dark:border-blue-800",
	},
	// Low-latency role for quick responses.
	fast: {
		icon: Zap,
		key: "fast_llm_id" as const,
		titleKey: "fast_llm_title",
		descKey: "fast_llm_desc",
		examplesKey: "fast_llm_examples",
		color:
			"bg-green-100 text-green-800 border-green-200 dark:bg-green-950 dark:text-green-200 dark:border-green-800",
	},
	// Planning/reasoning role.
	strategic: {
		icon: Bot,
		key: "strategic_llm_id" as const,
		titleKey: "strategic_llm_title",
		descKey: "strategic_llm_desc",
		examplesKey: "strategic_llm_examples",
		color:
			"bg-purple-100 text-purple-800 border-purple-200 dark:bg-purple-950 dark:text-purple-200 dark:border-purple-800",
	},
};
/**
 * Onboarding step for configuring LLM providers and assigning them to roles.
 *
 * Two sections:
 *  1. "Add LLM Provider" — collapsible form to create/delete per-user configs
 *     (provider, model, API key/base, language, extra litellm params).
 *  2. "Assign Roles" — one select per role in ROLE_DESCRIPTIONS, drawing from
 *     both global and user configs. Once all three roles are assigned the
 *     preferences are auto-saved via updatePreferences.
 *
 * All data flows through jotai atoms (query atoms for reads, mutation atoms
 * for writes).
 */
export function SetupLLMStep({
	searchSpaceId,
	onConfigCreated,
	onConfigDeleted,
	onPreferencesUpdated,
}: SetupLLMStepProps) {
	const { mutate: createLLMConfig, isPending: isCreatingLlmConfig } = useAtomValue(
		createLLMConfigMutationAtom
	);
	const t = useTranslations("onboard");
	const { mutateAsync: deleteLLMConfig } = useAtomValue(deleteLLMConfigMutationAtom);
	const { data: llmConfigs = [] } = useAtomValue(llmConfigsAtom);
	const { data: globalConfigs = [] } = useAtomValue(globalLLMConfigsAtom);
	const { data: preferences = {} } = useAtomValue(llmPreferencesAtom);
	const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
	const [isAddingNew, setIsAddingNew] = useState(false);
	// Draft for the "add new provider" form; search_space_id is pinned to this step's space.
	const [formData, setFormData] = useState<CreateLLMConfigRequest>({
		name: "",
		provider: "" as CreateLLMConfigRequest["provider"], // Allow it as Default
		custom_provider: "",
		model_name: "",
		api_key: "",
		api_base: "",
		language: "English",
		litellm_params: {},
		search_space_id: searchSpaceId,
	});
	const [modelComboboxOpen, setModelComboboxOpen] = useState(false);
	const [showProviderForm, setShowProviderForm] = useState(false);
	// Role assignments state.
	// NOTE(review): values are "" when unassigned and become numbers after a
	// selection (see handleRoleAssignment), so the state mixes strings and numbers.
	const [assignments, setAssignments] = useState({
		long_context_llm_id: preferences.long_context_llm_id || "",
		fast_llm_id: preferences.fast_llm_id || "",
		strategic_llm_id: preferences.strategic_llm_id || "",
	});
	// Combine global and user-specific configs
	const allConfigs = [...globalConfigs, ...llmConfigs];
	// Re-sync local assignment state whenever saved preferences load/change.
	useEffect(() => {
		setAssignments({
			long_context_llm_id: preferences.long_context_llm_id || "",
			fast_llm_id: preferences.fast_llm_id || "",
			strategic_llm_id: preferences.strategic_llm_id || "",
		});
	}, [preferences]);
	// Generic controlled-input setter for the provider form.
	const handleInputChange = (field: keyof CreateLLMConfigRequest, value: string) => {
		setFormData((prev) => ({ ...prev, [field]: value }));
	};
	// Validate required fields, create the config, then reset the form on success.
	const handleSubmit = async (e: React.FormEvent) => {
		e.preventDefault();
		if (!formData.name || !formData.provider || !formData.model_name || !formData.api_key) {
			toast.error("Please fill in all required fields");
			return;
		}
		createLLMConfig(formData, {
			onError: (error) => {
				console.error("Error creating LLM config:", error);
				if (error instanceof Error) {
					toast.error(error?.message || "Failed to create LLM config");
				}
			},
			onSuccess: () => {
				toast.success("LLM config created successfully");
				// Reset the form back to its initial draft state.
				setFormData({
					name: "",
					provider: "" as CreateLLMConfigRequest["provider"],
					custom_provider: "",
					model_name: "",
					api_key: "",
					api_base: "",
					language: "English",
					litellm_params: {},
					search_space_id: searchSpaceId,
				});
				onConfigCreated?.();
			},
			onSettled: () => {
				// Collapse the "add new" card whether the mutation succeeded or failed.
				setIsAddingNew(false);
			},
		});
	};
	// Record a role -> config assignment; once all three roles are filled,
	// coerce ids to numbers and persist the preferences immediately.
	// NOTE(review): parseInt is called without an explicit radix — confirm ids
	// are always plain decimal strings.
	const handleRoleAssignment = async (role: string, configId: string) => {
		const newAssignments = {
			...assignments,
			[role]: configId === "" ? "" : parseInt(configId),
		};
		setAssignments(newAssignments);
		// Auto-save if this assignment completes all roles
		const hasAllAssignments =
			newAssignments.long_context_llm_id &&
			newAssignments.fast_llm_id &&
			newAssignments.strategic_llm_id;
		if (hasAllAssignments) {
			// Assignments seeded from preferences may still be strings; normalize.
			const numericAssignments = {
				long_context_llm_id:
					typeof newAssignments.long_context_llm_id === "string"
						? parseInt(newAssignments.long_context_llm_id)
						: newAssignments.long_context_llm_id,
				fast_llm_id:
					typeof newAssignments.fast_llm_id === "string"
						? parseInt(newAssignments.fast_llm_id)
						: newAssignments.fast_llm_id,
				strategic_llm_id:
					typeof newAssignments.strategic_llm_id === "string"
						? parseInt(newAssignments.strategic_llm_id)
						: newAssignments.strategic_llm_id,
			};
			await updatePreferences({
				search_space_id: searchSpaceId,
				data: numericAssignments,
			});
			if (onPreferencesUpdated) {
				await onPreferencesUpdated();
			}
		}
	};
	// Derived form helpers: provider metadata and suggested model list.
	const selectedProvider = LLM_PROVIDERS.find((p) => p.value === formData.provider);
	const availableModels = formData.provider ? getModelsByProvider(formData.provider) : [];
	const handleParamsChange = (newParams: Record<string, number | string>) => {
		setFormData((prev) => ({ ...prev, litellm_params: newParams }));
	};
	// Changing provider invalidates the previously chosen model name.
	const handleProviderChange = (value: string) => {
		handleInputChange("provider", value);
		setFormData((prev) => ({ ...prev, model_name: "" }));
	};
	const isAssignmentComplete =
		assignments.long_context_llm_id && assignments.fast_llm_id && assignments.strategic_llm_id;
	return (
		<div className="space-y-8">
			{/* Global Configs Notice - Prominent at top */}
			{globalConfigs.length > 0 && (
				<Alert className="bg-blue-50 border-blue-200 dark:bg-blue-950 dark:border-blue-800">
					<CheckCircle className="h-4 w-4 text-blue-600 dark:text-blue-400" />
					<AlertDescription className="text-blue-800 dark:text-blue-200">
						<div className="space-y-2">
							<p className="font-semibold text-base">
								{globalConfigs.length} global configuration(s) available!
							</p>
							<p className="text-sm">
								You can skip adding your own LLM provider and use our pre-configured models in the
								role assignment section below.
							</p>
							<p className="text-sm">
								Or expand "Add LLM Provider" to add your own custom configurations.
							</p>
						</div>
					</AlertDescription>
				</Alert>
			)}
			{/* Section 1: Add LLM Providers */}
			<div className="space-y-4">
				<div className="flex items-center justify-between">
					<div>
						<h3 className="text-xl font-semibold flex items-center gap-2">
							<Bot className="w-5 h-5" />
							{t("add_llm_provider")}
						</h3>
						<p className="text-sm text-muted-foreground mt-1">{t("configure_first_provider")}</p>
					</div>
					<Button
						variant="ghost"
						size="sm"
						onClick={() => setShowProviderForm(!showProviderForm)}
						className="gap-2"
					>
						{showProviderForm ? (
							<>
								<ChevronUp className="w-4 h-4" />
								Collapse
							</>
						) : (
							<>
								<ChevronDown className="w-4 h-4" />
								Expand
							</>
						)}
					</Button>
				</div>
				{showProviderForm && (
					<motion.div
						initial={{ opacity: 0, height: 0 }}
						animate={{ opacity: 1, height: "auto" }}
						exit={{ opacity: 0, height: 0 }}
						transition={{ duration: 0.3 }}
						className="space-y-4"
					>
						{/* Info Alert */}
						<Alert>
							<AlertCircle className="h-4 w-4" />
							<AlertDescription>{t("add_provider_instruction")}</AlertDescription>
						</Alert>
						{/* Existing Configurations */}
						{llmConfigs.length > 0 && (
							<div className="space-y-3">
								<h4 className="text-sm font-semibold text-muted-foreground">
									{t("your_llm_configs")}
								</h4>
								<div className="grid gap-3">
									{llmConfigs.map((config) => (
										<motion.div
											key={config.id}
											initial={{ opacity: 0, y: 10 }}
											animate={{ opacity: 1, y: 0 }}
											exit={{ opacity: 0, y: -10 }}
										>
											<Card className="border-l-4 border-l-primary">
												<CardContent className="pt-4">
													<div className="flex items-center justify-between">
														<div className="flex-1">
															<div className="flex items-center gap-2 mb-1">
																<Bot className="w-4 h-4" />
																<h4 className="font-medium">{config.name}</h4>
																<Badge variant="secondary" className="text-xs">
																	{config.provider}
																</Badge>
															</div>
															<p className="text-sm text-muted-foreground">
																{t("model")}: {config.model_name}
																{config.language && `${t("language")}: ${config.language}`}
																{config.api_base && `${t("base")}: ${config.api_base}`}
															</p>
														</div>
														{/* Delete is best-effort; failures are only logged. */}
														<Button
															variant="ghost"
															size="sm"
															onClick={async () => {
																try {
																	await deleteLLMConfig({ id: config.id });
																	onConfigDeleted?.();
																} catch (error) {
																	console.error("Failed to delete config:", error);
																}
															}}
															className="text-destructive hover:text-destructive"
														>
															<Trash2 className="w-4 h-4" />
														</Button>
													</div>
												</CardContent>
											</Card>
										</motion.div>
									))}
								</div>
							</div>
						)}
						{/* Add New Provider */}
						{!isAddingNew ? (
							<Card className="border-dashed border-2 hover:border-primary/50 transition-colors">
								<CardContent className="flex flex-col items-center justify-center py-8">
									<Plus className="w-8 h-8 text-muted-foreground mb-3" />
									<h4 className="font-semibold mb-1">{t("add_provider_title")}</h4>
									<p className="text-sm text-muted-foreground text-center mb-3">
										{t("add_provider_subtitle")}
									</p>
									<Button onClick={() => setIsAddingNew(true)} size="sm">
										<Plus className="w-4 h-4 mr-2" />
										{t("add_provider_button")}
									</Button>
								</CardContent>
							</Card>
						) : (
							<Card>
								<CardHeader>
									<CardTitle className="text-lg">{t("add_new_llm_provider")}</CardTitle>
									<CardDescription>{t("configure_new_provider")}</CardDescription>
								</CardHeader>
								<CardContent>
									<form onSubmit={handleSubmit} className="space-y-4">
										<div className="grid grid-cols-1 md:grid-cols-3 gap-4">
											<div className="space-y-2">
												<Label htmlFor="name">{t("config_name_required")}</Label>
												<Input
													id="name"
													placeholder={t("config_name_placeholder")}
													value={formData.name}
													onChange={(e) => handleInputChange("name", e.target.value)}
													required
												/>
											</div>
											<div className="space-y-2">
												<Label htmlFor="provider">{t("provider_required")}</Label>
												<Select value={formData.provider} onValueChange={handleProviderChange}>
													<SelectTrigger>
														<SelectValue placeholder={t("provider_placeholder")} />
													</SelectTrigger>
													<SelectContent className="max-h-[300px]">
														{LLM_PROVIDERS.map((provider) => (
															<SelectItem key={provider.value} value={provider.value}>
																{provider.label}
															</SelectItem>
														))}
													</SelectContent>
												</Select>
											</div>
											<div className="space-y-2">
												<Label htmlFor="language">{t("language_optional")}</Label>
												<Select
													value={formData.language || "English"}
													onValueChange={(value) => handleInputChange("language", value)}
												>
													<SelectTrigger>
														<SelectValue placeholder={t("language_placeholder")} />
													</SelectTrigger>
													<SelectContent>
														{LANGUAGES.map((language) => (
															<SelectItem key={language.value} value={language.value}>
																{language.label}
															</SelectItem>
														))}
													</SelectContent>
												</Select>
											</div>
										</div>
										{/* Custom-provider name is only needed (and required) for CUSTOM. */}
										{formData.provider === "CUSTOM" && (
											<div className="space-y-2">
												<Label htmlFor="custom_provider">{t("custom_provider_name")}</Label>
												<Input
													id="custom_provider"
													placeholder={t("custom_provider_placeholder")}
													value={formData.custom_provider ?? ""}
													onChange={(e) => handleInputChange("custom_provider", e.target.value)}
													required
												/>
											</div>
										)}
										{/* Free-text model name combobox with provider-specific suggestions. */}
										<div className="space-y-2">
											<Label htmlFor="model_name">{t("model_name_required")}</Label>
											<Popover open={modelComboboxOpen} onOpenChange={setModelComboboxOpen}>
												<PopoverTrigger asChild>
													<Button
														variant="outline"
														aria-expanded={modelComboboxOpen}
														className="w-full justify-between font-normal"
													>
														<span className={cn(!formData.model_name && "text-muted-foreground")}>
															{formData.model_name || t("model_name_placeholder")}
														</span>
														<ChevronsUpDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
													</Button>
												</PopoverTrigger>
												<PopoverContent className="w-full p-0" align="start" side="bottom">
													{/* shouldFilter={false}: filtering is done manually below. */}
													<Command shouldFilter={false}>
														<CommandInput
															placeholder={
																selectedProvider?.example ||
																t("model_name_placeholder") ||
																"Type model name..."
															}
															value={formData.model_name}
															onValueChange={(value) => handleInputChange("model_name", value)}
														/>
														<CommandList>
															<CommandEmpty>
																<div className="py-2 text-center text-sm text-muted-foreground">
																	{formData.model_name
																		? `Using custom model: "${formData.model_name}"`
																		: "Type your model name above"}
																</div>
															</CommandEmpty>
															{availableModels.length > 0 && (
																<CommandGroup heading="Suggested Models">
																	{availableModels
																		.filter(
																			(model) =>
																				!formData.model_name ||
																				model.value
																					.toLowerCase()
																					.includes(formData.model_name.toLowerCase()) ||
																				model.label
																					.toLowerCase()
																					.includes(formData.model_name.toLowerCase())
																		)
																		.map((model) => (
																			<CommandItem
																				key={model.value}
																				value={model.value}
																				onSelect={(currentValue) => {
																					handleInputChange("model_name", currentValue);
																					setModelComboboxOpen(false);
																				}}
																				className="flex flex-col items-start py-3"
																			>
																				<div className="flex w-full items-center">
																					<Check
																						className={cn(
																							"mr-2 h-4 w-4 shrink-0",
																							formData.model_name === model.value
																								? "opacity-100"
																								: "opacity-0"
																						)}
																					/>
																					<div className="flex-1">
																						<div className="font-medium">{model.label}</div>
																						{model.contextWindow && (
																							<div className="text-xs text-muted-foreground">
																								Context: {model.contextWindow}
																							</div>
																						)}
																					</div>
																				</div>
																			</CommandItem>
																		))}
																</CommandGroup>
															)}
														</CommandList>
													</Command>
												</PopoverContent>
											</Popover>
											<p className="text-xs text-muted-foreground">
												{availableModels.length > 0
													? `Type freely or select from ${availableModels.length} model suggestions`
													: selectedProvider?.example
														? `${t("examples")}: ${selectedProvider.example}`
														: "Type your model name freely"}
											</p>
										</div>
										<div className="space-y-2">
											<Label htmlFor="api_key">{t("api_key_required")}</Label>
											<Input
												id="api_key"
												type="password"
												placeholder={
													formData.provider === "OLLAMA"
														? "Any value (e.g., ollama)"
														: t("api_key_placeholder")
												}
												value={formData.api_key}
												onChange={(e) => handleInputChange("api_key", e.target.value)}
												required
											/>
											{formData.provider === "OLLAMA" && (
												<p className="text-xs text-muted-foreground">
													💡 Ollama doesn't require authentication enter any value (e.g.,
													"ollama")
												</p>
											)}
										</div>
										<div className="space-y-2">
											<Label htmlFor="api_base">{t("api_base_optional")}</Label>
											<Input
												id="api_base"
												placeholder={selectedProvider?.apiBase || t("api_base_placeholder")}
												value={formData.api_base ?? ""}
												onChange={(e) => handleInputChange("api_base", e.target.value)}
											/>
											{/* Ollama-specific help */}
											{formData.provider === "OLLAMA" && (
												<div className="mt-2 p-3 bg-muted/50 rounded-lg border border-muted">
													<p className="text-xs font-medium mb-2">
														💡 Ollama API Base URL Examples:
													</p>
													<div className="space-y-1.5">
														<button
															type="button"
															className="flex items-center gap-2 text-xs text-muted-foreground hover:text-foreground transition-colors"
															onClick={() =>
																handleInputChange("api_base", "http://localhost:11434")
															}
														>
															<code className="px-1.5 py-0.5 bg-background rounded border">
																http://localhost:11434
															</code>
															<span> Standard local installation</span>
														</button>
														<button
															type="button"
															className="flex items-center gap-2 text-xs text-muted-foreground hover:text-foreground transition-colors"
															onClick={() =>
																handleInputChange("api_base", "http://host.docker.internal:11434")
															}
														>
															<code className="px-1.5 py-0.5 bg-background rounded border">
																http://host.docker.internal:11434
															</code>
															<span> If using SurfSense Docker image</span>
														</button>
													</div>
												</div>
											)}
										</div>
										<div className="pt-2">
											<InferenceParamsEditor
												params={formData.litellm_params || {}}
												setParams={handleParamsChange}
											/>
										</div>
										<div className="flex gap-2 pt-2">
											<Button type="submit" disabled={isCreatingLlmConfig} size="sm">
												{isCreatingLlmConfig ? t("adding") : t("add_provider")}
											</Button>
											<Button
												type="button"
												variant="outline"
												size="sm"
												onClick={() => setIsAddingNew(false)}
												disabled={isCreatingLlmConfig}
											>
												{t("cancel")}
											</Button>
										</div>
									</form>
								</CardContent>
							</Card>
						)}
					</motion.div>
				)}
			</div>
			<Separator className="my-8" />
			{/* Section 2: Assign Roles */}
			<div className="space-y-4">
				<div>
					<h3 className="text-xl font-semibold flex items-center gap-2">
						<Brain className="w-5 h-5" />
						{t("assign_llm_roles")}
					</h3>
					<p className="text-sm text-muted-foreground mt-1">{t("assign_specific_roles")}</p>
				</div>
				{allConfigs.length === 0 ? (
					<Alert>
						<AlertCircle className="h-4 w-4" />
						<AlertDescription>{t("add_provider_before_roles")}</AlertDescription>
					</Alert>
				) : (
					<div className="space-y-4">
						<Alert>
							<AlertCircle className="h-4 w-4" />
							<AlertDescription>{t("assign_roles_instruction")}</AlertDescription>
						</Alert>
						<div className="grid gap-4">
							{Object.entries(ROLE_DESCRIPTIONS).map(([roleKey, role]) => {
								const IconComponent = role.icon;
								const currentAssignment = assignments[role.key];
								const assignedConfig = allConfigs.find((config) => config.id === currentAssignment);
								return (
									<motion.div
										key={roleKey}
										initial={{ opacity: 0, y: 10 }}
										animate={{ opacity: 1, y: 0 }}
										transition={{ delay: Object.keys(ROLE_DESCRIPTIONS).indexOf(roleKey) * 0.1 }}
									>
										<Card
											className={`border-l-4 ${currentAssignment ? "border-l-primary" : "border-l-muted"}`}
										>
											<CardHeader className="pb-3">
												<div className="flex items-center justify-between">
													<div className="flex items-center gap-3">
														<div className={`p-2 rounded-lg ${role.color}`}>
															<IconComponent className="w-5 h-5" />
														</div>
														<div>
															<CardTitle className="text-base">{t(role.titleKey)}</CardTitle>
															<CardDescription className="mt-1 text-xs">
																{t(role.descKey)}
															</CardDescription>
														</div>
													</div>
													{currentAssignment && <CheckCircle className="w-5 h-5 text-green-500" />}
												</div>
											</CardHeader>
											<CardContent className="space-y-3">
												<div className="space-y-2">
													<Label className="text-sm font-medium">{t("assign_llm_config")}:</Label>
													<Select
														value={currentAssignment?.toString() || ""}
														onValueChange={(value) => handleRoleAssignment(role.key, value)}
													>
														<SelectTrigger>
															<SelectValue placeholder={t("select_llm_config")} />
														</SelectTrigger>
														<SelectContent>
															{globalConfigs.length > 0 && (
																<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground">
																	{t("global_configs") || "Global Configurations"}
																</div>
															)}
															{globalConfigs
																.filter((config) => config.id && config.id.toString().trim() !== "")
																.map((config) => (
																	<SelectItem key={config.id} value={config.id.toString()}>
																		<div className="flex items-center gap-2">
																			<Badge variant="secondary" className="text-xs">
																				🌐 Global
																			</Badge>
																			<Badge variant="outline" className="text-xs">
																				{config.provider}
																			</Badge>
																			<span className="text-sm">{config.name}</span>
																			<span className="text-xs text-muted-foreground">
																				({config.model_name})
																			</span>
																		</div>
																	</SelectItem>
																))}
															{llmConfigs.length > 0 && globalConfigs.length > 0 && (
																<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground border-t mt-1">
																	{t("your_configs") || "Your Configurations"}
																</div>
															)}
															{llmConfigs
																.filter((config) => config.id && config.id.toString().trim() !== "")
																.map((config) => (
																	<SelectItem key={config.id} value={config.id.toString()}>
																		<div className="flex items-center gap-2">
																			<Badge variant="outline" className="text-xs">
																				{config.provider}
																			</Badge>
																			<span className="text-sm">{config.name}</span>
																			<span className="text-xs text-muted-foreground">
																				({config.model_name})
																			</span>
																		</div>
																	</SelectItem>
																))}
														</SelectContent>
													</Select>
												</div>
												{assignedConfig && (
													<div className="mt-2 p-3 bg-muted/50 rounded-lg">
														<div className="flex items-center gap-2 text-sm">
															<Bot className="w-4 h-4" />
															<span className="font-medium">{t("assigned")}:</span>
															{"is_global" in assignedConfig && assignedConfig.is_global && (
																<Badge variant="secondary" className="text-xs">
																	🌐 Global
																</Badge>
															)}
															<Badge variant="secondary" className="text-xs">
																{assignedConfig.provider}
															</Badge>
															<span className="text-sm">{assignedConfig.name}</span>
														</div>
														<div className="text-xs text-muted-foreground mt-1">
															{t("model")}: {assignedConfig.model_name}
														</div>
													</div>
												)}
											</CardContent>
										</Card>
									</motion.div>
								);
							})}
						</div>
						{/* Status Indicators */}
						<div className="flex flex-col sm:flex-row items-center justify-between gap-3 pt-2">
							<div className="flex items-center gap-2 text-sm text-muted-foreground">
								<span>{t("progress")}:</span>
								<div className="flex gap-1">
									{Object.keys(ROLE_DESCRIPTIONS).map((key) => {
										const roleKey = ROLE_DESCRIPTIONS[key as keyof typeof ROLE_DESCRIPTIONS].key;
										return (
											<div
												key={key}
												className={`w-2 h-2 rounded-full ${
													assignments[roleKey] ? "bg-primary" : "bg-muted"
												}`}
											/>
										);
									})}
								</div>
								<span>
									{t("roles_assigned", {
										assigned: Object.values(assignments).filter(Boolean).length,
										total: Object.keys(ROLE_DESCRIPTIONS).length,
									})}
								</span>
							</div>
							{isAssignmentComplete && (
								<div className="flex items-center gap-2 px-3 py-1.5 bg-green-50 text-green-700 dark:bg-green-950 dark:text-green-200 rounded-lg border border-green-200 dark:border-green-800">
									<CheckCircle className="w-4 h-4" />
									<span className="text-sm font-medium">{t("all_roles_assigned_saved")}</span>
								</div>
							)}
						</div>
					</div>
				)}
			</div>
		</div>
	);
}

View file

@ -1,340 +0,0 @@
"use client";
import { useAtomValue } from "jotai";
import { ChevronDown, ChevronUp, ExternalLink, Info, Sparkles, User } from "lucide-react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { communityPromptsAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Label } from "@/components/ui/label";
import { ScrollArea } from "@/components/ui/scroll-area";
import { Switch } from "@/components/ui/switch";
import { Tabs, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { Textarea } from "@/components/ui/textarea";
import { authenticatedFetch } from "@/lib/auth-utils";
/** Props for the onboarding prompt-configuration step. */
interface SetupPromptStepProps {
	// Search space whose citation/instruction settings are being edited.
	searchSpaceId: number;
	// Called after settings are saved, or immediately when the step is skipped.
	onComplete?: () => void;
}
/**
 * Onboarding step for optional prompt configuration: a citations on/off
 * toggle and free-form system instructions, with a browsable library of
 * community prompts that can be applied into the textarea.
 *
 * Saving PUTs { citations_enabled, qna_custom_instructions } to the
 * search-space endpoint; "Skip for now" completes without saving.
 */
export function SetupPromptStep({ searchSpaceId, onComplete }: SetupPromptStepProps) {
	const { data: prompts = [], isPending: loadingPrompts } = useAtomValue(communityPromptsAtom);
	const [enableCitations, setEnableCitations] = useState(true);
	const [customInstructions, setCustomInstructions] = useState("");
	const [saving, setSaving] = useState(false);
	const [hasChanges, setHasChanges] = useState(false);
	// Key of the community prompt currently applied, if any.
	const [selectedPromptKey, setSelectedPromptKey] = useState<string | null>(null);
	const [expandedPrompts, setExpandedPrompts] = useState<Set<string>>(new Set());
	const [selectedCategory, setSelectedCategory] = useState("all");
	// Mark that we have changes when user modifies anything
	// NOTE(review): this effect also runs on the initial render, so hasChanges
	// starts true and the Save button is enabled before any edit — confirm intended.
	useEffect(() => {
		setHasChanges(true);
	}, [enableCitations, customInstructions]);
	// Copy a community prompt into the instructions textarea and mark it selected.
	const handleSelectCommunityPrompt = (promptKey: string, promptValue: string) => {
		setCustomInstructions(promptValue);
		setSelectedPromptKey(promptKey);
		toast.success("Community prompt applied");
	};
	// Toggle a prompt card between truncated preview and full text.
	const toggleExpand = (promptKey: string) => {
		const newExpanded = new Set(expandedPrompts);
		if (newExpanded.has(promptKey)) {
			newExpanded.delete(promptKey);
		} else {
			newExpanded.add(promptKey);
		}
		setExpandedPrompts(newExpanded);
	};
	// Get unique categories
	const categories = Array.from(new Set(prompts.map((p) => p.category || "general")));
	const filteredPrompts =
		selectedCategory === "all"
			? prompts
			: prompts.filter((p) => (p.category || "general") === selectedCategory);
	const truncateText = (text: string, maxLength: number = 150) => {
		if (text.length <= maxLength) return text;
		return text.substring(0, maxLength) + "...";
	};
	// Persist the settings to the search space via PUT.
	const handleSave = async () => {
		try {
			setSaving(true);
			// Prepare the update payload with simplified schema
			const payload: any = {
				citations_enabled: enableCitations,
				qna_custom_instructions: customInstructions.trim() || "",
			};
			// Only send update if there's something to update
			// NOTE(review): payload always has two keys, so this guard is always true.
			if (Object.keys(payload).length > 0) {
				const response = await authenticatedFetch(
					`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/searchspaces/${searchSpaceId}`,
					{
						method: "PUT",
						headers: { "Content-Type": "application/json" },
						body: JSON.stringify(payload),
					}
				);
				if (!response.ok) {
					// Surface the backend's detail message when the body is JSON.
					const errorData = await response.json().catch(() => ({}));
					throw new Error(
						errorData.detail || `Failed to save prompt configuration (${response.status})`
					);
				}
				toast.success("Prompt configuration saved successfully");
			}
			setHasChanges(false);
			onComplete?.();
		} catch (error: any) {
			console.error("Error saving prompt configuration:", error);
			toast.error(error.message || "Failed to save prompt configuration");
		} finally {
			setSaving(false);
		}
	};
	const handleSkip = () => {
		// Skip without saving - use defaults
		onComplete?.();
	};
	return (
		<div className="space-y-6">
			<Alert>
				<Info className="h-4 w-4" />
				<AlertDescription>
					These settings are optional. You can skip this step and configure them later in settings.
				</AlertDescription>
			</Alert>
			{/* Citation Toggle */}
			<div className="space-y-4">
				<div className="flex items-center justify-between space-x-4 p-4 rounded-lg border bg-card">
					<div className="flex-1 space-y-1">
						<Label htmlFor="enable-citations" className="text-base font-medium">
							Enable Citations
						</Label>
						<p className="text-sm text-muted-foreground">
							When enabled, AI responses will include citations to source documents using
							[citation:id] format.
						</p>
					</div>
					<Switch
						id="enable-citations"
						checked={enableCitations}
						onCheckedChange={setEnableCitations}
					/>
				</div>
				{!enableCitations && (
					<Alert
						variant="default"
						className="bg-yellow-50 dark:bg-yellow-950/20 border-yellow-200 dark:border-yellow-800"
					>
						<Info className="h-4 w-4 text-yellow-600 dark:text-yellow-500" />
						<AlertDescription className="text-yellow-800 dark:text-yellow-300">
							Disabling citations means AI responses won't include source references. You can
							re-enable this anytime in settings.
						</AlertDescription>
					</Alert>
				)}
			</div>
			{/* SearchSpace System Instructions */}
			<div className="space-y-4">
				<div className="space-y-2">
					<Label htmlFor="custom-instructions" className="text-base font-medium">
						SearchSpace System Instructions (Optional)
					</Label>
					<p className="text-sm text-muted-foreground">
						Add system instructions to guide how the AI should respond. Choose from community
						prompts below or write your own.
					</p>
					{/* Community Prompts Section */}
					{!loadingPrompts && prompts.length > 0 && (
						<Card className="border-dashed">
							<CardHeader className="pb-3">
								<CardTitle className="text-sm flex items-center gap-2">
									<Sparkles className="h-4 w-4" />
									Community Prompts Library
								</CardTitle>
								<CardDescription className="text-xs">
									Browse {prompts.length} curated prompts. Click to preview or apply directly
								</CardDescription>
							</CardHeader>
							<CardContent>
								<Tabs
									value={selectedCategory}
									onValueChange={setSelectedCategory}
									className="w-full"
								>
									<TabsList className="grid w-full grid-cols-5 mb-4">
										<TabsTrigger value="all" className="text-xs">
											All ({prompts.length})
										</TabsTrigger>
										{categories.map((category) => (
											<TabsTrigger key={category} value={category} className="text-xs capitalize">
												{category} (
												{prompts.filter((p) => (p.category || "general") === category).length})
											</TabsTrigger>
										))}
									</TabsList>
									<ScrollArea className="h-[300px] pr-4">
										<div className="space-y-3">
											{filteredPrompts.map((prompt) => {
												const isExpanded = expandedPrompts.has(prompt.key);
												const isSelected = selectedPromptKey === prompt.key;
												const displayText = isExpanded
													? prompt.value
													: truncateText(prompt.value, 120);
												return (
													<div
														key={prompt.key}
														className={`p-4 rounded-lg border transition-all ${
															isSelected
																? "border-primary bg-accent/50"
																: "border-border hover:border-primary/50 hover:bg-accent/30"
														}`}
													>
														<div className="flex items-start justify-between gap-2 mb-2">
															<div className="flex items-center gap-2 flex-wrap flex-1">
																<Badge variant="outline" className="text-xs font-medium">
																	{prompt.key.replace(/_/g, " ")}
																</Badge>
																{prompt.category && (
																	<Badge variant="secondary" className="text-xs capitalize">
																		{prompt.category}
																	</Badge>
																)}
																{isSelected && (
																	<Badge variant="default" className="text-xs">
																		Selected
																	</Badge>
																)}
															</div>
															{prompt.link && (
																<a
																	href={prompt.link}
																	target="_blank"
																	rel="noopener noreferrer"
																	className="text-muted-foreground hover:text-primary shrink-0"
																	title="View source"
																>
																	<ExternalLink className="h-4 w-4" />
																</a>
															)}
														</div>
														<p className="text-sm text-foreground mb-3 whitespace-pre-wrap">
															{displayText}
														</p>
														<div className="flex items-center justify-between gap-2">
															<div className="flex items-center gap-2 text-xs text-muted-foreground">
																<User className="h-3 w-3" />
																<span>{prompt.author}</span>
															</div>
															<div className="flex items-center gap-2">
																{prompt.value.length > 120 && (
																	<Button
																		type="button"
																		variant="ghost"
																		size="sm"
																		onClick={() => toggleExpand(prompt.key)}
																		className="h-7 text-xs"
																	>
																		{isExpanded ? (
																			<>
																				<ChevronUp className="h-3 w-3 mr-1" />
																				Show less
																			</>
																		) : (
																			<>
																				<ChevronDown className="h-3 w-3 mr-1" />
																				Read more
																			</>
																		)}
																	</Button>
																)}
																<Button
																	type="button"
																	variant={isSelected ? "default" : "secondary"}
																	size="sm"
																	onClick={() =>
																		handleSelectCommunityPrompt(prompt.key, prompt.value)
																	}
																	className="h-7 text-xs"
																>
																	{isSelected ? "Applied" : "Use This"}
																</Button>
															</div>
														</div>
													</div>
												);
											})}
										</div>
									</ScrollArea>
								</Tabs>
							</CardContent>
						</Card>
					)}
					{/* Typing manually clears the "applied community prompt" marker. */}
					<Textarea
						id="custom-instructions"
						placeholder="E.g., Always provide practical examples, be concise, focus on technical details..."
						value={customInstructions}
						onChange={(e) => {
							setCustomInstructions(e.target.value);
							setSelectedPromptKey(null);
						}}
						rows={6}
						className="resize-none"
					/>
					<div className="flex items-center justify-between">
						<p className="text-xs text-muted-foreground">{customInstructions.length} characters</p>
						{customInstructions.length > 0 && (
							<Button
								type="button"
								variant="ghost"
								size="sm"
								onClick={() => {
									setCustomInstructions("");
									setSelectedPromptKey(null);
								}}
								className="h-auto py-1 px-2 text-xs"
							>
								Clear
							</Button>
						)}
					</div>
				</div>
			</div>
			{/* Action Buttons */}
			<div className="flex items-center justify-between pt-4 border-t">
				<Button variant="ghost" onClick={handleSkip} disabled={saving}>
					Skip for now
				</Button>
				<Button onClick={handleSave} disabled={saving || !hasChanges}>
					{saving ? "Saving..." : "Save Configuration"}
				</Button>
			</div>
		</div>
	);
}

View file

@ -1,148 +1,133 @@
"use client"
"use client";
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "@/components/ui/collapsible"
import { cn } from "@/lib/utils"
import { Brain, ChevronDown, Circle, Loader2, Search, Sparkles, Lightbulb, CheckCircle2 } from "lucide-react"
import React from "react"
Brain,
CheckCircle2,
ChevronDown,
Circle,
Lightbulb,
Loader2,
Search,
Sparkles,
} from "lucide-react";
import React from "react";
import { Collapsible, CollapsibleContent, CollapsibleTrigger } from "@/components/ui/collapsible";
import { cn } from "@/lib/utils";
export type ChainOfThoughtItemProps = React.ComponentProps<"div">
export type ChainOfThoughtItemProps = React.ComponentProps<"div">;
export const ChainOfThoughtItem = ({
children,
className,
...props
}: ChainOfThoughtItemProps) => (
<div className={cn("text-muted-foreground text-sm", className)} {...props}>
{children}
</div>
)
export const ChainOfThoughtItem = ({ children, className, ...props }: ChainOfThoughtItemProps) => (
<div className={cn("text-muted-foreground text-sm", className)} {...props}>
{children}
</div>
);
export type ChainOfThoughtTriggerProps = React.ComponentProps<
typeof CollapsibleTrigger
> & {
leftIcon?: React.ReactNode
swapIconOnHover?: boolean
}
export type ChainOfThoughtTriggerProps = React.ComponentProps<typeof CollapsibleTrigger> & {
leftIcon?: React.ReactNode;
swapIconOnHover?: boolean;
};
export const ChainOfThoughtTrigger = ({
children,
className,
leftIcon,
swapIconOnHover = true,
...props
children,
className,
leftIcon,
swapIconOnHover = true,
...props
}: ChainOfThoughtTriggerProps) => (
<CollapsibleTrigger
className={cn(
"group text-muted-foreground hover:text-foreground flex cursor-pointer items-center justify-start gap-1 text-left text-sm transition-colors",
className
)}
{...props}
>
<div className="flex items-center gap-2">
{leftIcon ? (
<span className="relative inline-flex size-4 items-center justify-center">
<span
className={cn(
"transition-opacity",
swapIconOnHover && "group-hover:opacity-0"
)}
>
{leftIcon}
</span>
{swapIconOnHover && (
<ChevronDown className="absolute size-4 opacity-0 transition-opacity group-hover:opacity-100 group-data-[state=open]:rotate-180" />
)}
</span>
) : (
<span className="relative inline-flex size-4 items-center justify-center">
<Circle className="size-2 fill-current" />
</span>
)}
<span>{children}</span>
</div>
{!leftIcon && (
<ChevronDown className="size-4 transition-transform group-data-[state=open]:rotate-180" />
)}
</CollapsibleTrigger>
)
<CollapsibleTrigger
className={cn(
"group text-muted-foreground hover:text-foreground flex cursor-pointer items-center justify-start gap-1 text-left text-sm transition-colors",
className
)}
{...props}
>
<div className="flex items-center gap-2">
{leftIcon ? (
<span className="relative inline-flex size-4 items-center justify-center">
<span className={cn("transition-opacity", swapIconOnHover && "group-hover:opacity-0")}>
{leftIcon}
</span>
{swapIconOnHover && (
<ChevronDown className="absolute size-4 opacity-0 transition-opacity group-hover:opacity-100 group-data-[state=open]:rotate-180" />
)}
</span>
) : (
<span className="relative inline-flex size-4 items-center justify-center">
<Circle className="size-2 fill-current" />
</span>
)}
<span>{children}</span>
</div>
{!leftIcon && (
<ChevronDown className="size-4 transition-transform group-data-[state=open]:rotate-180" />
)}
</CollapsibleTrigger>
);
export type ChainOfThoughtContentProps = React.ComponentProps<
typeof CollapsibleContent
>
export type ChainOfThoughtContentProps = React.ComponentProps<typeof CollapsibleContent>;
export const ChainOfThoughtContent = ({
children,
className,
...props
children,
className,
...props
}: ChainOfThoughtContentProps) => {
return (
<CollapsibleContent
className={cn(
"text-popover-foreground data-[state=closed]:animate-collapsible-up data-[state=open]:animate-collapsible-down overflow-hidden",
className
)}
{...props}
>
<div className="grid grid-cols-[min-content_minmax(0,1fr)] gap-x-4">
<div className="bg-primary/20 ml-1.75 h-full w-px group-data-[last=true]:hidden" />
<div className="ml-1.75 h-full w-px bg-transparent group-data-[last=false]:hidden" />
<div className="mt-2 space-y-2">{children}</div>
</div>
</CollapsibleContent>
)
}
return (
<CollapsibleContent
className={cn(
"text-popover-foreground data-[state=closed]:animate-collapsible-up data-[state=open]:animate-collapsible-down overflow-hidden",
className
)}
{...props}
>
<div className="grid grid-cols-[min-content_minmax(0,1fr)] gap-x-4">
<div className="bg-primary/20 ml-1.75 h-full w-px group-data-[last=true]:hidden" />
<div className="ml-1.75 h-full w-px bg-transparent group-data-[last=false]:hidden" />
<div className="mt-2 space-y-2">{children}</div>
</div>
</CollapsibleContent>
);
};
export type ChainOfThoughtProps = {
children: React.ReactNode
className?: string
}
children: React.ReactNode;
className?: string;
};
export function ChainOfThought({ children, className }: ChainOfThoughtProps) {
const childrenArray = React.Children.toArray(children)
const childrenArray = React.Children.toArray(children);
return (
<div className={cn("space-y-0", className)}>
{childrenArray.map((child, index) => (
<React.Fragment key={index}>
{React.isValidElement(child) &&
React.cloneElement(
child as React.ReactElement<ChainOfThoughtStepProps>,
{
isLast: index === childrenArray.length - 1,
}
)}
</React.Fragment>
))}
</div>
)
return (
<div className={cn("space-y-0", className)}>
{childrenArray.map((child, index) => (
<React.Fragment key={index}>
{React.isValidElement(child) &&
React.cloneElement(child as React.ReactElement<ChainOfThoughtStepProps>, {
isLast: index === childrenArray.length - 1,
})}
</React.Fragment>
))}
</div>
);
}
export type ChainOfThoughtStepProps = {
children: React.ReactNode
className?: string
isLast?: boolean
}
children: React.ReactNode;
className?: string;
isLast?: boolean;
};
export const ChainOfThoughtStep = ({
children,
className,
isLast = false,
...props
children,
className,
isLast = false,
...props
}: ChainOfThoughtStepProps & React.ComponentProps<typeof Collapsible>) => {
return (
<Collapsible
className={cn("group", className)}
data-last={isLast}
{...props}
>
{children}
<div className="flex justify-start group-data-[last=true]:hidden">
<div className="bg-primary/20 ml-1.75 h-4 w-px" />
</div>
</Collapsible>
)
}
return (
<Collapsible className={cn("group", className)} data-last={isLast} {...props}>
{children}
<div className="flex justify-start group-data-[last=true]:hidden">
<div className="bg-primary/20 ml-1.75 h-4 w-px" />
</div>
</Collapsible>
);
};

View file

@ -121,11 +121,6 @@ export function SearchSpaceForm({
<h2 className="text-3xl font-bold tracking-tight">
{isEditing ? "Edit Search Space" : "Create Search Space"}
</h2>
<p className="text-muted-foreground">
{isEditing
? "Update your search space details"
: "Create a new search space to organize your documents, chats, and podcasts."}
</p>
</div>
<Button
variant="ghost"
@ -198,8 +193,8 @@ export function SearchSpaceForm({
)}
</div>
<p className="text-muted-foreground">
A search space allows you to organize and search through your documents, generate
podcasts, and have AI-powered conversations about your content.
A search space is your personal workspace. Connect external sources, upload documents,
take notes, and get work done with AI agents.
</p>
</div>
</Tilt>

View file

@ -4,24 +4,22 @@ import { useAtomValue } from "jotai";
import {
AlertCircle,
Bot,
Brain,
CheckCircle,
FileText,
Loader2,
RefreshCw,
RotateCcw,
Save,
Settings2,
Zap,
} from "lucide-react";
import { motion } from "motion/react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { updateLLMPreferencesMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import { updateLLMPreferencesMutationAtom } from "@/atoms/new-llm-config/new-llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
globalNewLLMConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
newLLMConfigsAtom,
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
@ -36,29 +34,21 @@ import {
} from "@/components/ui/select";
const ROLE_DESCRIPTIONS = {
long_context: {
icon: Brain,
title: "Long Context LLM",
description: "Handles summarization of long documents and complex Q&A",
color: "bg-blue-100 text-blue-800 border-blue-200",
examples: "Document analysis, research synthesis, complex Q&A",
characteristics: ["Large context window", "Deep reasoning", "Complex analysis"],
},
fast: {
icon: Zap,
title: "Fast LLM",
description: "Optimized for quick responses and real-time interactions",
color: "bg-green-100 text-green-800 border-green-200",
examples: "Quick searches, simple questions, instant responses",
characteristics: ["Low latency", "Quick responses", "Real-time chat"],
},
strategic: {
agent: {
icon: Bot,
title: "Strategic LLM",
description: "Advanced reasoning for planning and strategic decision making",
title: "Agent LLM",
description: "Primary LLM for chat interactions and agent operations",
color: "bg-blue-100 text-blue-800 border-blue-200",
examples: "Chat responses, agent tasks, real-time interactions",
characteristics: ["Fast responses", "Conversational", "Agent operations"],
},
document_summary: {
icon: FileText,
title: "Document Summary LLM",
description: "Handles document summarization, long context analysis, and query reformulation",
color: "bg-purple-100 text-purple-800 border-purple-200",
examples: "Planning workflows, strategic analysis, complex problem solving",
characteristics: ["Strategic thinking", "Long-term planning", "Complex reasoning"],
examples: "Document analysis, podcasts, research synthesis",
characteristics: ["Large context window", "Deep reasoning", "Summarization"],
},
};
@ -67,18 +57,19 @@ interface LLMRoleManagerProps {
}
export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
// Use new LLM config system
const {
data: llmConfigs = [],
data: newLLMConfigs = [],
isFetching: configsLoading,
error: configsError,
refetch: refreshConfigs,
} = useAtomValue(llmConfigsAtom);
} = useAtomValue(newLLMConfigsAtom);
const {
data: globalConfigs = [],
isFetching: globalConfigsLoading,
error: globalConfigsError,
refetch: refreshGlobalConfigs,
} = useAtomValue(globalLLMConfigsAtom);
} = useAtomValue(globalNewLLMConfigsAtom);
const {
data: preferences = {},
isFetching: preferencesLoading,
@ -89,9 +80,8 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
const [assignments, setAssignments] = useState({
long_context_llm_id: preferences.long_context_llm_id || "",
fast_llm_id: preferences.fast_llm_id || "",
strategic_llm_id: preferences.strategic_llm_id || "",
agent_llm_id: preferences.agent_llm_id || "",
document_summary_llm_id: preferences.document_summary_llm_id || "",
});
const [hasChanges, setHasChanges] = useState(false);
@ -99,9 +89,8 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
useEffect(() => {
const newAssignments = {
long_context_llm_id: preferences.long_context_llm_id || "",
fast_llm_id: preferences.fast_llm_id || "",
strategic_llm_id: preferences.strategic_llm_id || "",
agent_llm_id: preferences.agent_llm_id || "",
document_summary_llm_id: preferences.document_summary_llm_id || "",
};
setAssignments(newAssignments);
setHasChanges(false);
@ -117,9 +106,8 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
// Check if there are changes compared to current preferences
const currentPrefs = {
long_context_llm_id: preferences.long_context_llm_id || "",
fast_llm_id: preferences.fast_llm_id || "",
strategic_llm_id: preferences.strategic_llm_id || "",
agent_llm_id: preferences.agent_llm_id || "",
document_summary_llm_id: preferences.document_summary_llm_id || "",
};
const hasChangesNow = Object.keys(newAssignments).some(
@ -135,24 +123,18 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
setIsSaving(true);
const numericAssignments = {
long_context_llm_id:
typeof assignments.long_context_llm_id === "string"
? assignments.long_context_llm_id
? parseInt(assignments.long_context_llm_id)
agent_llm_id:
typeof assignments.agent_llm_id === "string"
? assignments.agent_llm_id
? parseInt(assignments.agent_llm_id)
: undefined
: assignments.long_context_llm_id,
fast_llm_id:
typeof assignments.fast_llm_id === "string"
? assignments.fast_llm_id
? parseInt(assignments.fast_llm_id)
: assignments.agent_llm_id,
document_summary_llm_id:
typeof assignments.document_summary_llm_id === "string"
? assignments.document_summary_llm_id
? parseInt(assignments.document_summary_llm_id)
: undefined
: assignments.fast_llm_id,
strategic_llm_id:
typeof assignments.strategic_llm_id === "string"
? assignments.strategic_llm_id
? parseInt(assignments.strategic_llm_id)
: undefined
: assignments.strategic_llm_id,
: assignments.document_summary_llm_id,
};
await updatePreferences({
@ -168,21 +150,18 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
const handleReset = () => {
setAssignments({
long_context_llm_id: preferences.long_context_llm_id || "",
fast_llm_id: preferences.fast_llm_id || "",
strategic_llm_id: preferences.strategic_llm_id || "",
agent_llm_id: preferences.agent_llm_id || "",
document_summary_llm_id: preferences.document_summary_llm_id || "",
});
setHasChanges(false);
};
const isAssignmentComplete =
assignments.long_context_llm_id && assignments.fast_llm_id && assignments.strategic_llm_id;
const assignedConfigIds = Object.values(assignments).filter((id) => id !== "");
const isAssignmentComplete = assignments.agent_llm_id && assignments.document_summary_llm_id;
// Combine global and custom configs
// Combine global and custom configs (new system)
const allConfigs = [
...globalConfigs.map((config) => ({ ...config, is_global: true })),
...llmConfigs.filter((config) => config.id && config.id.toString().trim() !== ""),
...newLLMConfigs.filter((config) => config.id && config.id.toString().trim() !== ""),
];
const availableConfigs = allConfigs;
@ -194,19 +173,6 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<div className="space-y-6">
{/* Header */}
<div className="flex flex-col space-y-4 lg:flex-row lg:items-center lg:justify-between lg:space-y-0">
<div className="space-y-1">
<div className="flex items-center space-x-3">
<div className="flex h-10 w-10 items-center justify-center rounded-lg bg-purple-500/10">
<Settings2 className="h-5 w-5 text-purple-600" />
</div>
<div>
<h2 className="text-2xl font-bold tracking-tight">LLM Role Management</h2>
<p className="text-muted-foreground">
Assign your LLM configurations to specific roles for different purposes.
</p>
</div>
</div>
</div>
<div className="flex flex-wrap gap-2">
<Button
variant="outline"
@ -263,99 +229,6 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
</Card>
)}
{/* Stats Overview */}
{!isLoading && !hasError && (
<div className="grid gap-3 grid-cols-2 lg:grid-cols-4">
<Card className="overflow-hidden">
<div className="h-1 bg-blue-500" />
<CardContent className="p-4">
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p className="text-2xl font-bold tracking-tight">{availableConfigs.length}</p>
<p className="text-xs font-medium text-muted-foreground">Available Models</p>
<div className="flex flex-wrap gap-x-2 gap-y-0.5 text-[10px] text-muted-foreground">
<span>{globalConfigs.length} Global</span>
<span>{llmConfigs.length} Custom</span>
</div>
</div>
<div className="flex h-9 w-9 shrink-0 items-center justify-center rounded-lg bg-blue-500/10">
<Bot className="h-4 w-4 text-blue-600" />
</div>
</div>
</CardContent>
</Card>
<Card className="overflow-hidden">
<div className="h-1 bg-purple-500" />
<CardContent className="p-4">
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p className="text-2xl font-bold tracking-tight">{assignedConfigIds.length}</p>
<p className="text-xs font-medium text-muted-foreground">Assigned Roles</p>
</div>
<div className="flex h-9 w-9 shrink-0 items-center justify-center rounded-lg bg-purple-500/10">
<CheckCircle className="h-4 w-4 text-purple-600" />
</div>
</div>
</CardContent>
</Card>
<Card className="overflow-hidden">
<div className={`h-1 ${isAssignmentComplete ? "bg-green-500" : "bg-yellow-500"}`} />
<CardContent className="p-4">
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p className="text-2xl font-bold tracking-tight">
{Math.round((assignedConfigIds.length / 3) * 100)}%
</p>
<p className="text-xs font-medium text-muted-foreground">Completion</p>
</div>
<div
className={`flex h-9 w-9 shrink-0 items-center justify-center rounded-lg ${
isAssignmentComplete ? "bg-green-500/10" : "bg-yellow-500/10"
}`}
>
{isAssignmentComplete ? (
<CheckCircle className="h-4 w-4 text-green-600" />
) : (
<AlertCircle className="h-4 w-4 text-yellow-600" />
)}
</div>
</div>
</CardContent>
</Card>
<Card className="overflow-hidden">
<div className={`h-1 ${isAssignmentComplete ? "bg-emerald-500" : "bg-orange-500"}`} />
<CardContent className="p-4">
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p
className={`text-2xl font-bold tracking-tight ${
isAssignmentComplete ? "text-emerald-600" : "text-orange-600"
}`}
>
{isAssignmentComplete ? "Ready" : "Setup"}
</p>
<p className="text-xs font-medium text-muted-foreground">Status</p>
</div>
<div
className={`flex h-9 w-9 shrink-0 items-center justify-center rounded-lg ${
isAssignmentComplete ? "bg-emerald-500/10" : "bg-orange-500/10"
}`}
>
{isAssignmentComplete ? (
<CheckCircle className="h-4 w-4 text-emerald-600" />
) : (
<RefreshCw className="h-4 w-4 text-orange-600" />
)}
</div>
</div>
</CardContent>
</Card>
</div>
)}
{/* Info Alert */}
{!isLoading && !hasError && (
<div className="space-y-6">
@ -363,7 +236,7 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>
No LLM configurations found. Please add at least one LLM provider in the Model
No LLM configurations found. Please add at least one LLM provider in the Agent
Configs tab before assigning roles.
</AlertDescription>
</Alert>
@ -459,12 +332,12 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
)}
{/* Custom Configurations */}
{llmConfigs.length > 0 && (
{newLLMConfigs.length > 0 && (
<>
<div className="px-2 py-1.5 text-xs font-semibold text-muted-foreground">
Your Configurations
</div>
{llmConfigs
{newLLMConfigs
.filter(
(config) => config.id && config.id.toString().trim() !== ""
)
@ -536,38 +409,6 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
</Button>
</div>
)}
{/* Status Indicator */}
{isAssignmentComplete && !hasChanges && (
<div className="flex justify-center pt-4">
<div className="flex items-center gap-2 px-4 py-2 bg-green-50 text-green-700 rounded-lg border border-green-200">
<CheckCircle className="w-4 h-4" />
<span className="text-sm font-medium">All roles assigned and saved!</span>
</div>
</div>
)}
{/* Progress Indicator */}
<div className="flex justify-center">
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<span>Progress:</span>
<div className="flex gap-1">
{Object.keys(ROLE_DESCRIPTIONS).map((key) => (
<div
key={key}
className={`w-2 h-2 rounded-full ${
assignments[`${key}_llm_id` as keyof typeof assignments]
? "bg-primary"
: "bg-muted"
}`}
/>
))}
</div>
<span>
{assignedConfigIds.length} of {Object.keys(ROLE_DESCRIPTIONS).length} roles assigned
</span>
</div>
</div>
</div>
)}
</div>

File diff suppressed because it is too large Load diff

View file

@ -1,30 +1,14 @@
"use client";
import { useQuery } from "@tanstack/react-query";
import { useAtomValue } from "jotai";
import {
ChevronDown,
ChevronUp,
ExternalLink,
Info,
RotateCcw,
Save,
Sparkles,
User,
} from "lucide-react";
import { AlertTriangle, Info, RotateCcw, Save } from "lucide-react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { communityPromptsAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Label } from "@/components/ui/label";
import { ScrollArea } from "@/components/ui/scroll-area";
import { Separator } from "@/components/ui/separator";
import { Skeleton } from "@/components/ui/skeleton";
import { Switch } from "@/components/ui/switch";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { Textarea } from "@/components/ui/textarea";
import { searchSpacesApiService } from "@/lib/apis/search-spaces-api.service";
import { authenticatedFetch } from "@/lib/auth-utils";
@ -44,20 +28,14 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
queryFn: () => searchSpacesApiService.getSearchSpace({ id: searchSpaceId }),
enabled: !!searchSpaceId,
});
const { data: prompts = [], isPending: loadingPrompts } = useAtomValue(communityPromptsAtom);
const [enableCitations, setEnableCitations] = useState(true);
const [customInstructions, setCustomInstructions] = useState("");
const [saving, setSaving] = useState(false);
const [hasChanges, setHasChanges] = useState(false);
const [selectedPromptKey, setSelectedPromptKey] = useState<string | null>(null);
const [expandedPrompts, setExpandedPrompts] = useState<Set<string>>(new Set());
const [selectedCategory, setSelectedCategory] = useState("all");
// Initialize state from fetched search space
useEffect(() => {
if (searchSpace) {
setEnableCitations(searchSpace.citations_enabled);
setCustomInstructions(searchSpace.qna_custom_instructions || "");
setHasChanges(false);
}
@ -67,50 +45,39 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
useEffect(() => {
if (searchSpace) {
const currentCustom = searchSpace.qna_custom_instructions || "";
const changed =
searchSpace.citations_enabled !== enableCitations || currentCustom !== customInstructions;
const changed = currentCustom !== customInstructions;
setHasChanges(changed);
}
}, [searchSpace, enableCitations, customInstructions]);
}, [searchSpace, customInstructions]);
const handleSave = async () => {
try {
setSaving(true);
// Prepare payload with simplified schema
const payload: any = {
citations_enabled: enableCitations,
const payload = {
qna_custom_instructions: customInstructions.trim() || "",
};
// Only send request if we have something to update
if (Object.keys(payload).length > 0) {
const response = await authenticatedFetch(
`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/searchspaces/${searchSpaceId}`,
{
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(payload),
}
);
if (!response.ok) {
const errorData = await response.json().catch(() => ({}));
throw new Error(errorData.detail || "Failed to save prompt configuration");
const response = await authenticatedFetch(
`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/searchspaces/${searchSpaceId}`,
{
method: "PUT",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(payload),
}
);
toast.success("Prompt configuration saved successfully");
if (!response.ok) {
const errorData = await response.json().catch(() => ({}));
throw new Error(errorData.detail || "Failed to save system instructions");
}
toast.success("System instructions saved successfully");
setHasChanges(false);
// Refresh to get updated data
await fetchSearchSpace();
} catch (error: any) {
console.error("Error saving prompt configuration:", error);
toast.error(error.message || "Failed to save prompt configuration");
console.error("Error saving system instructions:", error);
toast.error(error.message || "Failed to save system instructions");
} finally {
setSaving(false);
}
@ -118,41 +85,11 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
const handleReset = () => {
if (searchSpace) {
setEnableCitations(searchSpace.citations_enabled);
setCustomInstructions(searchSpace.qna_custom_instructions || "");
setSelectedPromptKey(null);
setHasChanges(false);
}
};
const handleSelectCommunityPrompt = (promptKey: string, promptValue: string) => {
setCustomInstructions(promptValue);
setSelectedPromptKey(promptKey);
toast.success("Community prompt applied");
};
const toggleExpand = (promptKey: string) => {
const newExpanded = new Set(expandedPrompts);
if (newExpanded.has(promptKey)) {
newExpanded.delete(promptKey);
} else {
newExpanded.add(promptKey);
}
setExpandedPrompts(newExpanded);
};
// Get unique categories
const categories = Array.from(new Set(prompts.map((p) => p.category || "general")));
const filteredPrompts =
selectedCategory === "all"
? prompts
: prompts.filter((p) => (p.category || "general") === selectedCategory);
const truncateText = (text: string, maxLength: number = 150) => {
if (text.length <= maxLength) return text;
return text.substring(0, maxLength) + "...";
};
if (loading) {
return (
<div className="space-y-6">
@ -172,225 +109,47 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
return (
<div className="space-y-6">
<Alert>
<Info className="h-4 w-4" />
<AlertDescription>
Configure how the AI responds to your queries. Citations add source references, and the
system instructions personalize the response style.
{/* Work in Progress Notice */}
<Alert
variant="default"
className="bg-amber-50 dark:bg-amber-950/30 border-amber-300 dark:border-amber-700"
>
<AlertTriangle className="h-4 w-4 text-amber-600 dark:text-amber-500" />
<AlertDescription className="text-amber-800 dark:text-amber-300">
<span className="font-semibold">Work in Progress:</span> This functionality is currently
under development and not yet connected to the backend. Your instructions will be saved
but won't affect AI behavior until the feature is fully implemented.
</AlertDescription>
</Alert>
{/* Citations Card */}
<Alert>
<Info className="h-4 w-4" />
<AlertDescription>
System instructions apply to all AI interactions in this search space. They guide how the
AI responds, its tone, focus areas, and behavior patterns.
</AlertDescription>
</Alert>
{/* System Instructions Card */}
<Card>
<CardHeader>
<CardTitle>Citation Configuration</CardTitle>
<CardTitle>Custom System Instructions</CardTitle>
<CardDescription>
Control whether AI responses include citations to source documents
Provide specific guidelines for how you want the AI to respond. These instructions will
be applied to all answers in this search space.
</CardDescription>
</CardHeader>
<CardContent className="space-y-4">
<div className="flex items-center justify-between space-x-4 p-4 rounded-lg border bg-card">
<div className="flex-1 space-y-1">
<Label htmlFor="enable-citations-settings" className="text-base font-medium">
Enable Citations
</Label>
<p className="text-sm text-muted-foreground">
When enabled, AI responses will include citations in [citation:id] format linking to
source documents.
</p>
</div>
<Switch
id="enable-citations-settings"
checked={enableCitations}
onCheckedChange={setEnableCitations}
/>
</div>
{!enableCitations && (
<Alert
variant="default"
className="bg-yellow-50 dark:bg-yellow-950/20 border-yellow-200 dark:border-yellow-800"
>
<Info className="h-4 w-4 text-yellow-600 dark:text-yellow-500" />
<AlertDescription className="text-yellow-800 dark:text-yellow-300">
Citations are currently disabled. AI responses will not include source references.
You can re-enable this anytime.
</AlertDescription>
</Alert>
)}
{enableCitations && (
<Alert>
<Info className="h-4 w-4" />
<AlertDescription>
Citations are enabled. When answering questions, the AI will reference source
documents using the [citation:id] format.
</AlertDescription>
</Alert>
)}
</CardContent>
</Card>
{/* SearchSpace System Instructions Card */}
<Card>
<CardHeader>
<CardTitle>SearchSpace System Instructions</CardTitle>
<CardDescription>
Add system instructions to guide the AI's response style and behavior
</CardDescription>
</CardHeader>
<CardContent className="space-y-4">
{/* Community Prompts Section */}
{!loadingPrompts && prompts.length > 0 && (
<div className="space-y-2">
<Label className="text-base font-medium flex items-center gap-2">
<Sparkles className="h-4 w-4" />
Community Prompts Library
</Label>
<p className="text-sm text-muted-foreground">
Browse {prompts.length} curated prompts from the community
</p>
<Card className="border-dashed">
<CardContent className="pt-4">
<Tabs
value={selectedCategory}
onValueChange={setSelectedCategory}
className="w-full"
>
<TabsList className="grid w-full grid-cols-5 mb-4">
<TabsTrigger value="all" className="text-xs">
All ({prompts.length})
</TabsTrigger>
{categories.map((category) => (
<TabsTrigger key={category} value={category} className="text-xs capitalize">
{category} (
{prompts.filter((p) => (p.category || "general") === category).length})
</TabsTrigger>
))}
</TabsList>
<ScrollArea className="h-[350px] pr-4">
<div className="space-y-3">
{filteredPrompts.map((prompt) => {
const isExpanded = expandedPrompts.has(prompt.key);
const isSelected = selectedPromptKey === prompt.key;
const displayText = isExpanded
? prompt.value
: truncateText(prompt.value, 120);
return (
<div
key={prompt.key}
className={`p-4 rounded-lg border transition-all ${
isSelected
? "border-primary bg-accent/50"
: "border-border hover:border-primary/50 hover:bg-accent/30"
}`}
>
<div className="flex items-start justify-between gap-2 mb-2">
<div className="flex items-center gap-2 flex-wrap flex-1">
<Badge variant="outline" className="text-xs font-medium">
{prompt.key.replace(/_/g, " ")}
</Badge>
{prompt.category && (
<Badge variant="secondary" className="text-xs capitalize">
{prompt.category}
</Badge>
)}
{isSelected && (
<Badge variant="default" className="text-xs">
Selected
</Badge>
)}
</div>
{prompt.link && (
<a
href={prompt.link}
target="_blank"
rel="noopener noreferrer"
className="text-muted-foreground hover:text-primary shrink-0"
title="View source"
>
<ExternalLink className="h-4 w-4" />
</a>
)}
</div>
<p className="text-sm text-foreground mb-3 whitespace-pre-wrap">
{displayText}
</p>
<div className="flex items-center justify-between gap-2">
<div className="flex items-center gap-2 text-xs text-muted-foreground">
<User className="h-3 w-3" />
<span>{prompt.author}</span>
</div>
<div className="flex items-center gap-2">
{prompt.value.length > 120 && (
<Button
type="button"
variant="ghost"
size="sm"
onClick={() => toggleExpand(prompt.key)}
className="h-7 text-xs"
>
{isExpanded ? (
<>
<ChevronUp className="h-3 w-3 mr-1" />
Show less
</>
) : (
<>
<ChevronDown className="h-3 w-3 mr-1" />
Read more
</>
)}
</Button>
)}
<Button
type="button"
variant={isSelected ? "default" : "secondary"}
size="sm"
onClick={() =>
handleSelectCommunityPrompt(prompt.key, prompt.value)
}
className="h-7 text-xs"
>
{isSelected ? "Applied" : "Use This"}
</Button>
</div>
</div>
</div>
);
})}
</div>
</ScrollArea>
</Tabs>
</CardContent>
</Card>
</div>
)}
<Separator />
<div className="space-y-2">
<Label htmlFor="custom-instructions-settings" className="text-base font-medium">
Your System Instructions
Your Instructions
</Label>
<p className="text-sm text-muted-foreground">
Provide specific guidelines for how you want the AI to respond. These instructions
will be applied to all answers.
</p>
<Textarea
id="custom-instructions-settings"
placeholder="E.g., Always provide practical examples, be concise, focus on technical details, use simple language..."
placeholder="E.g., Always provide practical examples, be concise, focus on technical details, use simple language, respond in a specific format..."
value={customInstructions}
onChange={(e) => {
setCustomInstructions(e.target.value);
setSelectedPromptKey(null);
}}
rows={8}
onChange={(e) => setCustomInstructions(e.target.value)}
rows={12}
className="resize-none font-mono text-sm"
/>
<div className="flex items-center justify-between">
@ -401,10 +160,7 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
<Button
variant="ghost"
size="sm"
onClick={() => {
setCustomInstructions("");
setSelectedPromptKey(null);
}}
onClick={() => setCustomInstructions("")}
className="h-auto py-1 px-2 text-xs"
>
Clear
@ -441,7 +197,7 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
className="flex items-center gap-2"
>
<Save className="h-4 w-4" />
{saving ? "Saving..." : "Save Configuration"}
{saving ? "Saving..." : "Save Instructions"}
</Button>
</div>
@ -452,7 +208,7 @@ export function PromptConfigManager({ searchSpaceId }: PromptConfigManagerProps)
>
<Info className="h-4 w-4 text-blue-600 dark:text-blue-500" />
<AlertDescription className="text-blue-800 dark:text-blue-300">
You have unsaved changes. Click "Save Configuration" to apply them.
You have unsaved changes. Click "Save Instructions" to apply them.
</AlertDescription>
</Alert>
)}

View file

@ -0,0 +1,566 @@
"use client";
import { zodResolver } from "@hookform/resolvers/zod";
import { useAtomValue } from "jotai";
import {
Bot,
Check,
ChevronsUpDown,
Key,
Loader2,
MessageSquareQuote,
Rocket,
Sparkles,
} from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { z } from "zod";
import { defaultSystemInstructionsAtom } from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import {
Command,
CommandEmpty,
CommandGroup,
CommandInput,
CommandItem,
CommandList,
} from "@/components/ui/command";
import {
Form,
FormControl,
FormDescription,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { Separator } from "@/components/ui/separator";
import { Switch } from "@/components/ui/switch";
import { Textarea } from "@/components/ui/textarea";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import type { CreateNewLLMConfigRequest } from "@/contracts/types/new-llm-config.types";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
// Form schema with zod — field set mirrors CreateNewLLMConfigRequest so the
// validated output can be submitted directly (see LLMConfigFormData below).
// NOTE: zod chain order is significant (.optional().nullable() vs
// .nullable().optional() infer different types), so chains are kept as-is.
const formSchema = z.object({
	// Display name for this configuration (required, max 100 chars).
	name: z.string().min(1, "Name is required").max(100),
	// Optional short description (max 500 chars).
	description: z.string().max(500).optional().nullable(),
	// Provider key; matched against LLM_PROVIDERS entries elsewhere in the form.
	provider: z.string().min(1, "Provider is required"),
	// Free-form provider name; the input for it only renders when provider === "CUSTOM".
	custom_provider: z.string().max(100).optional().nullable(),
	model_name: z.string().min(1, "Model name is required").max(100),
	// Required even for providers that ignore it (the Ollama UI hints "Any value").
	api_key: z.string().min(1, "API key is required"),
	api_base: z.string().max(500).optional().nullable(),
	// Extra inference parameters edited via InferenceParamsEditor (free-form record).
	litellm_params: z.record(z.string(), z.any()).optional().nullable(),
	// Defaults applied on parse: empty instructions, default/citation toggles on.
	system_instructions: z.string().optional().default(""),
	use_default_system_instructions: z.boolean().default(true),
	citations_enabled: z.boolean().default(true),
	search_space_id: z.number(),
});
// Parsed form values (schema defaults applied).
type FormValues = z.infer<typeof formSchema>;
// Submission payload shape — intentionally identical to the create-request contract.
export interface LLMConfigFormData extends CreateNewLLMConfigRequest {}
interface LLMConfigFormProps {
	// Pre-populates form fields (typically in "edit" mode).
	initialData?: Partial<LLMConfigFormData>;
	// Search space the config belongs to; injected into the form's default values.
	searchSpaceId: number;
	// Receives validated values; awaited by the submit handler.
	onSubmit: (data: LLMConfigFormData) => Promise<void>;
	// Optional cancel handler; the Cancel button is hidden when omitted.
	onCancel?: () => void;
	// Disables action buttons and shows a spinner while true.
	isSubmitting?: boolean;
	// Switches button wording ("Create..."/"Update...") and gates the
	// default-instructions auto-fill (create mode only).
	mode?: "create" | "edit";
	// Overrides the mode-derived submit-button label when provided.
	submitLabel?: string;
	// Toggles the "Advanced Parameters" (litellm_params) section.
	showAdvanced?: boolean;
	// Compact layout: right-aligned action buttons, no rocket icon on submit.
	compact?: boolean;
}
/**
 * Shared create/edit form for an LLM configuration: system instructions and
 * citation toggle, model/provider selection with a suggestion combobox,
 * API credentials, optional advanced litellm parameters.
 *
 * Validation is driven by `formSchema` via zodResolver; the validated values
 * are passed to `onSubmit` cast to LLMConfigFormData (same field set).
 */
export function LLMConfigForm({
	initialData,
	searchSpaceId,
	onSubmit,
	onCancel,
	isSubmitting = false,
	mode = "create",
	submitLabel,
	showAdvanced = true,
	compact = false,
}: LLMConfigFormProps) {
	// Backend-provided default system instructions (undefined until the atom resolves).
	const { data: defaultInstructions, isSuccess: defaultInstructionsLoaded } = useAtomValue(
		defaultSystemInstructionsAtom
	);
	// Open state for the model-name suggestion popover.
	const [modelComboboxOpen, setModelComboboxOpen] = useState(false);
	const form = useForm<FormValues>({
		resolver: zodResolver(formSchema),
		defaultValues: {
			name: initialData?.name ?? "",
			description: initialData?.description ?? "",
			provider: initialData?.provider ?? "",
			custom_provider: initialData?.custom_provider ?? "",
			model_name: initialData?.model_name ?? "",
			api_key: initialData?.api_key ?? "",
			api_base: initialData?.api_base ?? "",
			litellm_params: initialData?.litellm_params ?? {},
			system_instructions: initialData?.system_instructions ?? "",
			use_default_system_instructions: initialData?.use_default_system_instructions ?? true,
			citations_enabled: initialData?.citations_enabled ?? true,
			search_space_id: searchSpaceId,
		},
	});
	// Load default instructions when available (only for new configs) — and only
	// while the field is still empty, so a user's typed text is never clobbered.
	useEffect(() => {
		if (
			mode === "create" &&
			defaultInstructionsLoaded &&
			defaultInstructions?.default_system_instructions &&
			!form.getValues("system_instructions")
		) {
			form.setValue("system_instructions", defaultInstructions.default_system_instructions);
		}
	}, [defaultInstructionsLoaded, defaultInstructions, mode, form]);
	// Derived view state: selected provider metadata and its suggested models.
	const watchProvider = form.watch("provider");
	const selectedProvider = LLM_PROVIDERS.find((p) => p.value === watchProvider);
	const availableModels = watchProvider ? getModelsByProvider(watchProvider) : [];
	// Switching provider clears the model choice (suggestions differ per provider).
	const handleProviderChange = (value: string) => {
		form.setValue("provider", value);
		form.setValue("model_name", "");
		// Auto-fill API base for certain providers
		// NOTE(review): api_base is only overwritten when the new provider defines
		// one; a previous provider's URL lingers otherwise — confirm intended.
		const provider = LLM_PROVIDERS.find((p) => p.value === value);
		if (provider?.apiBase) {
			form.setValue("api_base", provider.apiBase);
		}
	};
	// Values are schema-validated here; the cast relies on formSchema mirroring
	// the LLMConfigFormData field set.
	const handleFormSubmit = async (values: FormValues) => {
		await onSubmit(values as LLMConfigFormData);
	};
	return (
		<Form {...form}>
			<form onSubmit={form.handleSubmit(handleFormSubmit)} className="space-y-6">
				{/* System Instructions & Citations Section */}
				<div className="space-y-4">
					<div className="flex items-center gap-2 text-sm font-medium text-muted-foreground">
						<MessageSquareQuote className="h-4 w-4" />
						System Instructions
					</div>
					{/* System Instructions textarea with a reset-to-default shortcut */}
					<FormField
						control={form.control}
						name="system_instructions"
						render={({ field }) => (
							<FormItem>
								<div className="flex items-center justify-between">
									<FormLabel>Instructions for the AI</FormLabel>
									{defaultInstructions && (
										<Button
											type="button"
											variant="ghost"
											size="sm"
											onClick={() =>
												field.onChange(defaultInstructions.default_system_instructions)
											}
											className="h-7 text-xs text-muted-foreground hover:text-foreground"
										>
											Reset to Default
										</Button>
									)}
								</div>
								<FormControl>
									<Textarea
										placeholder="Enter system instructions for the AI..."
										rows={6}
										className="font-mono text-xs resize-none"
										{...field}
									/>
								</FormControl>
								<FormDescription className="text-xs">
									Use {"{resolved_today}"} to include today's date dynamically
								</FormDescription>
								<FormMessage />
							</FormItem>
						)}
					/>
					{/* Citations Toggle */}
					<FormField
						control={form.control}
						name="citations_enabled"
						render={({ field }) => (
							<FormItem className="flex items-center justify-between rounded-lg border p-3 bg-muted/30">
								<div className="space-y-0.5">
									<FormLabel className="text-sm font-medium">Enable Citations</FormLabel>
									<FormDescription className="text-xs">
										Include [citation:id] references to source documents
									</FormDescription>
								</div>
								<FormControl>
									<Switch checked={field.value} onCheckedChange={field.onChange} />
								</FormControl>
							</FormItem>
						)}
					/>
				</div>
				<Separator />
				{/* Model Configuration Section */}
				<div className="space-y-4">
					<div className="flex items-center gap-2 text-sm font-medium text-muted-foreground">
						<Bot className="h-4 w-4" />
						Model Configuration
					</div>
					{/* Name & Description */}
					<div className="grid gap-4 sm:grid-cols-2">
						<FormField
							control={form.control}
							name="name"
							render={({ field }) => (
								<FormItem>
									<FormLabel className="flex items-center gap-2">
										<Sparkles className="h-3.5 w-3.5 text-violet-500" />
										Configuration Name
									</FormLabel>
									<FormControl>
										<Input
											placeholder="e.g., My GPT-4 Agent"
											className="transition-all focus-visible:ring-violet-500/50"
											{...field}
										/>
									</FormControl>
									<FormMessage />
								</FormItem>
							)}
						/>
						<FormField
							control={form.control}
							name="description"
							render={({ field }) => (
								<FormItem>
									<FormLabel className="text-muted-foreground">
										Description
										<Badge variant="outline" className="ml-2 text-[10px]">
											Optional
										</Badge>
									</FormLabel>
									<FormControl>
										<Input placeholder="Brief description" {...field} value={field.value ?? ""} />
									</FormControl>
									<FormMessage />
								</FormItem>
							)}
						/>
					</div>
					{/* Provider Selection — changing it runs handleProviderChange (resets model) */}
					<FormField
						control={form.control}
						name="provider"
						render={({ field }) => (
							<FormItem>
								<FormLabel>LLM Provider</FormLabel>
								<Select value={field.value} onValueChange={handleProviderChange}>
									<FormControl>
										<SelectTrigger className="transition-all focus:ring-violet-500/50">
											<SelectValue placeholder="Select a provider" />
										</SelectTrigger>
									</FormControl>
									<SelectContent className="max-h-[300px]">
										{LLM_PROVIDERS.map((provider) => (
											<SelectItem key={provider.value} value={provider.value}>
												<div className="flex flex-col py-0.5">
													<span className="font-medium">{provider.label}</span>
													<span className="text-xs text-muted-foreground">
														{provider.description}
													</span>
												</div>
											</SelectItem>
										))}
									</SelectContent>
								</Select>
								<FormMessage />
							</FormItem>
						)}
					/>
					{/* Custom Provider (conditional — only for provider === "CUSTOM") */}
					<AnimatePresence>
						{watchProvider === "CUSTOM" && (
							<motion.div
								initial={{ opacity: 0, height: 0 }}
								animate={{ opacity: 1, height: "auto" }}
								exit={{ opacity: 0, height: 0 }}
							>
								<FormField
									control={form.control}
									name="custom_provider"
									render={({ field }) => (
										<FormItem>
											<FormLabel>Custom Provider Name</FormLabel>
											<FormControl>
												<Input
													placeholder="my-custom-provider"
													{...field}
													value={field.value ?? ""}
												/>
											</FormControl>
											<FormMessage />
										</FormItem>
									)}
								/>
							</motion.div>
						)}
					</AnimatePresence>
					{/* Model Name with Combobox — free text plus up to 8 filtered suggestions */}
					<FormField
						control={form.control}
						name="model_name"
						render={({ field }) => (
							<FormItem className="flex flex-col">
								<FormLabel>Model Name</FormLabel>
								<Popover open={modelComboboxOpen} onOpenChange={setModelComboboxOpen}>
									<PopoverTrigger asChild>
										<FormControl>
											<Button
												variant="outline"
												role="combobox"
												aria-expanded={modelComboboxOpen}
												className={cn(
													"w-full justify-between font-normal",
													!field.value && "text-muted-foreground"
												)}
											>
												{field.value || "Select or type model name"}
												<ChevronsUpDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
											</Button>
										</FormControl>
									</PopoverTrigger>
									<PopoverContent className="w-full p-0" align="start">
										{/* shouldFilter={false}: filtering is done manually below so
										    arbitrary typed names remain usable as the value */}
										<Command shouldFilter={false}>
											<CommandInput
												placeholder={selectedProvider?.example || "Type model name..."}
												value={field.value}
												onValueChange={field.onChange}
											/>
											<CommandList>
												<CommandEmpty>
													<div className="py-3 text-center text-sm text-muted-foreground">
														{field.value ? `Using: "${field.value}"` : "Type your model name"}
													</div>
												</CommandEmpty>
												{availableModels.length > 0 && (
													<CommandGroup heading="Suggested Models">
														{availableModels
															.filter(
																(model) =>
																	!field.value ||
																	model.value.toLowerCase().includes(field.value.toLowerCase())
															)
															.slice(0, 8)
															.map((model) => (
																<CommandItem
																	key={model.value}
																	value={model.value}
																	onSelect={(value) => {
																		field.onChange(value);
																		setModelComboboxOpen(false);
																	}}
																	className="py-2"
																>
																	<Check
																		className={cn(
																			"mr-2 h-4 w-4",
																			field.value === model.value ? "opacity-100" : "opacity-0"
																		)}
																	/>
																	<div>
																		<div className="font-medium">{model.label}</div>
																		{model.contextWindow && (
																			<div className="text-xs text-muted-foreground">
																				Context: {model.contextWindow}
																			</div>
																		)}
																	</div>
																</CommandItem>
															))}
													</CommandGroup>
												)}
											</CommandList>
										</Command>
									</PopoverContent>
								</Popover>
								{selectedProvider?.example && (
									<FormDescription className="text-xs">
										Example: {selectedProvider.example}
									</FormDescription>
								)}
								<FormMessage />
							</FormItem>
						)}
					/>
					{/* API Credentials */}
					<div className="grid gap-4 sm:grid-cols-2">
						<FormField
							control={form.control}
							name="api_key"
							render={({ field }) => (
								<FormItem>
									<FormLabel className="flex items-center gap-2">
										<Key className="h-3.5 w-3.5 text-amber-500" />
										API Key
									</FormLabel>
									<FormControl>
										<Input
											type="password"
											placeholder={watchProvider === "OLLAMA" ? "Any value" : "sk-..."}
											{...field}
										/>
									</FormControl>
									{watchProvider === "OLLAMA" && (
										<FormDescription className="text-xs">
											Ollama doesn't require auth enter any value
										</FormDescription>
									)}
									<FormMessage />
								</FormItem>
							)}
						/>
						<FormField
							control={form.control}
							name="api_base"
							render={({ field }) => (
								<FormItem>
									<FormLabel className="flex items-center gap-2">
										API Base URL
										{selectedProvider?.apiBase && (
											<Badge variant="secondary" className="text-[10px]">
												Auto-filled
											</Badge>
										)}
									</FormLabel>
									<FormControl>
										<Input
											placeholder={selectedProvider?.apiBase || "https://api.example.com/v1"}
											{...field}
											value={field.value ?? ""}
										/>
									</FormControl>
									<FormMessage />
								</FormItem>
							)}
						/>
					</div>
					{/* Ollama Quick Actions — one-click api_base presets */}
					<AnimatePresence>
						{watchProvider === "OLLAMA" && (
							<motion.div
								initial={{ opacity: 0, height: 0 }}
								animate={{ opacity: 1, height: "auto" }}
								exit={{ opacity: 0, height: 0 }}
								className="flex flex-wrap gap-2"
							>
								<Button
									type="button"
									variant="outline"
									size="sm"
									className="h-7 text-xs"
									onClick={() => form.setValue("api_base", "http://localhost:11434")}
								>
									localhost:11434
								</Button>
								<Button
									type="button"
									variant="outline"
									size="sm"
									className="h-7 text-xs"
									onClick={() => form.setValue("api_base", "http://host.docker.internal:11434")}
								>
									Docker
								</Button>
							</motion.div>
						)}
					</AnimatePresence>
				</div>
				{/* Advanced Parameters (litellm_params), hidden when showAdvanced is false */}
				{showAdvanced && (
					<>
						<Separator />
						<div className="space-y-4">
							<div className="flex items-center gap-2 text-sm font-medium text-muted-foreground">
								<Sparkles className="h-4 w-4" />
								Advanced Parameters
							</div>
							<FormField
								control={form.control}
								name="litellm_params"
								render={({ field }) => (
									<FormItem>
										<FormControl>
											<InferenceParamsEditor
												params={field.value || {}}
												setParams={field.onChange}
											/>
										</FormControl>
										<FormMessage />
									</FormItem>
								)}
							/>
						</div>
					</>
				)}
				{/* Action Buttons */}
				<div
					className={cn(
						"flex gap-3 pt-4",
						compact ? "justify-end" : "justify-center sm:justify-end"
					)}
				>
					{onCancel && (
						<Button type="button" variant="outline" onClick={onCancel} disabled={isSubmitting}>
							Cancel
						</Button>
					)}
					<Button type="submit" disabled={isSubmitting} className="gap-2 min-w-[160px]">
						{isSubmitting ? (
							<>
								<Loader2 className="h-4 w-4 animate-spin" />
								{mode === "edit" ? "Updating..." : "Creating..."}
							</>
						) : (
							<>
								{!compact && <Rocket className="h-4 w-4" />}
								{submitLabel ?? (mode === "edit" ? "Update Configuration" : "Create Configuration")}
							</>
						)}
					</Button>
				</div>
			</form>
		</Form>
	);
}

View file

@ -221,7 +221,7 @@ export function AllChatsSidebar({ open, onOpenChange, searchSpaceId }: AllChatsS
</div>
)}
<ScrollArea className="flex-1">
<ScrollArea className="flex-1 min-h-0 overflow-hidden">
<div className="p-2">
{isLoading ? (
<div className="flex items-center justify-center py-8">

View file

@ -190,7 +190,7 @@ export function AllNotesSidebar({
</div>
</SheetHeader>
<ScrollArea className="flex-1">
<ScrollArea className="flex-1 min-h-0 overflow-hidden">
<div className="p-2">
{isLoading ? (
<div className="flex items-center justify-center py-8">

View file

@ -41,19 +41,13 @@ export function PageUsageDisplay({ pagesUsed, pagesLimit }: PageUsageDisplayProp
<span className="font-medium">{usagePercentage.toFixed(0)}%</span>
</div>
<Progress value={usagePercentage} className="h-2" />
<div className="flex items-start gap-2 pt-1">
<Mail className="h-3 w-3 text-muted-foreground mt-0.5 flex-shrink-0" />
<p className="text-[10px] text-muted-foreground leading-tight">
Contact{" "}
<a
href="mailto:rohan@surfsense.com"
className="text-primary hover:underline font-medium"
>
rohan@surfsense.com
</a>{" "}
to increase limits
</p>
</div>
<a
href="mailto:rohan@surfsense.com?subject=Request%20to%20Increase%20Page%20Limits"
className="flex items-center gap-1.5 text-[10px] text-muted-foreground hover:text-primary transition-colors pt-1"
>
<Mail className="h-3 w-3 flex-shrink-0" />
<span>Contact to increase limits</span>
</a>
</>
)}
</div>

View file

@ -1,13 +1,5 @@
"use client";
import { Card, CardContent } from "@/components/ui/card";
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import {
AlertCircleIcon,
BookOpenIcon,
@ -18,6 +10,9 @@ import {
} from "lucide-react";
import { Component, type ReactNode, useCallback } from "react";
import { z } from "zod";
import { Card, CardContent } from "@/components/ui/card";
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
/**
* Zod schema for serializable article data (from backend)
@ -92,7 +87,7 @@ export interface ArticleProps {
*/
export function parseSerializableArticle(data: unknown): ArticleProps {
const result = SerializableArticleSchema.safeParse(data);
if (!result.success) {
console.warn("Invalid article data:", result.error.issues);
// Return fallback with basic info
@ -103,7 +98,7 @@ export function parseSerializableArticle(data: unknown): ArticleProps {
error: "Failed to parse article data",
};
}
const parsed = result.data;
return {
id: parsed.id,
@ -162,10 +157,7 @@ export function Article({
return (
<Card
id={id}
className={cn(
"overflow-hidden border-destructive/20 bg-destructive/5",
className
)}
className={cn("overflow-hidden border-destructive/20 bg-destructive/5", className)}
style={{ maxWidth }}
>
<CardContent className="p-4">
@ -174,14 +166,8 @@ export function Article({
<AlertCircleIcon className="size-5 text-destructive" />
</div>
<div className="flex-1 min-w-0">
<p className="font-medium text-destructive text-sm">
Failed to scrape webpage
</p>
{href && (
<p className="text-muted-foreground text-xs mt-0.5 truncate">
{href}
</p>
)}
<p className="font-medium text-destructive text-sm">Failed to scrape webpage</p>
{href && <p className="text-muted-foreground text-xs mt-0.5 truncate">{href}</p>}
<p className="text-muted-foreground text-xs mt-1">{error}</p>
</div>
</div>
@ -228,9 +214,7 @@ export function Article({
{/* Description */}
{description && (
<p className="text-muted-foreground text-xs mt-1 line-clamp-2">
{description}
</p>
<p className="text-muted-foreground text-xs mt-1 line-clamp-2">{description}</p>
)}
{/* Metadata row */}
@ -276,9 +260,7 @@ export function Article({
<span className="flex items-center gap-1">
<FileTextIcon className="size-3" />
<span>{formatWordCount(wordCount)}</span>
{wasTruncated && (
<span className="text-warning">(truncated)</span>
)}
{wasTruncated && <span className="text-warning">(truncated)</span>}
</span>
</TooltipTrigger>
<TooltipContent>
@ -333,9 +315,7 @@ export function Article({
/**
* Loading state for article component
*/
export function ArticleLoading({
title = "Loading article...",
}: { title?: string }) {
export function ArticleLoading({ title = "Loading article..." }: { title?: string }) {
return (
<Card className="overflow-hidden animate-pulse">
<CardContent className="p-4">
@ -388,10 +368,7 @@ interface ErrorBoundaryState {
/**
* Error boundary for article component
*/
export class ArticleErrorBoundary extends Component<
ErrorBoundaryProps,
ErrorBoundaryState
> {
export class ArticleErrorBoundary extends Component<ErrorBoundaryProps, ErrorBoundaryState> {
constructor(props: ErrorBoundaryProps) {
super(props);
this.state = { hasError: false };
@ -409,9 +386,7 @@ export class ArticleErrorBoundary extends Component<
<CardContent className="p-4">
<div className="flex items-center gap-3">
<AlertCircleIcon className="size-5 text-destructive" />
<p className="text-sm text-destructive">
Failed to render article
</p>
<p className="text-sm text-destructive">Failed to render article</p>
</div>
</CardContent>
</Card>
@ -422,4 +397,3 @@ export class ArticleErrorBoundary extends Component<
return this.props.children;
}
}

View file

@ -2,14 +2,14 @@
import { makeAssistantToolUI } from "@assistant-ui/react";
import { Brain, CheckCircle2, Loader2, Search, Sparkles } from "lucide-react";
import { useMemo, useState, useEffect, useRef } from "react";
import { useEffect, useMemo, useRef, useState } from "react";
import { z } from "zod";
import {
ChainOfThought,
ChainOfThoughtContent,
ChainOfThoughtItem,
ChainOfThoughtStep,
ChainOfThoughtTrigger,
ChainOfThought,
ChainOfThoughtContent,
ChainOfThoughtItem,
ChainOfThoughtStep,
ChainOfThoughtTrigger,
} from "@/components/prompt-kit/chain-of-thought";
import { cn } from "@/lib/utils";
@ -17,21 +17,21 @@ import { cn } from "@/lib/utils";
* Zod schemas for runtime validation
*/
const ThinkingStepSchema = z.object({
id: z.string(),
title: z.string(),
items: z.array(z.string()).default([]),
status: z.enum(["pending", "in_progress", "completed"]).default("pending"),
id: z.string(),
title: z.string(),
items: z.array(z.string()).default([]),
status: z.enum(["pending", "in_progress", "completed"]).default("pending"),
});
const DeepAgentThinkingArgsSchema = z.object({
query: z.string().optional(),
context: z.string().optional(),
query: z.string().optional(),
context: z.string().optional(),
});
const DeepAgentThinkingResultSchema = z.object({
steps: z.array(ThinkingStepSchema).optional(),
status: z.enum(["thinking", "searching", "synthesizing", "completed"]).optional(),
summary: z.string().optional(),
steps: z.array(ThinkingStepSchema).optional(),
status: z.enum(["thinking", "searching", "synthesizing", "completed"]).optional(),
summary: z.string().optional(),
});
/**
@ -45,200 +45,198 @@ type DeepAgentThinkingResult = z.infer<typeof DeepAgentThinkingResultSchema>;
* Parse and validate a single thinking step
*/
export function parseThinkingStep(data: unknown): ThinkingStep {
const result = ThinkingStepSchema.safeParse(data);
if (!result.success) {
console.warn("Invalid thinking step data:", result.error.issues);
// Return a fallback step
return {
id: "unknown",
title: "Processing...",
items: [],
status: "pending",
};
}
return result.data;
const result = ThinkingStepSchema.safeParse(data);
if (!result.success) {
console.warn("Invalid thinking step data:", result.error.issues);
// Return a fallback step
return {
id: "unknown",
title: "Processing...",
items: [],
status: "pending",
};
}
return result.data;
}
/**
* Parse and validate thinking result
*/
export function parseThinkingResult(data: unknown): DeepAgentThinkingResult {
const result = DeepAgentThinkingResultSchema.safeParse(data);
if (!result.success) {
console.warn("Invalid thinking result data:", result.error.issues);
return {};
}
return result.data;
const result = DeepAgentThinkingResultSchema.safeParse(data);
if (!result.success) {
console.warn("Invalid thinking result data:", result.error.issues);
return {};
}
return result.data;
}
/**
* Get icon based on step status and type
*/
function getStepIcon(status: "pending" | "in_progress" | "completed", title: string) {
// Check for specific step types based on title keywords
const titleLower = title.toLowerCase();
if (status === "in_progress") {
return <Loader2 className="size-4 animate-spin text-primary" />;
}
if (status === "completed") {
return <CheckCircle2 className="size-4 text-emerald-500" />;
}
// Default icons based on step type
if (titleLower.includes("search") || titleLower.includes("knowledge")) {
return <Search className="size-4 text-muted-foreground" />;
}
if (titleLower.includes("analy") || titleLower.includes("understand")) {
return <Brain className="size-4 text-muted-foreground" />;
}
return <Sparkles className="size-4 text-muted-foreground" />;
// Check for specific step types based on title keywords
const titleLower = title.toLowerCase();
if (status === "in_progress") {
return <Loader2 className="size-4 animate-spin text-primary" />;
}
if (status === "completed") {
return <CheckCircle2 className="size-4 text-emerald-500" />;
}
// Default icons based on step type
if (titleLower.includes("search") || titleLower.includes("knowledge")) {
return <Search className="size-4 text-muted-foreground" />;
}
if (titleLower.includes("analy") || titleLower.includes("understand")) {
return <Brain className="size-4 text-muted-foreground" />;
}
return <Sparkles className="size-4 text-muted-foreground" />;
}
/**
* Component to display a single thinking step with controlled open state
*/
function ThinkingStepDisplay({
step,
isOpen,
onToggle
}: {
step: ThinkingStep;
isOpen: boolean;
onToggle: () => void;
function ThinkingStepDisplay({
step,
isOpen,
onToggle,
}: {
step: ThinkingStep;
isOpen: boolean;
onToggle: () => void;
}) {
const icon = useMemo(() => getStepIcon(step.status, step.title), [step.status, step.title]);
return (
<ChainOfThoughtStep open={isOpen} onOpenChange={onToggle}>
<ChainOfThoughtTrigger
leftIcon={icon}
swapIconOnHover={step.status !== "in_progress"}
className={cn(
step.status === "in_progress" && "text-foreground font-medium",
step.status === "completed" && "text-muted-foreground"
)}
>
{step.title}
</ChainOfThoughtTrigger>
<ChainOfThoughtContent>
{step.items.map((item, index) => (
<ChainOfThoughtItem key={`${step.id}-item-${index}`}>
{item}
</ChainOfThoughtItem>
))}
</ChainOfThoughtContent>
</ChainOfThoughtStep>
);
const icon = useMemo(() => getStepIcon(step.status, step.title), [step.status, step.title]);
return (
<ChainOfThoughtStep open={isOpen} onOpenChange={onToggle}>
<ChainOfThoughtTrigger
leftIcon={icon}
swapIconOnHover={step.status !== "in_progress"}
className={cn(
step.status === "in_progress" && "text-foreground font-medium",
step.status === "completed" && "text-muted-foreground"
)}
>
{step.title}
</ChainOfThoughtTrigger>
<ChainOfThoughtContent>
{step.items.map((item, index) => (
<ChainOfThoughtItem key={`${step.id}-item-${index}`}>{item}</ChainOfThoughtItem>
))}
</ChainOfThoughtContent>
</ChainOfThoughtStep>
);
}
/**
* Loading state with animated thinking indicator
*/
function ThinkingLoadingState({ status }: { status?: string }) {
const statusText = useMemo(() => {
switch (status) {
case "searching":
return "Searching knowledge base...";
case "synthesizing":
return "Synthesizing response...";
case "thinking":
default:
return "Thinking...";
}
}, [status]);
return (
<div className="my-3 flex items-center gap-2 rounded-lg border border-border/50 bg-muted/30 px-4 py-3">
<div className="relative">
<Brain className="size-5 text-primary" />
<span className="absolute -right-0.5 -top-0.5 flex size-2">
<span className="absolute inline-flex size-full animate-ping rounded-full bg-primary/60" />
<span className="relative inline-flex size-2 rounded-full bg-primary" />
</span>
</div>
<span className="text-sm text-muted-foreground">{statusText}</span>
</div>
);
const statusText = useMemo(() => {
switch (status) {
case "searching":
return "Searching knowledge base...";
case "synthesizing":
return "Synthesizing response...";
case "thinking":
default:
return "Thinking...";
}
}, [status]);
return (
<div className="my-3 flex items-center gap-2 rounded-lg border border-border/50 bg-muted/30 px-4 py-3">
<div className="relative">
<Brain className="size-5 text-primary" />
<span className="absolute -right-0.5 -top-0.5 flex size-2">
<span className="absolute inline-flex size-full animate-ping rounded-full bg-primary/60" />
<span className="relative inline-flex size-2 rounded-full bg-primary" />
</span>
</div>
<span className="text-sm text-muted-foreground">{statusText}</span>
</div>
);
}
/**
* Smart chain of thought renderer with state management
*/
function SmartChainOfThought({ steps }: { steps: ThinkingStep[] }) {
// Track which steps the user has manually toggled
const [manualOverrides, setManualOverrides] = useState<Record<string, boolean>>({});
// Track previous step statuses to detect changes
const prevStatusesRef = useRef<Record<string, string>>({});
// Check if any step is currently in progress
const hasInProgressStep = steps.some(step => step.status === "in_progress");
// Find the last completed step index
const lastCompletedIndex = steps
.map((s, i) => s.status === "completed" ? i : -1)
.filter(i => i !== -1)
.pop();
// Clear manual overrides when a step's status changes
useEffect(() => {
const currentStatuses: Record<string, string> = {};
steps.forEach(step => {
currentStatuses[step.id] = step.status;
// If status changed, clear any manual override for this step
if (prevStatusesRef.current[step.id] && prevStatusesRef.current[step.id] !== step.status) {
setManualOverrides(prev => {
const next = { ...prev };
delete next[step.id];
return next;
});
}
});
prevStatusesRef.current = currentStatuses;
}, [steps]);
const getStepOpenState = (step: ThinkingStep, index: number): boolean => {
// If user has manually toggled, respect that
if (manualOverrides[step.id] !== undefined) {
return manualOverrides[step.id];
}
// Auto behavior: open if in progress
if (step.status === "in_progress") {
return true;
}
// Auto behavior: keep last completed step open if no in-progress step
if (!hasInProgressStep && index === lastCompletedIndex) {
return true;
}
// Default: collapsed
return false;
};
const handleToggle = (stepId: string, currentOpen: boolean) => {
setManualOverrides(prev => ({
...prev,
[stepId]: !currentOpen,
}));
};
return (
<ChainOfThought>
{steps.map((step, index) => {
const isOpen = getStepOpenState(step, index);
return (
<ThinkingStepDisplay
key={step.id}
step={step}
isOpen={isOpen}
onToggle={() => handleToggle(step.id, isOpen)}
/>
);
})}
</ChainOfThought>
);
// Track which steps the user has manually toggled
const [manualOverrides, setManualOverrides] = useState<Record<string, boolean>>({});
// Track previous step statuses to detect changes
const prevStatusesRef = useRef<Record<string, string>>({});
// Check if any step is currently in progress
const hasInProgressStep = steps.some((step) => step.status === "in_progress");
// Find the last completed step index
const lastCompletedIndex = steps
.map((s, i) => (s.status === "completed" ? i : -1))
.filter((i) => i !== -1)
.pop();
// Clear manual overrides when a step's status changes
useEffect(() => {
const currentStatuses: Record<string, string> = {};
steps.forEach((step) => {
currentStatuses[step.id] = step.status;
// If status changed, clear any manual override for this step
if (prevStatusesRef.current[step.id] && prevStatusesRef.current[step.id] !== step.status) {
setManualOverrides((prev) => {
const next = { ...prev };
delete next[step.id];
return next;
});
}
});
prevStatusesRef.current = currentStatuses;
}, [steps]);
const getStepOpenState = (step: ThinkingStep, index: number): boolean => {
// If user has manually toggled, respect that
if (manualOverrides[step.id] !== undefined) {
return manualOverrides[step.id];
}
// Auto behavior: open if in progress
if (step.status === "in_progress") {
return true;
}
// Auto behavior: keep last completed step open if no in-progress step
if (!hasInProgressStep && index === lastCompletedIndex) {
return true;
}
// Default: collapsed
return false;
};
const handleToggle = (stepId: string, currentOpen: boolean) => {
setManualOverrides((prev) => ({
...prev,
[stepId]: !currentOpen,
}));
};
return (
<ChainOfThought>
{steps.map((step, index) => {
const isOpen = getStepOpenState(step, index);
return (
<ThinkingStepDisplay
key={step.id}
step={step}
isOpen={isOpen}
onToggle={() => handleToggle(step.id, isOpen)}
/>
);
})}
</ChainOfThought>
);
}
/**
@ -249,69 +247,68 @@ function SmartChainOfThought({ steps }: { steps: ThinkingStep[] }) {
* in a collapsible, hierarchical format.
*/
export const DeepAgentThinkingToolUI = makeAssistantToolUI<
DeepAgentThinkingArgs,
DeepAgentThinkingResult
DeepAgentThinkingArgs,
DeepAgentThinkingResult
>({
toolName: "deepagent_thinking",
render: function DeepAgentThinkingUI({ result, status }) {
// Loading state - tool is still running
if (status.type === "running" || status.type === "requires-action") {
return <ThinkingLoadingState status={result?.status} />;
}
toolName: "deepagent_thinking",
render: function DeepAgentThinkingUI({ result, status }) {
// Loading state - tool is still running
if (status.type === "running" || status.type === "requires-action") {
return <ThinkingLoadingState status={result?.status} />;
}
// Incomplete/cancelled state
if (status.type === "incomplete") {
if (status.reason === "cancelled") {
return null; // Don't show anything if cancelled
}
if (status.reason === "error") {
return null; // Don't show error for thinking - it's not critical
}
}
// Incomplete/cancelled state
if (status.type === "incomplete") {
if (status.reason === "cancelled") {
return null; // Don't show anything if cancelled
}
if (status.reason === "error") {
return null; // Don't show error for thinking - it's not critical
}
}
// No result or no steps - don't render anything
if (!result?.steps || result.steps.length === 0) {
return null;
}
// No result or no steps - don't render anything
if (!result?.steps || result.steps.length === 0) {
return null;
}
// Render the chain of thought
return (
<div className="my-3 w-full">
<SmartChainOfThought steps={result.steps} />
</div>
);
},
// Render the chain of thought
return (
<div className="my-3 w-full">
<SmartChainOfThought steps={result.steps} />
</div>
);
},
});
/**
* Inline Thinking Display Component
*
*
* A simpler version that can be used inline with the message content
* for displaying reasoning without the full tool UI infrastructure.
*/
export function InlineThinkingDisplay({
steps,
isStreaming = false,
className,
steps,
isStreaming = false,
className,
}: {
steps: ThinkingStep[];
isStreaming?: boolean;
className?: string;
steps: ThinkingStep[];
isStreaming?: boolean;
className?: string;
}) {
if (steps.length === 0 && !isStreaming) {
return null;
}
if (steps.length === 0 && !isStreaming) {
return null;
}
return (
<div className={cn("my-3 w-full", className)}>
{isStreaming && steps.length === 0 ? (
<ThinkingLoadingState />
) : (
<SmartChainOfThought steps={steps} />
)}
</div>
);
return (
<div className={cn("my-3 w-full", className)}>
{isStreaming && steps.length === 0 ? (
<ThinkingLoadingState />
) : (
<SmartChainOfThought steps={steps} />
)}
</div>
);
}
export type { ThinkingStep, DeepAgentThinkingArgs, DeepAgentThinkingResult };

View file

@ -73,12 +73,7 @@ function ImageCancelledState({ src }: { src: string }) {
function ParsedImage({ result }: { result: unknown }) {
const image = parseSerializableImage(result);
return (
<Image
{...image}
maxWidth="420px"
/>
);
return <Image {...image} maxWidth="420px" />;
}
/**
@ -93,10 +88,7 @@ function ParsedImage({ result }: { result: unknown }) {
* - Hover overlay effects
* - Click to open full size
*/
export const DisplayImageToolUI = makeAssistantToolUI<
DisplayImageArgs,
DisplayImageResult
>({
export const DisplayImageToolUI = makeAssistantToolUI<DisplayImageArgs, DisplayImageResult>({
toolName: "display_image",
render: function DisplayImageUI({ args, result, status }) {
const src = args.src || "Unknown";
@ -151,4 +143,3 @@ export const DisplayImageToolUI = makeAssistantToolUI<
});
export type { DisplayImageArgs, DisplayImageResult };

View file

@ -202,9 +202,7 @@ function PodcastPlayer({
`${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/podcasts/${podcastId}/audio`,
{ method: "GET", signal: controller.signal }
),
baseApiService.get<unknown>(
`/api/v1/podcasts/${podcastId}`
),
baseApiService.get<unknown>(`/api/v1/podcasts/${podcastId}`),
]);
if (!audioResponse.ok) {

View file

@ -65,14 +65,14 @@ export interface ImageProps {
*/
export function parseSerializableImage(result: unknown): SerializableImage {
const parsed = SerializableImageSchema.safeParse(result);
if (!parsed.success) {
console.warn("Invalid image data:", parsed.error.issues);
// Try to extract basic info for error display
const obj = (result && typeof result === "object" ? result : {}) as Record<string, unknown>;
throw new Error(`Invalid image: ${parsed.error.issues.map(i => i.message).join(", ")}`);
throw new Error(`Invalid image: ${parsed.error.issues.map((i) => i.message).join(", ")}`);
}
return parsed.data;
}
@ -165,7 +165,7 @@ export function ImageLoading({ title = "Loading image..." }: { title?: string })
/**
* Image Component
*
*
* Display images with metadata and attribution.
* Features hover overlay with title and source attribution.
*/
@ -197,11 +197,7 @@ export function Image({
if (imageError) {
return (
<Card
id={id}
className={cn("w-full overflow-hidden", className)}
style={{ maxWidth }}
>
<Card id={id} className={cn("w-full overflow-hidden", className)} style={{ maxWidth }}>
<div className={cn("bg-muted flex items-center justify-center", aspectRatioClass)}>
<div className="flex flex-col items-center gap-2 text-muted-foreground">
<ImageIcon className="size-8" />
@ -266,9 +262,7 @@ export function Image({
{/* Description */}
{description && (
<p className="text-white/80 text-sm line-clamp-2 mb-2">
{description}
</p>
<p className="text-white/80 text-sm line-clamp-2 mb-2">{description}</p>
)}
{/* Source attribution */}
@ -295,8 +289,8 @@ export function Image({
{/* Always visible domain badge (bottom right, shown when NOT hovered) */}
{displayDomain && !isHovered && (
<div className="absolute bottom-2 right-2">
<Badge
variant="secondary"
<Badge
variant="secondary"
className="bg-black/60 text-white border-0 text-xs backdrop-blur-sm"
>
{displayDomain}

View file

@ -6,57 +6,57 @@
* rich UI when specific tools are called by the agent.
*/
export { Audio } from "./audio";
export { GeneratePodcastToolUI } from "./generate-podcast";
export {
DeepAgentThinkingToolUI,
InlineThinkingDisplay,
type ThinkingStep,
type DeepAgentThinkingArgs,
type DeepAgentThinkingResult,
Article,
ArticleErrorBoundary,
ArticleLoading,
type ArticleProps,
ArticleSkeleton,
parseSerializableArticle,
type SerializableArticle,
} from "./article";
export { Audio } from "./audio";
export {
type DeepAgentThinkingArgs,
type DeepAgentThinkingResult,
DeepAgentThinkingToolUI,
InlineThinkingDisplay,
type ThinkingStep,
} from "./deepagent-thinking";
export {
LinkPreviewToolUI,
MultiLinkPreviewToolUI,
type LinkPreviewArgs,
type LinkPreviewResult,
type MultiLinkPreviewArgs,
type MultiLinkPreviewResult,
} from "./link-preview";
type DisplayImageArgs,
type DisplayImageResult,
DisplayImageToolUI,
} from "./display-image";
export { GeneratePodcastToolUI } from "./generate-podcast";
export {
MediaCard,
MediaCardErrorBoundary,
MediaCardLoading,
MediaCardSkeleton,
parseSerializableMediaCard,
type MediaCardProps,
type SerializableMediaCard,
} from "./media-card";
export {
Image,
ImageErrorBoundary,
ImageLoading,
ImageSkeleton,
parseSerializableImage,
type ImageProps,
type SerializableImage,
Image,
ImageErrorBoundary,
ImageLoading,
type ImageProps,
ImageSkeleton,
parseSerializableImage,
type SerializableImage,
} from "./image";
export {
DisplayImageToolUI,
type DisplayImageArgs,
type DisplayImageResult,
} from "./display-image";
type LinkPreviewArgs,
type LinkPreviewResult,
LinkPreviewToolUI,
type MultiLinkPreviewArgs,
type MultiLinkPreviewResult,
MultiLinkPreviewToolUI,
} from "./link-preview";
export {
Article,
ArticleErrorBoundary,
ArticleLoading,
ArticleSkeleton,
parseSerializableArticle,
type ArticleProps,
type SerializableArticle,
} from "./article";
MediaCard,
MediaCardErrorBoundary,
MediaCardLoading,
type MediaCardProps,
MediaCardSkeleton,
parseSerializableMediaCard,
type SerializableMediaCard,
} from "./media-card";
export {
ScrapeWebpageToolUI,
type ScrapeWebpageArgs,
type ScrapeWebpageResult,
type ScrapeWebpageArgs,
type ScrapeWebpageResult,
ScrapeWebpageToolUI,
} from "./scrape-webpage";

View file

@ -74,9 +74,7 @@ function ParsedMediaCard({ result }: { result: unknown }) {
<MediaCard
{...card}
maxWidth="420px"
responseActions={[
{ id: "open", label: "Open", variant: "default" },
]}
responseActions={[{ id: "open", label: "Open", variant: "default" }]}
onResponseAction={(id) => {
if (id === "open" && card.href) {
window.open(card.href, "_blank", "noopener,noreferrer");
@ -98,10 +96,7 @@ function ParsedMediaCard({ result }: { result: unknown }) {
* - Domain name
* - Clickable link to open in new tab
*/
export const LinkPreviewToolUI = makeAssistantToolUI<
LinkPreviewArgs,
LinkPreviewResult
>({
export const LinkPreviewToolUI = makeAssistantToolUI<LinkPreviewArgs, LinkPreviewResult>({
toolName: "link_preview",
render: function LinkPreviewUI({ args, result, status }) {
const url = args.url || "Unknown URL";
@ -223,4 +218,3 @@ export const MultiLinkPreviewToolUI = makeAssistantToolUI<
});
export type { LinkPreviewArgs, LinkPreviewResult, MultiLinkPreviewArgs, MultiLinkPreviewResult };

View file

@ -70,12 +70,12 @@ export interface MediaCardProps {
*/
export function parseSerializableMediaCard(result: unknown): SerializableMediaCard {
const parsed = SerializableMediaCardSchema.safeParse(result);
if (!parsed.success) {
console.warn("Invalid media card data:", parsed.error.issues);
throw new Error(`Invalid media card: ${parsed.error.issues.map(i => i.message).join(", ")}`);
throw new Error(`Invalid media card: ${parsed.error.issues.map((i) => i.message).join(", ")}`);
}
return parsed.data;
}
@ -164,10 +164,7 @@ export class MediaCardErrorBoundary extends Component<
*/
export function MediaCardSkeleton({ maxWidth = "420px" }: { maxWidth?: string }) {
return (
<Card
className="w-full overflow-hidden animate-pulse"
style={{ maxWidth }}
>
<Card className="w-full overflow-hidden animate-pulse" style={{ maxWidth }}>
<div className="aspect-[2/1] bg-muted" />
<CardContent className="p-4">
<div className="h-4 w-3/4 rounded bg-muted" />
@ -180,7 +177,7 @@ export function MediaCardSkeleton({ maxWidth = "420px" }: { maxWidth?: string })
/**
* MediaCard Component
*
*
* A rich media card for displaying link previews, images, and other media
* in AI chat applications. Supports thumbnails, descriptions, and actions.
*/
@ -353,4 +350,3 @@ export function MediaCardLoading({ title = "Loading preview..." }: { title?: str
</Card>
);
}

View file

@ -78,9 +78,7 @@ function ParsedArticle({ result }: { result: unknown }) {
<Article
{...article}
maxWidth="480px"
responseActions={[
{ id: "open", label: "Open Source", variant: "default" },
]}
responseActions={[{ id: "open", label: "Open Source", variant: "default" }]}
onResponseAction={(id) => {
if (id === "open" && article.href) {
window.open(article.href, "_blank", "noopener,noreferrer");
@ -102,10 +100,7 @@ function ParsedArticle({ result }: { result: unknown }) {
* - Word count
* - Link to original source
*/
export const ScrapeWebpageToolUI = makeAssistantToolUI<
ScrapeWebpageArgs,
ScrapeWebpageResult
>({
export const ScrapeWebpageToolUI = makeAssistantToolUI<ScrapeWebpageArgs, ScrapeWebpageResult>({
toolName: "scrape_webpage",
render: function ScrapeWebpageUI({ args, result, status }) {
const url = args.url || "Unknown URL";
@ -160,4 +155,3 @@ export const ScrapeWebpageToolUI = makeAssistantToolUI<
});
export type { ScrapeWebpageArgs, ScrapeWebpageResult };

View file

@ -1,33 +1,21 @@
"use client"
"use client";
import * as CollapsiblePrimitive from "@radix-ui/react-collapsible"
import * as CollapsiblePrimitive from "@radix-ui/react-collapsible";
function Collapsible({
...props
}: React.ComponentProps<typeof CollapsiblePrimitive.Root>) {
return <CollapsiblePrimitive.Root data-slot="collapsible" {...props} />
function Collapsible({ ...props }: React.ComponentProps<typeof CollapsiblePrimitive.Root>) {
return <CollapsiblePrimitive.Root data-slot="collapsible" {...props} />;
}
function CollapsibleTrigger({
...props
...props
}: React.ComponentProps<typeof CollapsiblePrimitive.CollapsibleTrigger>) {
return (
<CollapsiblePrimitive.CollapsibleTrigger
data-slot="collapsible-trigger"
{...props}
/>
)
return <CollapsiblePrimitive.CollapsibleTrigger data-slot="collapsible-trigger" {...props} />;
}
function CollapsibleContent({
...props
...props
}: React.ComponentProps<typeof CollapsiblePrimitive.CollapsibleContent>) {
return (
<CollapsiblePrimitive.CollapsibleContent
data-slot="collapsible-content"
{...props}
/>
)
return <CollapsiblePrimitive.CollapsibleContent data-slot="collapsible-content" {...props} />;
}
export { Collapsible, CollapsibleTrigger, CollapsibleContent }
export { Collapsible, CollapsibleTrigger, CollapsibleContent };

View file

@ -1,69 +0,0 @@
/**
 * A selectable language option for the language picker.
 */
export interface Language {
	/** Machine-facing identifier for this language option (e.g. "Chinese-traditional"). */
	value: string;
	/** Human-readable label shown in the UI (e.g. "Chinese (Traditional)"). */
	label: string;
}
/**
 * Static list of supported languages offered in the UI.
 *
 * `value` is the identifier submitted with requests; `label` is what the user
 * sees. For most entries they are identical; they differ where disambiguation
 * is needed (e.g. Chinese variants, "Filipino/Tagalog", "Persian/Farsi").
 */
export const LANGUAGES: Language[] = [
	{ value: "English", label: "English" },
	{ value: "Spanish", label: "Spanish" },
	{ value: "French", label: "French" },
	{ value: "German", label: "German" },
	{ value: "Italian", label: "Italian" },
	{ value: "Portuguese", label: "Portuguese" },
	{ value: "Russian", label: "Russian" },
	{ value: "Chinese", label: "Chinese (Simplified)" },
	{ value: "Chinese-traditional", label: "Chinese (Traditional)" },
	{ value: "Japanese", label: "Japanese" },
	{ value: "Korean", label: "Korean" },
	{ value: "Arabic", label: "Arabic" },
	{ value: "Hindi", label: "Hindi" },
	{ value: "Dutch", label: "Dutch" },
	{ value: "Swedish", label: "Swedish" },
	{ value: "Norwegian", label: "Norwegian" },
	{ value: "Danish", label: "Danish" },
	{ value: "Finnish", label: "Finnish" },
	{ value: "Polish", label: "Polish" },
	{ value: "Czech", label: "Czech" },
	{ value: "Hungarian", label: "Hungarian" },
	{ value: "Romanian", label: "Romanian" },
	{ value: "Bulgarian", label: "Bulgarian" },
	{ value: "Croatian", label: "Croatian" },
	{ value: "Serbian", label: "Serbian" },
	{ value: "Slovenian", label: "Slovenian" },
	{ value: "Slovak", label: "Slovak" },
	{ value: "Lithuanian", label: "Lithuanian" },
	{ value: "Latvian", label: "Latvian" },
	{ value: "Estonian", label: "Estonian" },
	{ value: "Greek", label: "Greek" },
	{ value: "Turkish", label: "Turkish" },
	{ value: "Hebrew", label: "Hebrew" },
	{ value: "Thai", label: "Thai" },
	{ value: "Vietnamese", label: "Vietnamese" },
	{ value: "Indonesian", label: "Indonesian" },
	{ value: "Malay", label: "Malay" },
	{ value: "Tagalog", label: "Filipino/Tagalog" },
	{ value: "Bengali", label: "Bengali" },
	{ value: "Tamil", label: "Tamil" },
	{ value: "Telugu", label: "Telugu" },
	{ value: "Marathi", label: "Marathi" },
	{ value: "Gujarati", label: "Gujarati" },
	{ value: "Kannada", label: "Kannada" },
	{ value: "Malayalam", label: "Malayalam" },
	{ value: "Punjabi", label: "Punjabi" },
	{ value: "Urdu", label: "Urdu" },
	{ value: "Persian", label: "Persian/Farsi" },
	{ value: "Swahili", label: "Swahili" },
	{ value: "Afrikaans", label: "Afrikaans" },
	{ value: "Amharic", label: "Amharic" },
	{ value: "Ukrainian", label: "Ukrainian" },
	{ value: "Belarusian", label: "Belarusian" },
	{ value: "Georgian", label: "Georgian" },
	{ value: "Armenian", label: "Armenian" },
	{ value: "Azerbaijani", label: "Azerbaijani" },
	{ value: "Kazakh", label: "Kazakh" },
	{ value: "Uzbek", label: "Uzbek" },
	{ value: "Kyrgyz", label: "Kyrgyz" },
	{ value: "Tajik", label: "Tajik" },
	{ value: "Turkmen", label: "Turkmen" },
	{ value: "Mongolian", label: "Mongolian" },
];

View file

@ -1,193 +0,0 @@
import { z } from "zod";
import { paginationQueryParams } from ".";
/**
 * Closed set of LLM provider identifiers accepted by the API.
 * "CUSTOM" covers providers not in this list (paired with `custom_provider`
 * elsewhere in this module).
 */
export const liteLLMProviderEnum = z.enum([
	"OPENAI",
	"ANTHROPIC",
	"GOOGLE",
	"AZURE_OPENAI",
	"BEDROCK",
	"VERTEX_AI",
	"GROQ",
	"COHERE",
	"MISTRAL",
	"DEEPSEEK",
	"XAI",
	"OPENROUTER",
	"TOGETHER_AI",
	"FIREWORKS_AI",
	"REPLICATE",
	"PERPLEXITY",
	"OLLAMA",
	"ALIBABA_QWEN",
	"MOONSHOT",
	"ZHIPU",
	"ANYSCALE",
	"DEEPINFRA",
	"CEREBRAS",
	"SAMBANOVA",
	"AI21",
	"CLOUDFLARE",
	"DATABRICKS",
	"COMETAPI",
	"HUGGINGFACE",
	"CUSTOM",
]);
/**
 * A per-search-space LLM configuration record as exchanged with the API.
 * `api_key` is present here; use `globalLLMConfig` below for the
 * credential-free variant.
 */
export const llmConfig = z.object({
	id: z.number(),
	name: z.string().max(100),
	provider: liteLLMProviderEnum,
	// Free-form provider name used when `provider` is "CUSTOM".
	custom_provider: z.string().nullable().optional(),
	model_name: z.string().max(100),
	api_key: z.string(),
	api_base: z.string().nullable().optional(),
	language: z.string().max(50).nullable(),
	// Extra parameters passed through to the backend LLM client verbatim.
	litellm_params: z.record(z.string(), z.any()).nullable().optional(),
	search_space_id: z.number(),
	created_at: z.string().nullable(),
	updated_at: z.string().nullable().optional(),
});

/**
 * Globally shared LLM config: a credential-free subset of `llmConfig`
 * (no `api_key`, no `search_space_id`) with a free-form `provider` string
 * and a literal `is_global: true` discriminator.
 */
export const globalLLMConfig = llmConfig
	.pick({
		id: true,
		name: true,
		custom_provider: true,
		model_name: true,
		api_base: true,
		language: true,
		litellm_params: true,
	})
	.extend({
		provider: z.string(),
		is_global: z.literal(true),
	});
/**
 * Get global LLM configs — response: list of credential-free global configs.
 */
export const getGlobalLLMConfigsResponse = z.array(globalLLMConfig);

/**
 * Create LLM config — request carries all user-editable fields plus the
 * owning `search_space_id`; response is the created record.
 */
export const createLLMConfigRequest = llmConfig.pick({
	name: true,
	provider: true,
	custom_provider: true,
	model_name: true,
	api_key: true,
	api_base: true,
	language: true,
	litellm_params: true,
	search_space_id: true,
});
export const createLLMConfigResponse = llmConfig;

/**
 * Get LLM configs — optional pagination (`skip`/`limit`) scoped to one
 * search space; `search_space_id` may arrive as number or string
 * (e.g. from a route param).
 */
export const getLLMConfigsRequest = z.object({
	queryParams: paginationQueryParams
		.pick({ skip: true, limit: true })
		.extend({
			search_space_id: z.number().or(z.string()),
		})
		.nullish(),
});
export const getLLMConfigsResponse = z.array(llmConfig);

/**
 * Get LLM config by ID.
 */
export const getLLMConfigRequest = llmConfig.pick({ id: true });
export const getLLMConfigResponse = llmConfig;

/**
 * Update LLM config — partial update: any subset of the editable fields.
 */
export const updateLLMConfigRequest = z.object({
	id: z.number(),
	data: llmConfig
		.pick({
			name: true,
			provider: true,
			custom_provider: true,
			model_name: true,
			api_key: true,
			api_base: true,
			language: true,
			litellm_params: true,
		})
		.partial(),
});
export const updateLLMConfigResponse = llmConfig;

/**
 * Delete LLM config — response message is pinned to the exact backend string.
 */
export const deleteLLMConfigRequest = llmConfig.pick({ id: true });
export const deleteLLMConfigResponse = z.object({
	message: z.literal("LLM configuration deleted successfully"),
});
/**
 * LLM Preferences schemas — role assignments for a search space.
 * The `*_id` fields select configs by ID; the matching object fields
 * (`long_context_llm`, `fast_llm`, `strategic_llm`) carry the resolved
 * config records when the backend includes them.
 */
export const llmPreferences = z.object({
	long_context_llm_id: z.number().nullable().optional(),
	fast_llm_id: z.number().nullable().optional(),
	strategic_llm_id: z.number().nullable().optional(),
	long_context_llm: llmConfig.nullable().optional(),
	fast_llm: llmConfig.nullable().optional(),
	strategic_llm: llmConfig.nullable().optional(),
});

/**
 * Get LLM preferences for a search space.
 */
export const getLLMPreferencesRequest = z.object({
	search_space_id: z.number(),
});
export const getLLMPreferencesResponse = llmPreferences;

/**
 * Update LLM preferences — only the three role-assignment IDs are writable.
 */
export const updateLLMPreferencesRequest = z.object({
	search_space_id: z.number(),
	data: llmPreferences.pick({
		long_context_llm_id: true,
		fast_llm_id: true,
		strategic_llm_id: true,
	}),
});
export const updateLLMPreferencesResponse = llmPreferences;

// Inferred TypeScript types mirroring the schemas above.
export type LLMConfig = z.infer<typeof llmConfig>;
export type LiteLLMProvider = z.infer<typeof liteLLMProviderEnum>;
export type GlobalLLMConfig = z.infer<typeof globalLLMConfig>;
export type GetGlobalLLMConfigsResponse = z.infer<typeof getGlobalLLMConfigsResponse>;
export type CreateLLMConfigRequest = z.infer<typeof createLLMConfigRequest>;
export type CreateLLMConfigResponse = z.infer<typeof createLLMConfigResponse>;
export type GetLLMConfigsRequest = z.infer<typeof getLLMConfigsRequest>;
export type GetLLMConfigsResponse = z.infer<typeof getLLMConfigsResponse>;
export type GetLLMConfigRequest = z.infer<typeof getLLMConfigRequest>;
export type GetLLMConfigResponse = z.infer<typeof getLLMConfigResponse>;
export type UpdateLLMConfigRequest = z.infer<typeof updateLLMConfigRequest>;
export type UpdateLLMConfigResponse = z.infer<typeof updateLLMConfigResponse>;
export type DeleteLLMConfigRequest = z.infer<typeof deleteLLMConfigRequest>;
export type DeleteLLMConfigResponse = z.infer<typeof deleteLLMConfigResponse>;
export type LLMPreferences = z.infer<typeof llmPreferences>;
export type GetLLMPreferencesRequest = z.infer<typeof getLLMPreferencesRequest>;
export type GetLLMPreferencesResponse = z.infer<typeof getLLMPreferencesResponse>;
export type UpdateLLMPreferencesRequest = z.infer<typeof updateLLMPreferencesRequest>;
export type UpdateLLMPreferencesResponse = z.infer<typeof updateLLMPreferencesResponse>;

View file

@ -0,0 +1,224 @@
import { z } from "zod";
/**
 * LiteLLM Provider enum - all supported LLM providers.
 * "CUSTOM" covers providers not listed here (paired with `custom_provider`
 * on the config schema below).
 */
export const liteLLMProviderEnum = z.enum([
	"OPENAI",
	"ANTHROPIC",
	"GOOGLE",
	"AZURE_OPENAI",
	"BEDROCK",
	"VERTEX_AI",
	"GROQ",
	"COHERE",
	"MISTRAL",
	"DEEPSEEK",
	"XAI",
	"OPENROUTER",
	"TOGETHER_AI",
	"FIREWORKS_AI",
	"REPLICATE",
	"PERPLEXITY",
	"OLLAMA",
	"ALIBABA_QWEN",
	"MOONSHOT",
	"ZHIPU",
	"ANYSCALE",
	"DEEPINFRA",
	"CEREBRAS",
	"SAMBANOVA",
	"AI21",
	"CLOUDFLARE",
	"DATABRICKS",
	"COMETAPI",
	"HUGGINGFACE",
	"CUSTOM",
]);

export type LiteLLMProvider = z.infer<typeof liteLLMProviderEnum>;
/**
 * NewLLMConfig - combines LLM model settings with prompt configuration
 * (system instructions + citation toggle) in a single record.
 */
export const newLLMConfig = z.object({
	id: z.number(),
	name: z.string().max(100),
	description: z.string().max(500).nullable().optional(),

	// LLM Model Configuration
	provider: liteLLMProviderEnum,
	// Free-form provider name used when `provider` is "CUSTOM".
	custom_provider: z.string().max(100).nullable().optional(),
	model_name: z.string().max(100),
	api_key: z.string(),
	api_base: z.string().max(500).nullable().optional(),
	// Extra parameters passed through to the backend LLM client verbatim.
	litellm_params: z.record(z.string(), z.any()).nullable().optional(),

	// Prompt Configuration
	system_instructions: z.string().default(""),
	// When true, the backend's default instructions template is used instead
	// of `system_instructions`.
	use_default_system_instructions: z.boolean().default(true),
	citations_enabled: z.boolean().default(true),

	// Metadata
	created_at: z.string(),
	search_space_id: z.number(),
});

/**
 * Public version without api_key (for list views)
 */
export const newLLMConfigPublic = newLLMConfig.omit({ api_key: true });
/**
 * Create NewLLMConfig — server assigns `id` and `created_at`.
 */
export const createNewLLMConfigRequest = newLLMConfig.omit({
	id: true,
	created_at: true,
});
export const createNewLLMConfigResponse = newLLMConfig;

/**
 * Get NewLLMConfigs list — scoped to one search space, optionally paginated.
 */
export const getNewLLMConfigsRequest = z.object({
	search_space_id: z.number(),
	skip: z.number().optional(),
	limit: z.number().optional(),
});
export const getNewLLMConfigsResponse = z.array(newLLMConfig);

/**
 * Get single NewLLMConfig by ID.
 */
export const getNewLLMConfigRequest = z.object({
	id: z.number(),
});
export const getNewLLMConfigResponse = newLLMConfig;

/**
 * Update NewLLMConfig — partial update of all user-editable fields
 * (identity and ownership fields are excluded).
 */
export const updateNewLLMConfigRequest = z.object({
	id: z.number(),
	data: newLLMConfig
		.omit({
			id: true,
			created_at: true,
			search_space_id: true,
		})
		.partial(),
});
export const updateNewLLMConfigResponse = newLLMConfig;

/**
 * Delete NewLLMConfig — response echoes the deleted config's id.
 */
export const deleteNewLLMConfigRequest = z.object({
	id: z.number(),
});
export const deleteNewLLMConfigResponse = z.object({
	message: z.string(),
	id: z.number(),
});

/**
 * Get default system instructions template from the backend.
 */
export const getDefaultSystemInstructionsResponse = z.object({
	default_system_instructions: z.string(),
});

/**
 * Global NewLLMConfig - from YAML, has negative IDs. Credential-free
 * (no `api_key`) and discriminated by the literal `is_global: true`.
 */
export const globalNewLLMConfig = z.object({
	id: z.number(), // Negative IDs for global configs
	name: z.string(),
	description: z.string().nullable().optional(),

	// LLM Model Configuration (no api_key)
	provider: z.string(), // String because YAML doesn't enforce enum
	custom_provider: z.string().nullable().optional(),
	model_name: z.string(),
	api_base: z.string().nullable().optional(),
	litellm_params: z.record(z.string(), z.any()).nullable().optional(),

	// Prompt Configuration
	system_instructions: z.string().default(""),
	use_default_system_instructions: z.boolean().default(true),
	citations_enabled: z.boolean().default(true),

	is_global: z.literal(true),
});

export const getGlobalNewLLMConfigsResponse = z.array(globalNewLLMConfig);
// =============================================================================
// LLM Preferences (Role Assignments)
// =============================================================================

/**
 * LLM Preferences schemas - for role assignments.
 * The agent_llm and document_summary_llm fields contain the full NewLLMConfig
 * objects (typed loosely as records here; validate with `newLLMConfig` where
 * a concrete shape is needed).
 */
export const llmPreferences = z.object({
	agent_llm_id: z.union([z.number(), z.null()]).optional(),
	document_summary_llm_id: z.union([z.number(), z.null()]).optional(),
	agent_llm: z.union([z.record(z.string(), z.unknown()), z.null()]).optional(),
	document_summary_llm: z.union([z.record(z.string(), z.unknown()), z.null()]).optional(),
});

/**
 * Get LLM preferences for a search space.
 */
export const getLLMPreferencesRequest = z.object({
	search_space_id: z.number(),
});
export const getLLMPreferencesResponse = llmPreferences;

/**
 * Update LLM preferences — only the two role-assignment IDs are writable.
 */
export const updateLLMPreferencesRequest = z.object({
	search_space_id: z.number(),
	data: llmPreferences.pick({
		agent_llm_id: true,
		document_summary_llm_id: true,
	}),
});
export const updateLLMPreferencesResponse = llmPreferences;

// =============================================================================
// Type Exports
// =============================================================================

export type NewLLMConfig = z.infer<typeof newLLMConfig>;
export type NewLLMConfigPublic = z.infer<typeof newLLMConfigPublic>;
export type CreateNewLLMConfigRequest = z.infer<typeof createNewLLMConfigRequest>;
export type CreateNewLLMConfigResponse = z.infer<typeof createNewLLMConfigResponse>;
export type GetNewLLMConfigsRequest = z.infer<typeof getNewLLMConfigsRequest>;
export type GetNewLLMConfigsResponse = z.infer<typeof getNewLLMConfigsResponse>;
export type GetNewLLMConfigRequest = z.infer<typeof getNewLLMConfigRequest>;
export type GetNewLLMConfigResponse = z.infer<typeof getNewLLMConfigResponse>;
export type UpdateNewLLMConfigRequest = z.infer<typeof updateNewLLMConfigRequest>;
export type UpdateNewLLMConfigResponse = z.infer<typeof updateNewLLMConfigResponse>;
export type DeleteNewLLMConfigRequest = z.infer<typeof deleteNewLLMConfigRequest>;
export type DeleteNewLLMConfigResponse = z.infer<typeof deleteNewLLMConfigResponse>;
export type GetDefaultSystemInstructionsResponse = z.infer<
	typeof getDefaultSystemInstructionsResponse
>;
export type GlobalNewLLMConfig = z.infer<typeof globalNewLLMConfig>;
export type GetGlobalNewLLMConfigsResponse = z.infer<typeof getGlobalNewLLMConfigsResponse>;
export type LLMPreferences = z.infer<typeof llmPreferences>;
export type GetLLMPreferencesRequest = z.infer<typeof getLLMPreferencesRequest>;
export type GetLLMPreferencesResponse = z.infer<typeof getLLMPreferencesResponse>;
export type UpdateLLMPreferencesRequest = z.infer<typeof updateLLMPreferencesRequest>;
export type UpdateLLMPreferencesResponse = z.infer<typeof updateLLMPreferencesResponse>;

View file

@ -36,19 +36,6 @@ export const createSearchSpaceRequest = searchSpace.pick({ name: true, descripti
export const createSearchSpaceResponse = searchSpace.omit({ member_count: true, is_owner: true });
/**
* Get community prompts
*/
export const getCommunityPromptsResponse = z.array(
z.object({
key: z.string(),
value: z.string(),
author: z.string(),
link: z.string(),
category: z.string(),
})
);
/**
* Get search space
*/
@ -83,7 +70,6 @@ export type GetSearchSpacesRequest = z.infer<typeof getSearchSpacesRequest>;
export type GetSearchSpacesResponse = z.infer<typeof getSearchSpacesResponse>;
export type CreateSearchSpaceRequest = z.infer<typeof createSearchSpaceRequest>;
export type CreateSearchSpaceResponse = z.infer<typeof createSearchSpaceResponse>;
export type GetCommunityPromptsResponse = z.infer<typeof getCommunityPromptsResponse>;
export type GetSearchSpaceRequest = z.infer<typeof getSearchSpaceRequest>;
export type GetSearchSpaceResponse = z.infer<typeof getSearchSpaceResponse>;
export type UpdateSearchSpaceRequest = z.infer<typeof updateSearchSpaceRequest>;

View file

@ -1,3 +0,0 @@
export * from "./use-debounced-value";
export * from "./use-logs";
export * from "./use-search-source-connectors";

View file

@ -1,179 +0,0 @@
import {
type CreateLLMConfigRequest,
createLLMConfigRequest,
createLLMConfigResponse,
type DeleteLLMConfigRequest,
deleteLLMConfigRequest,
deleteLLMConfigResponse,
type GetLLMConfigRequest,
type GetLLMConfigsRequest,
type GetLLMPreferencesRequest,
getGlobalLLMConfigsResponse,
getLLMConfigRequest,
getLLMConfigResponse,
getLLMConfigsRequest,
getLLMConfigsResponse,
getLLMPreferencesRequest,
getLLMPreferencesResponse,
type UpdateLLMConfigRequest,
type UpdateLLMPreferencesRequest,
updateLLMConfigRequest,
updateLLMConfigResponse,
updateLLMPreferencesRequest,
updateLLMPreferencesResponse,
} from "@/contracts/types/llm-config.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";
class LLMConfigApiService {
/**
* Get all global LLM configurations available to all users
*/
getGlobalLLMConfigs = async () => {
return baseApiService.get(`/api/v1/global-llm-configs`, getGlobalLLMConfigsResponse);
};
/**
* Create a new LLM configuration for a search space
*/
createLLMConfig = async (request: CreateLLMConfigRequest) => {
const parsedRequest = createLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.post(`/api/v1/llm-configs`, createLLMConfigResponse, {
body: parsedRequest.data,
});
};
/**
* Get a list of LLM configurations for a search space
*/
getLLMConfigs = async (request: GetLLMConfigsRequest) => {
const parsedRequest = getLLMConfigsRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
// Transform query params to be string values
const transformedQueryParams = parsedRequest.data.queryParams
? Object.fromEntries(
Object.entries(parsedRequest.data.queryParams).map(([k, v]) => {
return [k, String(v)];
})
)
: undefined;
const queryParams = transformedQueryParams
? new URLSearchParams(transformedQueryParams).toString()
: "";
return baseApiService.get(`/api/v1/llm-configs?${queryParams}`, getLLMConfigsResponse);
};
/**
* Get a single LLM configuration by ID
*/
getLLMConfig = async (request: GetLLMConfigRequest) => {
const parsedRequest = getLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.get(`/api/v1/llm-configs/${request.id}`, getLLMConfigResponse);
};
/**
* Update an existing LLM configuration
*/
updateLLMConfig = async (request: UpdateLLMConfigRequest) => {
const parsedRequest = updateLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
const { id, data } = parsedRequest.data;
return baseApiService.put(`/api/v1/llm-configs/${id}`, updateLLMConfigResponse, {
body: data,
});
};
/**
* Delete an LLM configuration
*/
deleteLLMConfig = async (request: DeleteLLMConfigRequest) => {
const parsedRequest = deleteLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.delete(`/api/v1/llm-configs/${request.id}`, deleteLLMConfigResponse);
};
/**
* Get LLM preferences for a search space
*/
getLLMPreferences = async (request: GetLLMPreferencesRequest) => {
const parsedRequest = getLLMPreferencesRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.get(
`/api/v1/search-spaces/${request.search_space_id}/llm-preferences`,
getLLMPreferencesResponse
);
};
/**
* Update LLM preferences for a search space
*/
updateLLMPreferences = async (request: UpdateLLMPreferencesRequest) => {
const parsedRequest = updateLLMPreferencesRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
const { search_space_id, data } = parsedRequest.data;
return baseApiService.put(
`/api/v1/search-spaces/${search_space_id}/llm-preferences`,
updateLLMPreferencesResponse,
{
body: data,
}
);
};
}
export const llmConfigApiService = new LLMConfigApiService();

View file

@ -0,0 +1,170 @@
import {
type CreateNewLLMConfigRequest,
createNewLLMConfigRequest,
createNewLLMConfigResponse,
type DeleteNewLLMConfigRequest,
deleteNewLLMConfigRequest,
deleteNewLLMConfigResponse,
type GetNewLLMConfigRequest,
type GetNewLLMConfigsRequest,
getDefaultSystemInstructionsResponse,
getGlobalNewLLMConfigsResponse,
getLLMPreferencesResponse,
getNewLLMConfigRequest,
getNewLLMConfigResponse,
getNewLLMConfigsRequest,
getNewLLMConfigsResponse,
type UpdateLLMPreferencesRequest,
type UpdateNewLLMConfigRequest,
updateLLMPreferencesRequest,
updateLLMPreferencesResponse,
updateNewLLMConfigRequest,
updateNewLLMConfigResponse,
} from "@/contracts/types/new-llm-config.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";
/**
 * API client for the NewLLMConfig endpoints (CRUD on per-search-space LLM
 * configurations plus the per-space LLM preference settings). Every mutating
 * call validates its request against the matching zod schema before hitting
 * the backend.
 */
class NewLLMConfigApiService {
	/**
	 * Get all global NewLLMConfigs available to all users
	 */
	getGlobalConfigs = async () => {
		return baseApiService.get(`/api/v1/global-new-llm-configs`, getGlobalNewLLMConfigsResponse);
	};

	/**
	 * Get default system instructions template
	 */
	getDefaultSystemInstructions = async () => {
		const url = `/api/v1/new-llm-configs/default-system-instructions`;
		return baseApiService.get(url, getDefaultSystemInstructionsResponse);
	};

	/**
	 * Create a new NewLLMConfig for a search space
	 */
	createConfig = async (request: CreateNewLLMConfigRequest) => {
		const parsed = createNewLLMConfigRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		return baseApiService.post(`/api/v1/new-llm-configs`, createNewLLMConfigResponse, {
			body: parsed.data,
		});
	};

	/**
	 * Get a list of NewLLMConfigs for a search space
	 */
	getConfigs = async (request: GetNewLLMConfigsRequest) => {
		const parsed = getNewLLMConfigsRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		const { search_space_id, skip, limit } = parsed.data;
		// search_space_id is always sent; skip/limit only when the caller provided them.
		const params = new URLSearchParams({ search_space_id: String(search_space_id) });
		if (skip !== undefined) {
			params.set("skip", String(skip));
		}
		if (limit !== undefined) {
			params.set("limit", String(limit));
		}
		return baseApiService.get(
			`/api/v1/new-llm-configs?${params.toString()}`,
			getNewLLMConfigsResponse
		);
	};

	/**
	 * Get a single NewLLMConfig by ID
	 */
	getConfig = async (request: GetNewLLMConfigRequest) => {
		const parsed = getNewLLMConfigRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		return baseApiService.get(
			`/api/v1/new-llm-configs/${parsed.data.id}`,
			getNewLLMConfigResponse
		);
	};

	/**
	 * Update an existing NewLLMConfig
	 */
	updateConfig = async (request: UpdateNewLLMConfigRequest) => {
		const parsed = updateNewLLMConfigRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		const { id, data } = parsed.data;
		return baseApiService.put(`/api/v1/new-llm-configs/${id}`, updateNewLLMConfigResponse, {
			body: data,
		});
	};

	/**
	 * Delete a NewLLMConfig
	 */
	deleteConfig = async (request: DeleteNewLLMConfigRequest) => {
		const parsed = deleteNewLLMConfigRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		return baseApiService.delete(
			`/api/v1/new-llm-configs/${parsed.data.id}`,
			deleteNewLLMConfigResponse
		);
	};

	/**
	 * Get LLM preferences for a search space
	 */
	getLLMPreferences = async (searchSpaceId: number) => {
		return baseApiService.get(
			`/api/v1/search-spaces/${searchSpaceId}/llm-preferences`,
			getLLMPreferencesResponse
		);
	};

	/**
	 * Update LLM preferences for a search space
	 */
	updateLLMPreferences = async (request: UpdateLLMPreferencesRequest) => {
		const parsed = updateLLMPreferencesRequest.safeParse(request);
		if (!parsed.success) {
			throw this.buildValidationError(parsed.error);
		}
		const { search_space_id, data } = parsed.data;
		return baseApiService.put(
			`/api/v1/search-spaces/${search_space_id}/llm-preferences`,
			updateLLMPreferencesResponse,
			{ body: data }
		);
	};

	/**
	 * Log a failed zod parse and build the ValidationError to throw.
	 * Centralizes the identical log-and-throw sequence previously repeated
	 * in every validated method; message format is unchanged.
	 */
	private buildValidationError(error: { issues: { message: string }[] }) {
		console.error("Invalid request:", error);
		const errorMessage = error.issues.map((issue) => issue.message).join(", ");
		return new ValidationError(`Invalid request: ${errorMessage}`);
	}
}
// Shared singleton instance of the NewLLMConfig API service used across the app.
export const newLLMConfigApiService = new NewLLMConfigApiService();

View file

@ -7,7 +7,6 @@ import {
deleteSearchSpaceResponse,
type GetSearchSpaceRequest,
type GetSearchSpacesRequest,
getCommunityPromptsResponse,
getSearchSpaceRequest,
getSearchSpaceResponse,
getSearchSpacesRequest,
@ -67,16 +66,6 @@ class SearchSpacesApiService {
});
};
/**
* Get community-curated prompts for search space system instructions
*/
getCommunityPrompts = async () => {
return baseApiService.get(
`/api/v1/searchspaces/prompts/community`,
getCommunityPromptsResponse
);
};
/**
* Get a single search space by ID
*/

View file

@ -153,8 +153,6 @@ export function createAttachmentAdapter(): AttachmentAdapter {
throw new Error("No file provided");
}
console.log("[AttachmentAdapter] Processing file:", file.name);
// Generate a unique ID for this attachment
const id = crypto.randomUUID();
@ -175,13 +173,6 @@ export function createAttachmentAdapter(): AttachmentAdapter {
// Process the file through the backend ETL service
const result = await processAttachment(file);
console.log(
"[AttachmentAdapter] File processed:",
result.name,
"content length:",
result.contentLength
);
// Verify we have the required fields
if (!result.content) {
console.error("[AttachmentAdapter] WARNING: No content received from backend!");

View file

@ -1,33 +0,0 @@
// Helper to normalize list responses from the API
// Supports shapes: Array<T>, { items: T[]; total: number }, and tuple [T[], total]
export type ListResponse<T> = {
	items: T[];
	total: number;
};

export function normalizeListResponse<T>(payload: any): ListResponse<T> {
	try {
		// Already in the desired { items, total } shape (total inferred when absent).
		if (payload && Array.isArray(payload.items)) {
			const items = payload.items as T[];
			return {
				items,
				total: typeof payload.total === "number" ? payload.total : items.length,
			};
		}
		// Tuple shape: [items, total].
		if (Array.isArray(payload) && payload.length === 2 && Array.isArray(payload[0])) {
			const items = (payload[0] ?? []) as T[];
			const rawTotal = payload[1];
			return {
				items,
				total: typeof rawTotal === "number" ? rawTotal : items.length,
			};
		}
		// Bare array shape.
		if (Array.isArray(payload)) {
			const items = payload as T[];
			return { items, total: items.length };
		}
	} catch {
		// Unexpected payload shape — fall through to the empty default.
	}
	return { items: [], total: 0 };
}

View file

@ -1,5 +1,4 @@
import type { GetDocumentsRequest } from "@/contracts/types/document.types";
import type { GetLLMConfigsRequest } from "@/contracts/types/llm-config.types";
import type { GetSearchSpacesRequest } from "@/contracts/types/search-space.types";
export const cacheKeys = {
@ -19,13 +18,12 @@ export const cacheKeys = {
typeCounts: (searchSpaceId?: string) => ["documents", "type-counts", searchSpaceId] as const,
byChunk: (chunkId: string) => ["documents", "by-chunk", chunkId] as const,
},
llmConfigs: {
global: () => ["llm-configs", "global"] as const,
all: (searchSpaceId: string) => ["llm-configs", searchSpaceId] as const,
withQueryParams: (queries: GetLLMConfigsRequest["queryParams"]) =>
["llm-configs", ...(queries ? Object.values(queries) : [])] as const,
byId: (llmConfigId: string) => ["llm-config", llmConfigId] as const,
preferences: (searchSpaceId: string) => ["llm-preferences", searchSpaceId] as const,
newLLMConfigs: {
all: (searchSpaceId: number) => ["new-llm-configs", searchSpaceId] as const,
byId: (configId: number) => ["new-llm-configs", "detail", configId] as const,
preferences: (searchSpaceId: number) => ["llm-preferences", searchSpaceId] as const,
defaultInstructions: () => ["new-llm-configs", "default-instructions"] as const,
global: () => ["new-llm-configs", "global"] as const,
},
auth: {
user: ["auth", "user"] as const,
@ -35,7 +33,6 @@ export const cacheKeys = {
withQueryParams: (queries: GetSearchSpacesRequest["queryParams"]) =>
["search-spaces", ...(queries ? Object.values(queries) : [])] as const,
detail: (searchSpaceId: string) => ["search-spaces", searchSpaceId] as const,
communityPrompts: ["search-spaces", "community-prompts"] as const,
},
user: {
current: () => ["user", "me"] as const,

View file

@ -86,10 +86,8 @@
"react-dom": "^19.2.3",
"react-dropzone": "^14.3.8",
"react-hook-form": "^7.61.1",
"react-json-view": "^1.21.3",
"react-json-view-lite": "^2.4.1",
"react-markdown": "^10.1.0",
"react-rough-notation": "^1.0.8",
"react-syntax-highlighter": "^15.6.1",
"react-wrap-balancer": "^1.1.1",
"rehype-raw": "^7.0.0",

View file

@ -203,18 +203,12 @@ importers:
react-hook-form:
specifier: ^7.61.1
version: 7.69.0(react@19.2.3)
react-json-view:
specifier: ^1.21.3
version: 1.21.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
react-json-view-lite:
specifier: ^2.4.1
version: 2.5.0(react@19.2.3)
react-markdown:
specifier: ^10.1.0
version: 10.1.0(@types/react@19.2.7)(react@19.2.3)
react-rough-notation:
specifier: ^1.0.8
version: 1.0.8(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
react-syntax-highlighter:
specifier: ^15.6.1
version: 15.6.6(react@19.2.3)
@ -3092,9 +3086,6 @@ packages:
resolution: {integrity: sha512-BNoCY6SXXPQ7gF2opIP4GBE+Xw7U+pHMYKuzjgCN3GwiaIR09UUeKfheyIry77QtrCBlC0KK0q5/TER/tYh3PQ==}
engines: {node: '>= 0.4'}
asap@2.0.6:
resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==}
assistant-cloud@0.1.12:
resolution: {integrity: sha512-A2tY6QIdP9+RkE8Mmpm4kAoO0NyKsKpJKYebbYFZ3bAnQKyB15Bw/PS9AovpdeziGU9At97TyiMrT36pDjCD7A==}
@ -3137,9 +3128,6 @@ packages:
balanced-match@1.0.2:
resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
base16@1.0.0:
resolution: {integrity: sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==}
baseline-browser-mapping@2.9.11:
resolution: {integrity: sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==}
hasBin: true
@ -3299,9 +3287,6 @@ packages:
engines: {node: '>=10.14', npm: '>=6', yarn: '>=1'}
hasBin: true
cross-fetch@3.2.0:
resolution: {integrity: sha512-Q+xVJLoGOeIMXZmbUK4HYk+69cQH6LudR0Vu/pRm2YlU/hDV9CiS0gKUMaWY5f2NeUH9C1nV3bsTlCo0FsTV1Q==}
cross-spawn@7.0.6:
resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
engines: {node: '>= 8'}
@ -3939,15 +3924,6 @@ packages:
fault@1.0.4:
resolution: {integrity: sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==}
fbemitter@3.0.0:
resolution: {integrity: sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==}
fbjs-css-vars@1.0.2:
resolution: {integrity: sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==}
fbjs@3.0.5:
resolution: {integrity: sha512-ztsSx77JBtkuMrEypfhgc3cI0+0h+svqeie7xHbh1k/IKdcydnvadp/mUaGgjAOXQmQSxsqgaRhS3q9fy+1kxg==}
fdir@6.5.0:
resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==}
engines: {node: '>=12.0.0'}
@ -3980,11 +3956,6 @@ packages:
flatted@3.3.3:
resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
flux@4.0.4:
resolution: {integrity: sha512-NCj3XlayA2UsapRpM7va6wU1+9rE5FIL7qoMcmxWHRzbp0yujihMBm9BBHZ1MDIk5h5o2Bl6eGiCe8rYELAmYw==}
peerDependencies:
react: ^15.0.2 || ^16.0.0 || ^17.0.0
for-each@0.3.5:
resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==}
engines: {node: '>= 0.4'}
@ -4684,12 +4655,6 @@ packages:
lodash-es@4.17.22:
resolution: {integrity: sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==}
lodash.curry@4.1.1:
resolution: {integrity: sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==}
lodash.flow@3.5.0:
resolution: {integrity: sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==}
lodash.merge@4.6.2:
resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==}
@ -5050,15 +5015,6 @@ packages:
node-addon-api@7.1.1:
resolution: {integrity: sha512-5m3bsyrjFWE1xf7nz7YXdN4udnVtXK6/Yfgn5qnahL6bCkf2yKt4k3nuTKAtT4r3IG8JNR2ncsIMdZuAzJjHQQ==}
node-fetch@2.7.0:
resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==}
engines: {node: 4.x || >=6.0.0}
peerDependencies:
encoding: ^0.1.0
peerDependenciesMeta:
encoding:
optional: true
npm-to-yarn@3.0.1:
resolution: {integrity: sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@ -5271,9 +5227,6 @@ packages:
resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==}
engines: {node: '>=6'}
promise@7.3.1:
resolution: {integrity: sha512-nolQXZ/4L+bP/UGlkfaIujX9BKxGwmQ9OT4mOt5yvy8iK1h3wqTEJCijzGANTCCl9nWjY41juyAn2K3Q1hLLTg==}
prop-types@15.8.1:
resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==}
@ -5384,15 +5337,9 @@ packages:
resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
engines: {node: '>=6'}
pure-color@1.3.0:
resolution: {integrity: sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==}
queue-microtask@1.2.3:
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
react-base16-styling@0.6.0:
resolution: {integrity: sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==}
react-day-picker@9.13.0:
resolution: {integrity: sha512-euzj5Hlq+lOHqI53NiuNhCP8HWgsPf/bBAVijR50hNaY1XwjKjShAnIe8jm8RD2W9IJUvihDIZ+KrmqfFzNhFQ==}
engines: {node: '>=18'}
@ -5437,15 +5384,6 @@ packages:
peerDependencies:
react: ^18.0.0 || ^19.0.0
react-json-view@1.21.3:
resolution: {integrity: sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==}
peerDependencies:
react: ^17.0.0 || ^16.3.0 || ^15.5.4
react-dom: ^17.0.0 || ^16.3.0 || ^15.5.4
react-lifecycles-compat@3.0.4:
resolution: {integrity: sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==}
react-markdown@10.1.0:
resolution: {integrity: sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==}
peerDependencies:
@ -5504,12 +5442,6 @@ packages:
'@types/react':
optional: true
react-rough-notation@1.0.8:
resolution: {integrity: sha512-ClreTCYNGVvjFKN4gvr3kuBAdA2WP+4B0mIdC7UDmek2n6FzUGColDMqhb142XZl69nlPGYfztivHLs5Un3KoA==}
peerDependencies:
react: ^18.2.0 || ^19.0.0
react-dom: ^18.2.0 || ^19.0.0
react-style-singleton@2.2.3:
resolution: {integrity: sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ==}
engines: {node: '>=10'}
@ -5678,9 +5610,6 @@ packages:
rope-sequence@1.3.4:
resolution: {integrity: sha512-UT5EDe2cu2E/6O4igUr5PSFs23nvvukicWHx6GnOPlHAiiYbzNuCRQCuiUdHJQcqKalLKlrYJnjY0ySGsXNQXQ==}
rough-notation@0.5.1:
resolution: {integrity: sha512-ITHofTzm13cWFVfoGsh/4c/k2Mg8geKgBCwex71UZLnNuw403tCRjYPQ68jSAd37DMbZIePXPjDgY0XdZi9HPw==}
roughjs@4.6.6:
resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==}
@ -5751,9 +5680,6 @@ packages:
resolution: {integrity: sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==}
engines: {node: '>= 0.4'}
setimmediate@1.0.5:
resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==}
sharp@0.34.5:
resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==}
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
@ -5948,9 +5874,6 @@ packages:
resolution: {integrity: sha512-FVDYdxtnj0G6Qm/DhNPSb8Ju59ULcup3tuJxkFb5K8Bv2pUXILbf0xZWU8PX8Ov19OXljbUyveOFwRMwkXzO+A==}
engines: {node: '>=16'}
tr46@0.0.3:
resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==}
tr46@5.1.1:
resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==}
engines: {node: '>=18'}
@ -6014,10 +5937,6 @@ packages:
engines: {node: '>=14.17'}
hasBin: true
ua-parser-js@1.0.41:
resolution: {integrity: sha512-LbBDqdIC5s8iROCUjMbW1f5dJQTEFB1+KO9ogbvlb3nm9n4YHa5p4KTvFPWvh2Hs8gZMBuiB1/8+pdfe/tDPug==}
hasBin: true
uc.micro@2.1.0:
resolution: {integrity: sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==}
@ -6171,9 +6090,6 @@ packages:
web-namespaces@2.0.1:
resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==}
webidl-conversions@3.0.1:
resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==}
webidl-conversions@7.0.0:
resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==}
engines: {node: '>=12'}
@ -6190,9 +6106,6 @@ packages:
resolution: {integrity: sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==}
engines: {node: '>=18'}
whatwg-url@5.0.0:
resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==}
which-boxed-primitive@1.1.1:
resolution: {integrity: sha512-TbX3mj8n0odCBFVlY8AxkqcHASw3L60jIuF8jFP78az3C2YhmGvqbHBpAjTRH2/xqYunrJ9g1jSyjCjpoWzIAA==}
engines: {node: '>= 0.4'}
@ -9028,8 +8941,6 @@ snapshots:
get-intrinsic: 1.3.0
is-array-buffer: 3.0.5
asap@2.0.6: {}
assistant-cloud@0.1.12:
dependencies:
assistant-stream: 0.2.46
@ -9062,8 +8973,6 @@ snapshots:
balanced-match@1.0.2: {}
base16@1.0.0: {}
baseline-browser-mapping@2.9.11: {}
brace-expansion@1.1.12:
@ -9215,12 +9124,6 @@ snapshots:
dependencies:
cross-spawn: 7.0.6
cross-fetch@3.2.0:
dependencies:
node-fetch: 2.7.0
transitivePeerDependencies:
- encoding
cross-spawn@7.0.6:
dependencies:
path-key: 3.1.1
@ -10032,26 +9935,6 @@ snapshots:
dependencies:
format: 0.2.2
fbemitter@3.0.0:
dependencies:
fbjs: 3.0.5
transitivePeerDependencies:
- encoding
fbjs-css-vars@1.0.2: {}
fbjs@3.0.5:
dependencies:
cross-fetch: 3.2.0
fbjs-css-vars: 1.0.2
loose-envify: 1.4.0
object-assign: 4.1.1
promise: 7.3.1
setimmediate: 1.0.5
ua-parser-js: 1.0.41
transitivePeerDependencies:
- encoding
fdir@6.5.0(picomatch@4.0.3):
optionalDependencies:
picomatch: 4.0.3
@ -10080,14 +9963,6 @@ snapshots:
flatted@3.3.3: {}
flux@4.0.4(react@19.2.3):
dependencies:
fbemitter: 3.0.0
fbjs: 3.0.5
react: 19.2.3
transitivePeerDependencies:
- encoding
for-each@0.3.5:
dependencies:
is-callable: 1.2.7
@ -10915,10 +10790,6 @@ snapshots:
lodash-es@4.17.22: {}
lodash.curry@4.1.1: {}
lodash.flow@3.5.0: {}
lodash.merge@4.6.2: {}
longest-streak@3.1.0: {}
@ -11575,10 +11446,6 @@ snapshots:
node-addon-api@7.1.1: {}
node-fetch@2.7.0:
dependencies:
whatwg-url: 5.0.0
npm-to-yarn@3.0.1: {}
number-flow@0.5.8:
@ -11801,10 +11668,6 @@ snapshots:
prismjs@1.30.0: {}
promise@7.3.1:
dependencies:
asap: 2.0.6
prop-types@15.8.1:
dependencies:
loose-envify: 1.4.0
@ -11934,17 +11797,8 @@ snapshots:
punycode@2.3.1: {}
pure-color@1.3.0: {}
queue-microtask@1.2.3: {}
react-base16-styling@0.6.0:
dependencies:
base16: 1.0.0
lodash.curry: 4.1.1
lodash.flow: 3.5.0
pure-color: 1.3.0
react-day-picker@9.13.0(react@19.2.3):
dependencies:
'@date-fns/tz': 1.4.1
@ -11984,20 +11838,6 @@ snapshots:
dependencies:
react: 19.2.3
react-json-view@1.21.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
dependencies:
flux: 4.0.4(react@19.2.3)
react: 19.2.3
react-base16-styling: 0.6.0
react-dom: 19.2.3(react@19.2.3)
react-lifecycles-compat: 3.0.4
react-textarea-autosize: 8.5.9(@types/react@19.2.7)(react@19.2.3)
transitivePeerDependencies:
- '@types/react'
- encoding
react-lifecycles-compat@3.0.4: {}
react-markdown@10.1.0(@types/react@19.2.7)(react@19.2.3):
dependencies:
'@types/hast': 3.0.4
@ -12067,12 +11907,6 @@ snapshots:
optionalDependencies:
'@types/react': 19.2.7
react-rough-notation@1.0.8(react-dom@19.2.3(react@19.2.3))(react@19.2.3):
dependencies:
react: 19.2.3
react-dom: 19.2.3(react@19.2.3)
rough-notation: 0.5.1
react-style-singleton@2.2.3(@types/react@19.2.7)(react@19.2.3):
dependencies:
get-nonce: 1.0.1
@ -12339,8 +12173,6 @@ snapshots:
rope-sequence@1.3.4: {}
rough-notation@0.5.1: {}
roughjs@4.6.6:
dependencies:
hachure-fill: 0.5.2
@ -12421,8 +12253,6 @@ snapshots:
es-errors: 1.3.0
es-object-atoms: 1.1.1
setimmediate@1.0.5: {}
sharp@0.34.5:
dependencies:
'@img/colour': 1.0.0
@ -12685,8 +12515,6 @@ snapshots:
dependencies:
tldts: 6.1.86
tr46@0.0.3: {}
tr46@5.1.1:
dependencies:
punycode: 2.3.1
@ -12760,8 +12588,6 @@ snapshots:
typescript@5.9.3: {}
ua-parser-js@1.0.41: {}
uc.micro@2.1.0: {}
ufo@1.6.1: {}
@ -12941,8 +12767,6 @@ snapshots:
web-namespaces@2.0.1: {}
webidl-conversions@3.0.1: {}
webidl-conversions@7.0.0: {}
whatwg-encoding@3.1.1:
@ -12956,11 +12780,6 @@ snapshots:
tr46: 5.1.1
webidl-conversions: 7.0.0
whatwg-url@5.0.0:
dependencies:
tr46: 0.0.3
webidl-conversions: 3.0.1
which-boxed-primitive@1.1.1:
dependencies:
is-bigint: 1.1.0

View file

@ -1,11 +0,0 @@
// Proxy temporarily disabled for client-side i18n implementation
// Server-side i18n routing would require restructuring entire app directory to app/[locale]/...
// which is too invasive for this project
import type { NextRequest } from "next/server";
import { NextResponse } from "next/server";
// Pass-through proxy: every request continues unmodified to its destination.
export function proxy(request: NextRequest) {
	const passthrough = NextResponse.next();
	return passthrough;
}