Merge remote-tracking branch 'upstream/dev' into feat/sur-159

This commit is contained in:
Anish Sarkar 2026-02-11 03:47:51 +05:30
commit fc64f65876
86 changed files with 6332 additions and 5653 deletions

View file

@ -9,6 +9,8 @@ CELERY_TASK_DEFAULT_QUEUE=surfsense
# Redis for app-level features (heartbeats, podcast markers)
# Defaults to CELERY_BROKER_URL when not set
REDIS_APP_URL=redis://localhost:6379/0
# Optional: TTL in seconds for connector indexing lock key
# CONNECTOR_INDEXING_LOCK_TTL_SECONDS=28800
# Electric (for migrations only)
ELECTRIC_DB_USER=electric

View file

@ -0,0 +1,77 @@
"""Add shared_memories table (SUR-152)."""
from collections.abc import Sequence
from alembic import op
from app.config import config
revision: str = "96"
down_revision: str | None = "95"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
EMBEDDING_DIM = config.embedding_model_instance.dimension
def upgrade() -> None:
    """Create the shared_memories table and its supporting indexes.

    All DDL is guarded (DO-block existence checks or native IF NOT EXISTS)
    so the migration is idempotent and safe to re-run.
    """
    # Table: one row per shared (team) memory, scoped to a search space.
    # NOTE(review): assumes the 'memorycategory' enum and the pgvector
    # extension were created by earlier migrations — confirm ordering.
    # EMBEDDING_DIM is interpolated via f-string because vector(N) does not
    # accept a bind parameter; it comes from app config, not user input.
    op.execute(
        f"""
        DO $$
        BEGIN
        IF NOT EXISTS (
            SELECT FROM information_schema.tables
            WHERE table_name = 'shared_memories'
        ) THEN
            CREATE TABLE shared_memories (
                id SERIAL PRIMARY KEY,
                created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
                updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
                search_space_id INTEGER NOT NULL REFERENCES searchspaces(id) ON DELETE CASCADE,
                created_by_id UUID NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
                memory_text TEXT NOT NULL,
                category memorycategory NOT NULL DEFAULT 'fact',
                embedding vector({EMBEDDING_DIM})
            );
        END IF;
        END$$;
        """
    )
    # B-tree indexes for the common lookup patterns (by space, recency, author).
    # pg_indexes is checked because CREATE INDEX IF NOT EXISTS inside a DO
    # block keeps the three guards symmetric with the table guard above.
    op.execute(
        """
        DO $$
        BEGIN
        IF NOT EXISTS (
            SELECT 1 FROM pg_indexes
            WHERE tablename = 'shared_memories' AND indexname = 'ix_shared_memories_search_space_id'
        ) THEN
            CREATE INDEX ix_shared_memories_search_space_id ON shared_memories(search_space_id);
        END IF;
        IF NOT EXISTS (
            SELECT 1 FROM pg_indexes
            WHERE tablename = 'shared_memories' AND indexname = 'ix_shared_memories_updated_at'
        ) THEN
            CREATE INDEX ix_shared_memories_updated_at ON shared_memories(updated_at);
        END IF;
        IF NOT EXISTS (
            SELECT 1 FROM pg_indexes
            WHERE tablename = 'shared_memories' AND indexname = 'ix_shared_memories_created_by_id'
        ) THEN
            CREATE INDEX ix_shared_memories_created_by_id ON shared_memories(created_by_id);
        END IF;
        END$$;
        """
    )
    # HNSW index for cosine-similarity search over the embedding column.
    op.execute(
        """
        CREATE INDEX IF NOT EXISTS shared_memories_vector_index
        ON shared_memories USING hnsw (embedding public.vector_cosine_ops);
        """
    )
def downgrade() -> None:
    """Drop the shared_memories table and all of its indexes.

    Statements run indexes-first, table-last; every one is guarded with
    IF EXISTS so a partially applied upgrade can still be rolled back.
    """
    drop_statements = (
        "DROP INDEX IF EXISTS shared_memories_vector_index;",
        "DROP INDEX IF EXISTS ix_shared_memories_created_by_id;",
        "DROP INDEX IF EXISTS ix_shared_memories_updated_at;",
        "DROP INDEX IF EXISTS ix_shared_memories_search_space_id;",
        "DROP TABLE IF EXISTS shared_memories CASCADE;",
    )
    for statement in drop_statements:
        op.execute(statement)

View file

@ -0,0 +1,23 @@
"""Add GITHUB_MODELS to LiteLLMProvider enum
Revision ID: 97
Revises: 96
"""
from collections.abc import Sequence
from alembic import op
revision: str = "97"
down_revision: str | None = "96"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Add 'GITHUB_MODELS' to the litellmprovider PostgreSQL enum."""
    # ALTER TYPE ... ADD VALUE cannot run inside a transaction block on
    # older PostgreSQL versions, so end Alembic's implicit transaction first.
    op.execute("COMMIT")
    # IF NOT EXISTS makes the migration idempotent on re-run.
    op.execute("ALTER TYPE litellmprovider ADD VALUE IF NOT EXISTS 'GITHUB_MODELS'")
def downgrade() -> None:
    """No-op: PostgreSQL has no ALTER TYPE ... DROP VALUE, so the enum
    value added in upgrade() is left in place on downgrade."""
    pass

View file

@ -22,6 +22,7 @@ from app.agents.new_chat.system_prompt import (
build_surfsense_system_prompt,
)
from app.agents.new_chat.tools.registry import build_tools_async
from app.db import ChatVisibility
from app.services.connector_service import ConnectorService
# =============================================================================
@ -126,6 +127,7 @@ async def create_surfsense_deep_agent(
disabled_tools: list[str] | None = None,
additional_tools: Sequence[BaseTool] | None = None,
firecrawl_api_key: str | None = None,
thread_visibility: ChatVisibility | None = None,
):
"""
Create a SurfSense deep agent with configurable tools and prompts.
@ -228,14 +230,15 @@ async def create_surfsense_deep_agent(
logging.warning(f"Failed to discover available connectors/document types: {e}")
# Build dependencies dict for the tools registry
visibility = thread_visibility or ChatVisibility.PRIVATE
dependencies = {
"search_space_id": search_space_id,
"db_session": db_session,
"connector_service": connector_service,
"firecrawl_api_key": firecrawl_api_key,
"user_id": user_id, # Required for memory tools
"thread_id": thread_id, # For podcast tool
# Dynamic connector/document type discovery for knowledge base tool
"user_id": user_id,
"thread_id": thread_id,
"thread_visibility": visibility,
"available_connectors": available_connectors,
"available_document_types": available_document_types,
}
@ -255,10 +258,12 @@ async def create_surfsense_deep_agent(
custom_system_instructions=agent_config.system_instructions,
use_default_system_instructions=agent_config.use_default_system_instructions,
citations_enabled=agent_config.citations_enabled,
thread_visibility=thread_visibility,
)
else:
# Use default prompt (with citations enabled)
system_prompt = build_surfsense_system_prompt()
system_prompt = build_surfsense_system_prompt(
thread_visibility=thread_visibility,
)
# Create the deep agent with system prompt and checkpointer
# Note: TodoListMiddleware (write_todos) is included by default in create_deep_agent

View file

@ -45,6 +45,7 @@ PROVIDER_MAP = {
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"GITHUB_MODELS": "github",
"REPLICATE": "replicate",
"PERPLEXITY": "perplexity",
"ANYSCALE": "anyscale",

View file

@ -12,6 +12,8 @@ The prompt is composed of three parts:
from datetime import UTC, datetime
from app.db import ChatVisibility
# Default system instructions - can be overridden via NewLLMConfig.system_instructions
SURFSENSE_SYSTEM_INSTRUCTIONS = """
<system_instruction>
@ -22,7 +24,34 @@ Today's date (UTC): {resolved_today}
</system_instruction>
"""
SURFSENSE_TOOLS_INSTRUCTIONS = """
# Default system instructions for shared (team) threads: team context + message format for attribution
_SYSTEM_INSTRUCTIONS_SHARED = """
<system_instruction>
You are SurfSense, a reasoning and acting AI agent designed to answer questions in this team space using the team's shared knowledge base.
In this team thread, each message is prefixed with **[DisplayName of the author]**. Use this to attribute and reference the author of anything in the discussion (who asked a question, made a suggestion, or contributed an idea) and to cite who said what in your answers.
Today's date (UTC): {resolved_today}
</system_instruction>
"""
def _get_system_instructions(
    thread_visibility: ChatVisibility | None = None, today: datetime | None = None
) -> str:
    """Return the <system_instruction> block for the given thread visibility.

    Shared (search-space) threads get the team-oriented wording with author
    attribution; any other visibility (including None) falls back to the
    private wording. Today's date is resolved to a UTC ISO date string.
    """
    effective_visibility = thread_visibility or ChatVisibility.PRIVATE
    template = (
        _SYSTEM_INSTRUCTIONS_SHARED
        if effective_visibility == ChatVisibility.SEARCH_SPACE
        else SURFSENSE_SYSTEM_INSTRUCTIONS
    )
    date_str = (today or datetime.now(UTC)).astimezone(UTC).date().isoformat()
    return template.format(resolved_today=date_str)
# Tools 0-6 (common to both private and shared prompts)
_TOOLS_INSTRUCTIONS_COMMON = """
<tools>
You have access to the following tools:
@ -138,7 +167,11 @@ You have access to the following tools:
* Prioritize showing: diagrams, charts, infographics, key illustrations, or images that help explain the content.
* Don't show every image - just the most relevant 1-3 images that enhance understanding.
7. save_memory: Save facts, preferences, or context about the user for personalized responses.
"""
# Private (user) memory: tools 7-8 + memory-specific examples
_TOOLS_INSTRUCTIONS_MEMORY_PRIVATE = """
7. save_memory: Save facts, preferences, or context for personalized responses.
- Use this when the user explicitly or implicitly shares information worth remembering.
- Trigger scenarios:
* User says "remember this", "keep this in mind", "note that", or similar
@ -178,6 +211,75 @@ You have access to the following tools:
stating "Based on your memory..." - integrate the context seamlessly.
</tools>
<tool_call_examples>
- User: "Remember that I prefer TypeScript over JavaScript"
- Call: `save_memory(content="User prefers TypeScript over JavaScript for development", category="preference")`
- User: "I'm a data scientist working on ML pipelines"
- Call: `save_memory(content="User is a data scientist working on ML pipelines", category="fact")`
- User: "Always give me code examples in Python"
- Call: `save_memory(content="User wants code examples to be written in Python", category="instruction")`
- User: "What programming language should I use for this project?"
- First recall: `recall_memory(query="programming language preferences")`
- Then provide a personalized recommendation based on their preferences
- User: "What do you know about me?"
- Call: `recall_memory(top_k=10)`
- Then summarize the stored memories
"""
# Shared (team) memory: tools 7-8 + team memory examples
_TOOLS_INSTRUCTIONS_MEMORY_SHARED = """
7. save_memory: Save a fact, preference, or context to the team's shared memory for future reference.
- Use this when the user or a team member says "remember this", "keep this in mind", or similar in this shared chat.
- Use when the team agrees on something to remember (e.g., decisions, conventions).
- Someone shares a preference or fact that should be visible to the whole team.
- The saved information will be available in future shared conversations in this space.
- Args:
- content: The fact/preference/context to remember. Phrase it clearly, e.g. "API keys are stored in Vault", "The team prefers weekly demos on Fridays"
- category: Type of memory. One of:
* "preference": Team or workspace preferences
* "fact": Facts the team agreed on (e.g., processes, locations)
* "instruction": Standing instructions for the team
* "context": Current context (e.g., ongoing projects, goals)
- Returns: Confirmation of saved memory; returned context may include who added it (added_by).
- IMPORTANT: Only save information that would be genuinely useful for future team conversations in this space.
8. recall_memory: Recall relevant team memories for this space to provide contextual responses.
- Use when you need team context to answer (e.g., "where do we store X?", "what did we decide about Y?").
- Use when someone asks about something the team agreed to remember.
- Use when team preferences or conventions would improve the response.
- Args:
- query: Optional search query to find specific memories. If not provided, returns the most recent memories.
- category: Optional filter by category ("preference", "fact", "instruction", "context")
- top_k: Number of memories to retrieve (default: 5, max: 20)
- Returns: Relevant team memories and formatted context (may include added_by). Integrate naturally without saying "Based on team memory...".
</tools>
<tool_call_examples>
- User: "Remember that API keys are stored in Vault"
- Call: `save_memory(content="API keys are stored in Vault", category="fact")`
- User: "Let's remember that the team prefers weekly demos on Fridays"
- Call: `save_memory(content="The team prefers weekly demos on Fridays", category="preference")`
- User: "What did we decide about the release date?"
- First recall: `recall_memory(query="release date decision")`
- Then answer based on the team memories
- User: "Where do we document onboarding?"
- Call: `recall_memory(query="onboarding documentation")`
- Then answer using the recalled team context
- User: "What have we agreed to remember about our deployment process?"
- Call: `recall_memory(query="deployment process", top_k=10)`
- Then summarize the relevant team memories
"""
# Examples shared by both private and shared prompts (knowledge base, docs, podcast, links, images, etc.)
_TOOLS_INSTRUCTIONS_EXAMPLES_COMMON = """
- User: "What time is the team meeting today?"
- Call: `search_knowledge_base(query="team meeting time today")` (searches ALL sources - calendar, notes, Obsidian, etc.)
- DO NOT limit to just calendar - the info might be in notes!
@ -209,23 +311,6 @@ You have access to the following tools:
- User: "What's in my Obsidian vault about project ideas?"
- Call: `search_knowledge_base(query="project ideas", connectors_to_search=["OBSIDIAN_CONNECTOR"])`
- User: "Remember that I prefer TypeScript over JavaScript"
- Call: `save_memory(content="User prefers TypeScript over JavaScript for development", category="preference")`
- User: "I'm a data scientist working on ML pipelines"
- Call: `save_memory(content="User is a data scientist working on ML pipelines", category="fact")`
- User: "Always give me code examples in Python"
- Call: `save_memory(content="User wants code examples to be written in Python", category="instruction")`
- User: "What programming language should I use for this project?"
- First recall: `recall_memory(query="programming language preferences")`
- Then provide a personalized recommendation based on their preferences
- User: "What do you know about me?"
- Call: `recall_memory(top_k=10)`
- Then summarize the stored memories
- User: "Give me a podcast about AI trends based on what we discussed"
- First search for relevant content, then call: `generate_podcast(source_content="Based on our conversation and search results: [detailed summary of chat + search findings]", podcast_title="AI Trends Podcast")`
@ -315,6 +400,31 @@ You have access to the following tools:
</tool_call_examples>
"""
# Reassemble so existing callers see no change (same full prompt)
SURFSENSE_TOOLS_INSTRUCTIONS = (
_TOOLS_INSTRUCTIONS_COMMON
+ _TOOLS_INSTRUCTIONS_MEMORY_PRIVATE
+ _TOOLS_INSTRUCTIONS_EXAMPLES_COMMON
)
def _get_tools_instructions(thread_visibility: ChatVisibility | None = None) -> str:
    """Assemble the <tools> prompt section for the given thread visibility.

    The common tool list and common examples are shared; only the memory
    tool wording differs: team memory for shared (search-space) threads,
    user memory for private threads (the default when visibility is None).
    """
    effective_visibility = thread_visibility or ChatVisibility.PRIVATE
    if effective_visibility == ChatVisibility.SEARCH_SPACE:
        memory_section = _TOOLS_INSTRUCTIONS_MEMORY_SHARED
    else:
        memory_section = _TOOLS_INSTRUCTIONS_MEMORY_PRIVATE
    sections = [
        _TOOLS_INSTRUCTIONS_COMMON,
        memory_section,
        _TOOLS_INSTRUCTIONS_EXAMPLES_COMMON,
    ]
    return "".join(sections)
SURFSENSE_CITATION_INSTRUCTIONS = """
<citation_instructions>
CRITICAL CITATION REQUIREMENTS:
@ -413,6 +523,7 @@ Your goal is to provide helpful, informative answers in a clean, readable format
def build_surfsense_system_prompt(
today: datetime | None = None,
thread_visibility: ChatVisibility | None = None,
) -> str:
"""
Build the SurfSense system prompt with default settings.
@ -424,17 +535,17 @@ def build_surfsense_system_prompt(
Args:
today: Optional datetime for today's date (defaults to current UTC date)
thread_visibility: Optional; when provided, used for conditional prompt (e.g. private vs shared memory wording). Defaults to private behavior when None.
Returns:
Complete system prompt string
"""
resolved_today = (today or datetime.now(UTC)).astimezone(UTC).date().isoformat()
return (
SURFSENSE_SYSTEM_INSTRUCTIONS.format(resolved_today=resolved_today)
+ SURFSENSE_TOOLS_INSTRUCTIONS
+ SURFSENSE_CITATION_INSTRUCTIONS
)
visibility = thread_visibility or ChatVisibility.PRIVATE
system_instructions = _get_system_instructions(visibility, today)
tools_instructions = _get_tools_instructions(visibility)
citation_instructions = SURFSENSE_CITATION_INSTRUCTIONS
return system_instructions + tools_instructions + citation_instructions
def build_configurable_system_prompt(
@ -442,6 +553,7 @@ def build_configurable_system_prompt(
use_default_system_instructions: bool = True,
citations_enabled: bool = True,
today: datetime | None = None,
thread_visibility: ChatVisibility | None = None,
) -> str:
"""
Build a configurable SurfSense system prompt based on NewLLMConfig settings.
@ -460,6 +572,7 @@ def build_configurable_system_prompt(
citations_enabled: Whether to include citation instructions (True) or
anti-citation instructions (False).
today: Optional datetime for today's date (defaults to current UTC date)
thread_visibility: Optional; when provided, used for conditional prompt (e.g. private vs shared memory wording). Defaults to private behavior when None.
Returns:
Complete system prompt string
@ -473,16 +586,14 @@ def build_configurable_system_prompt(
resolved_today=resolved_today
)
elif use_default_system_instructions:
# Use default instructions
system_instructions = SURFSENSE_SYSTEM_INSTRUCTIONS.format(
resolved_today=resolved_today
)
visibility = thread_visibility or ChatVisibility.PRIVATE
system_instructions = _get_system_instructions(visibility, today)
else:
# No system instructions (edge case)
system_instructions = ""
# Tools instructions are always included
tools_instructions = SURFSENSE_TOOLS_INSTRUCTIONS
# Tools instructions: conditional on thread_visibility (private vs shared memory wording)
tools_instructions = _get_tools_instructions(thread_visibility)
# Citation instructions based on toggle
citation_instructions = (

View file

@ -8,6 +8,7 @@ This module provides:
- Tool factory for creating search_knowledge_base tools
"""
import asyncio
import json
from datetime import datetime
from typing import Any
@ -16,6 +17,7 @@ from langchain_core.tools import StructuredTool
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import async_session_maker
from app.services.connector_service import ConnectorService
# =============================================================================
@ -333,7 +335,7 @@ async def search_knowledge_base_async(
Returns:
Formatted string with search results
"""
all_documents = []
all_documents: list[dict[str, Any]] = []
# Resolve date range (default last 2 years)
from app.agents.new_chat.utils import resolve_date_range
@ -345,323 +347,131 @@ async def search_knowledge_base_async(
connectors = _normalize_connectors(connectors_to_search, available_connectors)
for connector in connectors:
connector_specs: dict[str, tuple[str, bool, bool, dict[str, Any]]] = {
"YOUTUBE_VIDEO": ("search_youtube", True, True, {}),
"EXTENSION": ("search_extension", True, True, {}),
"CRAWLED_URL": ("search_crawled_urls", True, True, {}),
"FILE": ("search_files", True, True, {}),
"SLACK_CONNECTOR": ("search_slack", True, True, {}),
"TEAMS_CONNECTOR": ("search_teams", True, True, {}),
"NOTION_CONNECTOR": ("search_notion", True, True, {}),
"GITHUB_CONNECTOR": ("search_github", True, True, {}),
"LINEAR_CONNECTOR": ("search_linear", True, True, {}),
"TAVILY_API": ("search_tavily", False, True, {}),
"SEARXNG_API": ("search_searxng", False, True, {}),
"LINKUP_API": ("search_linkup", False, False, {"mode": "standard"}),
"BAIDU_SEARCH_API": ("search_baidu", False, True, {}),
"DISCORD_CONNECTOR": ("search_discord", True, True, {}),
"JIRA_CONNECTOR": ("search_jira", True, True, {}),
"GOOGLE_CALENDAR_CONNECTOR": ("search_google_calendar", True, True, {}),
"AIRTABLE_CONNECTOR": ("search_airtable", True, True, {}),
"GOOGLE_GMAIL_CONNECTOR": ("search_google_gmail", True, True, {}),
"GOOGLE_DRIVE_FILE": ("search_google_drive", True, True, {}),
"CONFLUENCE_CONNECTOR": ("search_confluence", True, True, {}),
"CLICKUP_CONNECTOR": ("search_clickup", True, True, {}),
"LUMA_CONNECTOR": ("search_luma", True, True, {}),
"ELASTICSEARCH_CONNECTOR": ("search_elasticsearch", True, True, {}),
"NOTE": ("search_notes", True, True, {}),
"BOOKSTACK_CONNECTOR": ("search_bookstack", True, True, {}),
"CIRCLEBACK": ("search_circleback", True, True, {}),
"OBSIDIAN_CONNECTOR": ("search_obsidian", True, True, {}),
# Composio connectors
"COMPOSIO_GOOGLE_DRIVE_CONNECTOR": (
"search_composio_google_drive",
True,
True,
{},
),
"COMPOSIO_GMAIL_CONNECTOR": ("search_composio_gmail", True, True, {}),
"COMPOSIO_GOOGLE_CALENDAR_CONNECTOR": (
"search_composio_google_calendar",
True,
True,
{},
),
}
# Keep a conservative cap to avoid overloading DB/external services.
max_parallel_searches = 4
semaphore = asyncio.Semaphore(max_parallel_searches)
async def _search_one_connector(connector: str) -> list[dict[str, Any]]:
spec = connector_specs.get(connector)
if spec is None:
return []
method_name, includes_date_range, includes_top_k, extra_kwargs = spec
kwargs: dict[str, Any] = {
"user_query": query,
"search_space_id": search_space_id,
**extra_kwargs,
}
if includes_top_k:
kwargs["top_k"] = top_k
if includes_date_range:
kwargs["start_date"] = resolved_start_date
kwargs["end_date"] = resolved_end_date
try:
if connector == "YOUTUBE_VIDEO":
_, chunks = await connector_service.search_youtube(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
# Use isolated session per connector. Shared AsyncSession cannot safely
# run concurrent DB operations.
async with semaphore, async_session_maker() as isolated_session:
isolated_connector_service = ConnectorService(
isolated_session, search_space_id
)
all_documents.extend(chunks)
elif connector == "EXTENSION":
_, chunks = await connector_service.search_extension(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "CRAWLED_URL":
_, chunks = await connector_service.search_crawled_urls(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "FILE":
_, chunks = await connector_service.search_files(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "SLACK_CONNECTOR":
_, chunks = await connector_service.search_slack(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "TEAMS_CONNECTOR":
_, chunks = await connector_service.search_teams(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "NOTION_CONNECTOR":
_, chunks = await connector_service.search_notion(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "GITHUB_CONNECTOR":
_, chunks = await connector_service.search_github(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "LINEAR_CONNECTOR":
_, chunks = await connector_service.search_linear(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "TAVILY_API":
_, chunks = await connector_service.search_tavily(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
)
all_documents.extend(chunks)
elif connector == "SEARXNG_API":
_, chunks = await connector_service.search_searxng(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
)
all_documents.extend(chunks)
elif connector == "LINKUP_API":
# Keep behavior aligned with researcher: default "standard"
_, chunks = await connector_service.search_linkup(
user_query=query,
search_space_id=search_space_id,
mode="standard",
)
all_documents.extend(chunks)
elif connector == "BAIDU_SEARCH_API":
_, chunks = await connector_service.search_baidu(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
)
all_documents.extend(chunks)
elif connector == "DISCORD_CONNECTOR":
_, chunks = await connector_service.search_discord(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "JIRA_CONNECTOR":
_, chunks = await connector_service.search_jira(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "GOOGLE_CALENDAR_CONNECTOR":
_, chunks = await connector_service.search_google_calendar(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "AIRTABLE_CONNECTOR":
_, chunks = await connector_service.search_airtable(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "GOOGLE_GMAIL_CONNECTOR":
_, chunks = await connector_service.search_google_gmail(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "GOOGLE_DRIVE_FILE":
_, chunks = await connector_service.search_google_drive(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "CONFLUENCE_CONNECTOR":
_, chunks = await connector_service.search_confluence(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "CLICKUP_CONNECTOR":
_, chunks = await connector_service.search_clickup(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "LUMA_CONNECTOR":
_, chunks = await connector_service.search_luma(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "ELASTICSEARCH_CONNECTOR":
_, chunks = await connector_service.search_elasticsearch(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "NOTE":
_, chunks = await connector_service.search_notes(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "BOOKSTACK_CONNECTOR":
_, chunks = await connector_service.search_bookstack(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "CIRCLEBACK":
_, chunks = await connector_service.search_circleback(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "OBSIDIAN_CONNECTOR":
_, chunks = await connector_service.search_obsidian(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
# =========================================================
# Composio Connectors
# =========================================================
elif connector == "COMPOSIO_GOOGLE_DRIVE_CONNECTOR":
_, chunks = await connector_service.search_composio_google_drive(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "COMPOSIO_GMAIL_CONNECTOR":
_, chunks = await connector_service.search_composio_gmail(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
elif connector == "COMPOSIO_GOOGLE_CALENDAR_CONNECTOR":
_, chunks = await connector_service.search_composio_google_calendar(
user_query=query,
search_space_id=search_space_id,
top_k=top_k,
start_date=resolved_start_date,
end_date=resolved_end_date,
)
all_documents.extend(chunks)
connector_method = getattr(isolated_connector_service, method_name)
_, chunks = await connector_method(**kwargs)
return chunks
except Exception as e:
print(f"Error searching connector {connector}: {e}")
continue
return []
# Deduplicate by content hash
connector_results = await asyncio.gather(
*[_search_one_connector(connector) for connector in connectors]
)
for chunks in connector_results:
all_documents.extend(chunks)
# Deduplicate primarily by document ID. Only fall back to content hashing
# when a document has no ID.
seen_doc_ids: set[Any] = set()
seen_hashes: set[int] = set()
seen_content_hashes: set[int] = set()
deduplicated: list[dict[str, Any]] = []
def _content_fingerprint(document: dict[str, Any]) -> int | None:
chunks = document.get("chunks")
if isinstance(chunks, list):
chunk_texts = []
for chunk in chunks:
if not isinstance(chunk, dict):
continue
chunk_content = (chunk.get("content") or "").strip()
if chunk_content:
chunk_texts.append(chunk_content)
if chunk_texts:
return hash("||".join(chunk_texts))
flat_content = (document.get("content") or "").strip()
if flat_content:
return hash(flat_content)
return None
for doc in all_documents:
doc_id = (doc.get("document", {}) or {}).get("id")
content = (doc.get("content", "") or "").strip()
content_hash = hash(content)
if (doc_id and doc_id in seen_doc_ids) or content_hash in seen_hashes:
if doc_id is not None:
if doc_id in seen_doc_ids:
continue
seen_doc_ids.add(doc_id)
deduplicated.append(doc)
continue
if doc_id:
seen_doc_ids.add(doc_id)
seen_hashes.add(content_hash)
content_hash = _content_fingerprint(doc)
if content_hash is not None:
if content_hash in seen_content_hashes:
continue
seen_content_hashes.add(content_hash)
deduplicated.append(doc)
return format_documents_for_context(deduplicated)

View file

@ -11,21 +11,18 @@ Duplicate request prevention:
- Returns a friendly message if a podcast is already being generated
"""
import os
from typing import Any
import redis
from langchain_core.tools import tool
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import config
from app.db import Podcast, PodcastStatus
# Redis connection for tracking active podcast tasks
# Defaults to the Celery broker when REDIS_APP_URL is not set
REDIS_URL = os.getenv(
"REDIS_APP_URL",
os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0"),
)
REDIS_URL = config.REDIS_APP_URL
_redis_client: redis.Redis | None = None

View file

@ -43,6 +43,8 @@ from typing import Any
from langchain_core.tools import BaseTool
from app.db import ChatVisibility
from .display_image import create_display_image_tool
from .generate_image import create_generate_image_tool
from .knowledge_base import create_search_knowledge_base_tool
@ -51,6 +53,10 @@ from .mcp_tool import load_mcp_tools
from .podcast import create_generate_podcast_tool
from .scrape_webpage import create_scrape_webpage_tool
from .search_surfsense_docs import create_search_surfsense_docs_tool
from .shared_memory import (
create_recall_shared_memory_tool,
create_save_shared_memory_tool,
)
from .user_memory import create_recall_memory_tool, create_save_memory_tool
# =============================================================================
@ -156,29 +162,42 @@ BUILTIN_TOOLS: list[ToolDefinition] = [
requires=["db_session"],
),
# =========================================================================
# USER MEMORY TOOLS - Claude-like memory feature
# USER MEMORY TOOLS - private or team store by thread_visibility
# =========================================================================
# Save memory tool - stores facts/preferences about the user
ToolDefinition(
name="save_memory",
description="Save facts, preferences, or context about the user for personalized responses",
factory=lambda deps: create_save_memory_tool(
user_id=deps["user_id"],
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
description="Save facts, preferences, or context for personalized or team responses",
factory=lambda deps: (
create_save_shared_memory_tool(
search_space_id=deps["search_space_id"],
created_by_id=deps["user_id"],
db_session=deps["db_session"],
)
if deps["thread_visibility"] == ChatVisibility.SEARCH_SPACE
else create_save_memory_tool(
user_id=deps["user_id"],
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
)
),
requires=["user_id", "search_space_id", "db_session"],
requires=["user_id", "search_space_id", "db_session", "thread_visibility"],
),
# Recall memory tool - retrieves relevant user memories
ToolDefinition(
name="recall_memory",
description="Recall user memories for personalized and contextual responses",
factory=lambda deps: create_recall_memory_tool(
user_id=deps["user_id"],
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
description="Recall relevant memories (personal or team) for context",
factory=lambda deps: (
create_recall_shared_memory_tool(
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
)
if deps["thread_visibility"] == ChatVisibility.SEARCH_SPACE
else create_recall_memory_tool(
user_id=deps["user_id"],
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
)
),
requires=["user_id", "search_space_id", "db_session"],
requires=["user_id", "search_space_id", "db_session", "thread_visibility"],
),
# =========================================================================
# ADD YOUR CUSTOM TOOLS BELOW

View file

@ -0,0 +1,280 @@
"""Shared (team) memory backend for search-space-scoped AI context."""
import logging
from typing import Any
from uuid import UUID

from langchain_core.tools import tool
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession

from app.config import config
from app.db import MemoryCategory, SharedMemory, User
logger = logging.getLogger(__name__)
DEFAULT_RECALL_TOP_K = 5
MAX_MEMORIES_PER_SEARCH_SPACE = 250
async def get_shared_memory_count(
    db_session: AsyncSession,
    search_space_id: int,
) -> int:
    """Return how many shared memories a search space currently holds.

    Uses SQL COUNT(*) instead of loading every row into memory, so the cost
    no longer grows with the number of stored memories (the previous
    implementation materialized all rows just to call len() on them).

    Args:
        db_session: Async SQLAlchemy session used for the query.
        search_space_id: Search space whose memories are counted.

    Returns:
        The number of SharedMemory rows for the search space.
    """
    result = await db_session.execute(
        select(func.count())
        .select_from(SharedMemory)
        .where(SharedMemory.search_space_id == search_space_id)
    )
    return int(result.scalar_one())
async def delete_oldest_shared_memory(
    db_session: AsyncSession,
    search_space_id: int,
) -> None:
    """Evict the least-recently-updated shared memory in a search space.

    No-op when the space has no memories; otherwise deletes exactly one row
    (the one with the smallest updated_at) and commits.
    """
    stmt = (
        select(SharedMemory)
        .where(SharedMemory.search_space_id == search_space_id)
        .order_by(SharedMemory.updated_at.asc())
        .limit(1)
    )
    victim = (await db_session.execute(stmt)).scalars().first()
    if victim is None:
        return
    await db_session.delete(victim)
    await db_session.commit()
def _to_uuid(value: str | UUID) -> UUID:
if isinstance(value, UUID):
return value
return UUID(value)
async def save_shared_memory(
    db_session: AsyncSession,
    search_space_id: int,
    created_by_id: str | UUID,
    content: str,
    category: str = "fact",
) -> dict[str, Any]:
    """Persist a team-visible memory for a search space.

    Normalizes the category (unknown values fall back to "fact"), evicts the
    least-recently-updated memory when the per-space cap is reached, embeds
    the text for semantic recall, and inserts the row.

    Args:
        db_session: Async SQLAlchemy session used for all queries.
        search_space_id: Search space the memory belongs to.
        created_by_id: ID (UUID or its string form) of the user saving it.
        content: The memory text to store.
        category: One of "preference", "fact", "instruction", "context".

    Returns:
        On success, a dict with the new memory id, echoed text/category, and
        a confirmation message; on failure, a dict with the error string.
    """
    # Normalize and validate the category; anything unrecognized becomes "fact".
    category = category.lower() if category else "fact"
    valid = ["preference", "fact", "instruction", "context"]
    if category not in valid:
        category = "fact"
    try:
        # Enforce the per-space cap by evicting the oldest memory first.
        count = await get_shared_memory_count(db_session, search_space_id)
        if count >= MAX_MEMORIES_PER_SEARCH_SPACE:
            await delete_oldest_shared_memory(db_session, search_space_id)
        # Embed via the configured model so recall can rank by vector distance.
        embedding = config.embedding_model_instance.embed(content)
        row = SharedMemory(
            search_space_id=search_space_id,
            created_by_id=_to_uuid(created_by_id),
            memory_text=content,
            category=MemoryCategory(category),
            embedding=embedding,
        )
        db_session.add(row)
        await db_session.commit()
        # Refresh to populate server-side values (id, timestamps).
        await db_session.refresh(row)
        return {
            "status": "saved",
            "memory_id": row.id,
            "memory_text": content,
            "category": category,
            "message": f"I'll remember: {content}",
        }
    except Exception as e:
        logger.exception("Failed to save shared memory: %s", e)
        # Roll back so the session stays usable for subsequent tool calls.
        await db_session.rollback()
        return {
            "status": "error",
            "error": str(e),
            "message": "Failed to save memory. Please try again.",
        }
async def recall_shared_memory(
    db_session: AsyncSession,
    search_space_id: int,
    query: str | None = None,
    category: str | None = None,
    top_k: int = DEFAULT_RECALL_TOP_K,
) -> dict[str, Any]:
    """Fetch team memories for a search space, optionally ranked by a query.

    With a query, results are ordered by vector distance between the query
    embedding and each stored memory's embedding; without one, the most
    recently updated memories are returned. An optional category filter
    narrows the search. Author display names are resolved so the formatted
    context can attribute each memory.

    Args:
        db_session: Async SQLAlchemy session used for all queries.
        search_space_id: Search space whose memories are searched.
        query: Optional semantic search text; None returns recent memories.
        category: Optional filter; ignored unless it is a known category.
        top_k: Number of memories to return (clamped to 1..20).

    Returns:
        A dict with the memory rows, a prompt-ready formatted context string,
        and a count; on failure, an error payload instead.
    """
    # Clamp to a sane range regardless of what the caller (the model) passed.
    top_k = min(max(top_k, 1), 20)
    try:
        valid_categories = ["preference", "fact", "instruction", "context"]
        stmt = select(SharedMemory).where(
            SharedMemory.search_space_id == search_space_id
        )
        # Unknown categories are silently ignored rather than raising.
        if category and category in valid_categories:
            stmt = stmt.where(SharedMemory.category == MemoryCategory(category))
        if query:
            query_embedding = config.embedding_model_instance.embed(query)
            # "<=>" is a pgvector distance operator (smaller = more similar);
            # presumably cosine distance — confirm against the index opclass.
            stmt = stmt.order_by(
                SharedMemory.embedding.op("<=>")(query_embedding)
            ).limit(top_k)
        else:
            stmt = stmt.order_by(SharedMemory.updated_at.desc()).limit(top_k)
        result = await db_session.execute(stmt)
        rows = result.scalars().all()
        memory_list = [
            {
                "id": m.id,
                "memory_text": m.memory_text,
                "category": m.category.value if m.category else "unknown",
                "updated_at": m.updated_at.isoformat() if m.updated_at else None,
                "created_by_id": str(m.created_by_id) if m.created_by_id else None,
            }
            for m in rows
        ]
        # Resolve author display names in one batched query for attribution.
        created_by_ids = list(
            {m["created_by_id"] for m in memory_list if m["created_by_id"]}
        )
        created_by_map: dict[str, str] = {}
        if created_by_ids:
            uuids = [UUID(uid) for uid in created_by_ids]
            users_result = await db_session.execute(
                select(User).where(User.id.in_(uuids))
            )
            for u in users_result.scalars().all():
                created_by_map[str(u.id)] = u.display_name or "A team member"
        formatted_context = format_shared_memories_for_context(
            memory_list, created_by_map
        )
        return {
            "status": "success",
            "count": len(memory_list),
            "memories": memory_list,
            "formatted_context": formatted_context,
        }
    except Exception as e:
        logger.exception("Failed to recall shared memory: %s", e)
        # Roll back so the session stays usable after a failed query.
        await db_session.rollback()
        return {
            "status": "error",
            "error": str(e),
            "memories": [],
            "formatted_context": "Failed to recall memories.",
        }
def format_shared_memories_for_context(
memories: list[dict[str, Any]],
created_by_map: dict[str, str] | None = None,
) -> str:
if not memories:
return "No relevant team memories found."
created_by_map = created_by_map or {}
parts = ["<team_memories>"]
for memory in memories:
category = memory.get("category", "unknown")
text = memory.get("memory_text", "")
updated = memory.get("updated_at", "")
created_by_id = memory.get("created_by_id")
added_by = (
created_by_map.get(str(created_by_id), "A team member")
if created_by_id is not None
else "A team member"
)
parts.append(
f" <memory category='{category}' updated='{updated}' added_by='{added_by}'>{text}</memory>"
)
parts.append("</team_memories>")
return "\n".join(parts)
def create_save_shared_memory_tool(
    search_space_id: int,
    created_by_id: str | UUID,
    db_session: AsyncSession,
):
    """
    Factory function to create the save_memory tool for shared (team) chats.
    Args:
        search_space_id: The search space ID
        created_by_id: The user ID of the person adding the memory
        db_session: Database session for executing queries
    Returns:
        A configured tool function for saving team memories
    """
    # NOTE: the inner function's docstring doubles as the tool description the
    # LLM reads via the @tool decorator, so it is effectively runtime behavior
    # — edit it only to change what the model is told.
    @tool
    async def save_memory(
        content: str,
        category: str = "fact",
    ) -> dict[str, Any]:
        """
        Save a fact, preference, or context to the team's shared memory for future reference.
        Use this tool when:
        - User or a team member says "remember this", "keep this in mind", or similar in this shared chat
        - The team agrees on something to remember (e.g., decisions, conventions, where things live)
        - Someone shares a preference or fact that should be visible to the whole team
        The saved information will be available in future shared conversations in this space.
        Args:
            content: The fact/preference/context to remember.
                Phrase it clearly, e.g., "API keys are stored in Vault",
                "The team prefers weekly demos on Fridays"
            category: Type of memory. One of:
                - "preference": Team or workspace preferences
                - "fact": Facts the team agreed on (e.g., processes, locations)
                - "instruction": Standing instructions for the team
                - "context": Current context (e.g., ongoing projects, goals)
        Returns:
            A dictionary with the save status and memory details
        """
        # Delegate to the module-level implementation with the captured
        # search-space/user context from the factory closure.
        return await save_shared_memory(
            db_session, search_space_id, created_by_id, content, category
        )
    return save_memory
def create_recall_shared_memory_tool(
    search_space_id: int,
    db_session: AsyncSession,
):
    """
    Factory function to create the recall_memory tool for shared (team) chats.
    Args:
        search_space_id: The search space ID
        db_session: Database session for executing queries
    Returns:
        A configured tool function for recalling team memories
    """
    # NOTE: the inner function's docstring doubles as the tool description the
    # LLM reads via the @tool decorator, so it is effectively runtime behavior
    # — edit it only to change what the model is told.
    @tool
    async def recall_memory(
        query: str | None = None,
        category: str | None = None,
        top_k: int = DEFAULT_RECALL_TOP_K,
    ) -> dict[str, Any]:
        """
        Recall relevant team memories for this space to provide contextual responses.
        Use this tool when:
        - You need team context to answer (e.g., "where do we store X?", "what did we decide about Y?")
        - Someone asks about something the team agreed to remember
        - Team preferences or conventions would improve the response
        Args:
            query: Optional search query to find specific memories.
                If not provided, returns the most recent memories.
            category: Optional category filter. One of:
                "preference", "fact", "instruction", "context"
            top_k: Number of memories to retrieve (default: 5, max: 20)
        Returns:
            A dictionary containing relevant memories and formatted context
        """
        # Delegate to the module-level implementation with the captured
        # search-space context (no user scoping: memories are team-wide).
        return await recall_shared_memory(
            db_session, search_space_id, query, category, top_k
        )
    return recall_memory

View file

@ -1,7 +1,17 @@
import logging
import time
from collections import defaultdict
from contextlib import asynccontextmanager
from threading import Lock
import redis
from fastapi import Depends, FastAPI, HTTPException, Request, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from slowapi import Limiter
from slowapi.errors import RateLimitExceeded
from slowapi.middleware import SlowAPIASGIMiddleware
from slowapi.util import get_remote_address
from sqlalchemy.ext.asyncio import AsyncSession
from uvicorn.middleware.proxy_headers import ProxyHeadersMiddleware
@ -17,6 +27,147 @@ from app.schemas import UserCreate, UserRead, UserUpdate
from app.tasks.surfsense_docs_indexer import seed_surfsense_docs
from app.users import SECRET, auth_backend, current_active_user, fastapi_users
rate_limit_logger = logging.getLogger("surfsense.rate_limit")
# ============================================================================
# Rate Limiting Configuration (SlowAPI + Redis)
# ============================================================================
# Uses the same Redis instance as Celery for zero additional infrastructure.
# Protects auth endpoints from brute force and user enumeration attacks.
# SlowAPI limiter — provides default rate limits (60/min) for ALL routes
# via the ASGI middleware. This is the general safety net.
limiter = Limiter(
key_func=get_remote_address,
storage_uri=config.REDIS_APP_URL,
default_limits=["60/minute"],
)
def _rate_limit_exceeded_handler(request: Request, exc: RateLimitExceeded):
    """Custom 429 handler that returns JSON matching our frontend error format.

    SlowAPI's detail string looks like "60 per 1 minute". RFC 9110 requires
    the Retry-After header to be delta-seconds (or an HTTP-date), so the
    window phrase is converted to seconds instead of being echoed verbatim
    (the previous behavior emitted invalid values like "1 minute").
    """
    # Seconds per window unit as phrased by the limits library.
    unit_seconds = {"second": 1, "minute": 60, "hour": 3600, "day": 86400}
    retry_after = "60"  # conservative fallback when the detail can't be parsed
    if exc.detail and "per" in exc.detail:
        window = exc.detail.split("per")[-1].strip()  # e.g. "1 minute"
        tokens = window.split()
        if tokens:
            count = int(tokens[0]) if tokens[0].isdigit() else 1
            unit = tokens[-1].rstrip("s")  # normalize "minutes" -> "minute"
            retry_after = str(count * unit_seconds.get(unit, 60))
    return JSONResponse(
        status_code=429,
        content={"detail": "RATE_LIMIT_EXCEEDED"},
        headers={"Retry-After": retry_after},
    )
# ============================================================================
# Auth-Specific Rate Limits (Redis-backed with in-memory fallback)
# ============================================================================
# Stricter per-IP limits on auth endpoints to prevent:
# - Brute force password attacks
# - User enumeration via REGISTER_USER_ALREADY_EXISTS
# - Email spam via forgot-password
#
# Primary: Redis INCR+EXPIRE (shared across all workers).
# Fallback: In-memory sliding window (per-worker) when Redis is unavailable.
# Same Redis instance as SlowAPI / Celery.
_rate_limit_redis: redis.Redis | None = None
# In-memory fallback rate limiter (per-worker, used only when Redis is down)
_memory_rate_limits: dict[str, list[float]] = defaultdict(list)
_memory_lock = Lock()
def _get_rate_limit_redis() -> redis.Redis:
    """Get or create Redis client for auth rate limiting.

    Lazily creates a single module-level client on first use and reuses it
    for all subsequent checks; decode_responses=True makes string replies
    come back as str rather than bytes.
    """
    global _rate_limit_redis
    if _rate_limit_redis is None:
        _rate_limit_redis = redis.from_url(config.REDIS_APP_URL, decode_responses=True)
    return _rate_limit_redis
def _check_rate_limit_memory(
    client_ip: str, max_requests: int, window_seconds: int, scope: str
):
    """
    In-memory fallback rate limiter using a sliding window.
    Used only when Redis is unavailable. Per-worker only (not shared),
    so effective limit = max_requests x num_workers.

    Raises HTTPException(429) when the window already holds max_requests
    timestamps; otherwise records this request.

    NOTE(review): keys for IPs that stop sending requests are never removed,
    so _memory_rate_limits grows unboundedly over a long process lifetime —
    confirm whether periodic pruning is needed.
    """
    key = f"{scope}:{client_ip}"
    # monotonic() is immune to wall-clock adjustments (NTP), so window
    # boundaries can't be skewed by clock changes.
    now = time.monotonic()
    with _memory_lock:
        # Evict timestamps outside the current window
        _memory_rate_limits[key] = [
            t for t in _memory_rate_limits[key] if now - t < window_seconds
        ]
        if len(_memory_rate_limits[key]) >= max_requests:
            rate_limit_logger.warning(
                f"Rate limit exceeded (in-memory fallback) on {scope} for IP {client_ip} "
                f"({len(_memory_rate_limits[key])}/{max_requests} in {window_seconds}s)"
            )
            raise HTTPException(
                status_code=429,
                detail="RATE_LIMIT_EXCEEDED",
            )
        # Record this request only after it has been allowed through.
        _memory_rate_limits[key].append(now)
def _check_rate_limit(
    request: Request, max_requests: int, window_seconds: int, scope: str
):
    """
    Check per-IP rate limit using Redis. Raises 429 if exceeded.

    Implements a fixed window: INCR the per-IP counter, and set its TTL only
    when the window starts. The previous implementation called EXPIRE on
    every request, which refreshed the TTL each time and turned the window
    into a sliding one — a steady trickle of requests kept the counter alive
    (and the client blocked) indefinitely.

    Falls back to the in-memory sliding window if Redis is unavailable.
    """
    client_ip = get_remote_address(request)
    key = f"surfsense:auth_rate_limit:{scope}:{client_ip}"
    try:
        r = _get_rate_limit_redis()
        current_count = r.incr(key)
        # Start the window on the first hit. Also repair keys that somehow
        # lost their TTL (e.g. a crash between INCR and EXPIRE), which would
        # otherwise rate-limit the client forever.
        if current_count == 1 or r.ttl(key) == -1:
            r.expire(key, window_seconds)
    except (redis.exceptions.RedisError, OSError) as exc:
        # Redis unavailable — fall back to in-memory rate limiting
        rate_limit_logger.warning(
            f"Redis unavailable for rate limiting ({scope}), "
            f"falling back to in-memory limiter for {client_ip}: {exc}"
        )
        _check_rate_limit_memory(client_ip, max_requests, window_seconds, scope)
        return
    if current_count > max_requests:
        rate_limit_logger.warning(
            f"Rate limit exceeded on {scope} for IP {client_ip} "
            f"({current_count}/{max_requests} in {window_seconds}s)"
        )
        raise HTTPException(
            status_code=429,
            detail="RATE_LIMIT_EXCEEDED",
        )
def rate_limit_login(request: Request):
    """5 login attempts per minute per IP."""
    # Strict limit on the credential endpoint to slow brute-force attacks.
    _check_rate_limit(request, max_requests=5, window_seconds=60, scope="login")
def rate_limit_register(request: Request):
    """3 registration attempts per minute per IP."""
    # Limits user-enumeration probing via REGISTER_USER_ALREADY_EXISTS.
    _check_rate_limit(request, max_requests=3, window_seconds=60, scope="register")
def rate_limit_password_reset(request: Request):
    """2 password reset attempts per minute per IP."""
    # Tightest limit: each request can trigger an outbound email.
    _check_rate_limit(
        request, max_requests=2, window_seconds=60, scope="password_reset"
    )
@asynccontextmanager
async def lifespan(app: FastAPI):
@ -45,6 +196,14 @@ def registration_allowed():
app = FastAPI(lifespan=lifespan)
# Register rate limiter and custom 429 handler
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
# Add SlowAPI ASGI middleware for automatic rate limiting
# This applies default_limits to all routes and enables per-route overrides
app.add_middleware(SlowAPIASGIMiddleware)
# Add ProxyHeaders middleware FIRST to trust proxy headers (e.g., from Cloudflare)
# This ensures FastAPI uses HTTPS in redirects when behind a proxy
app.add_middleware(ProxyHeadersMiddleware, trusted_hosts="*")
@ -90,18 +249,25 @@ app.add_middleware(
)
app.include_router(
fastapi_users.get_auth_router(auth_backend), prefix="/auth/jwt", tags=["auth"]
fastapi_users.get_auth_router(auth_backend),
prefix="/auth/jwt",
tags=["auth"],
dependencies=[Depends(rate_limit_login)],
)
app.include_router(
fastapi_users.get_register_router(UserRead, UserCreate),
prefix="/auth",
tags=["auth"],
dependencies=[Depends(registration_allowed)], # blocks registration when disabled
dependencies=[
Depends(rate_limit_register),
Depends(registration_allowed), # blocks registration when disabled
],
)
app.include_router(
fastapi_users.get_reset_password_router(),
prefix="/auth",
tags=["auth"],
dependencies=[Depends(rate_limit_password_reset)],
)
app.include_router(
fastapi_users.get_verify_router(UserRead),

View file

@ -213,6 +213,17 @@ class Config:
# Database
DATABASE_URL = os.getenv("DATABASE_URL")
# Celery / Redis
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
CELERY_RESULT_BACKEND = os.getenv(
"CELERY_RESULT_BACKEND", "redis://localhost:6379/0"
)
CELERY_TASK_DEFAULT_QUEUE = os.getenv("CELERY_TASK_DEFAULT_QUEUE", "surfsense")
REDIS_APP_URL = os.getenv("REDIS_APP_URL", CELERY_BROKER_URL)
CONNECTOR_INDEXING_LOCK_TTL_SECONDS = int(
os.getenv("CONNECTOR_INDEXING_LOCK_TTL_SECONDS", str(8 * 60 * 60))
)
NEXT_FRONTEND_URL = os.getenv("NEXT_FRONTEND_URL")
# Backend URL to override the http to https in the OAuth redirect URI
BACKEND_URL = os.getenv("BACKEND_URL")

View file

@ -27,6 +27,12 @@ T = TypeVar("T")
MAX_RETRIES = 5
BASE_RETRY_DELAY = 1.0 # seconds
MAX_RETRY_DELAY = 60.0 # seconds (Notion's max request timeout)
MAX_RATE_LIMIT_WAIT_SECONDS = float(
getattr(config, "NOTION_MAX_RETRY_AFTER_SECONDS", 30.0)
)
MAX_TOTAL_RETRY_WAIT_SECONDS = float(
getattr(config, "NOTION_MAX_TOTAL_RETRY_WAIT_SECONDS", 120.0)
)
# Type alias for retry callback function
# Signature: async callback(retry_reason, attempt, max_attempts, wait_seconds) -> None
@ -292,6 +298,7 @@ class NotionHistoryConnector:
"""
last_exception: APIResponseError | None = None
retry_delay = BASE_RETRY_DELAY
total_wait_time = 0.0
for attempt in range(MAX_RETRIES):
try:
@ -325,6 +332,15 @@ class NotionHistoryConnector:
wait_time = retry_delay
else:
wait_time = retry_delay
# Avoid very long worker sleeps from external Retry-After values.
if wait_time > MAX_RATE_LIMIT_WAIT_SECONDS:
logger.warning(
f"Notion Retry-After ({wait_time}s) exceeds cap "
f"({MAX_RATE_LIMIT_WAIT_SECONDS}s). Clamping wait time."
)
wait_time = MAX_RATE_LIMIT_WAIT_SECONDS
logger.warning(
f"Notion API rate limited (429). "
f"Waiting {wait_time}s. Attempt {attempt + 1}/{MAX_RETRIES}"
@ -348,6 +364,14 @@ class NotionHistoryConnector:
# Notify about retry via callback (for user notifications)
# Call before sleeping so user sees the message while we wait
if total_wait_time + wait_time > MAX_TOTAL_RETRY_WAIT_SECONDS:
logger.error(
"Notion API retry budget exceeded "
f"({total_wait_time + wait_time:.1f}s > "
f"{MAX_TOTAL_RETRY_WAIT_SECONDS:.1f}s). Failing fast."
)
raise
if on_retry:
try:
await on_retry(
@ -362,6 +386,7 @@ class NotionHistoryConnector:
# Wait before retrying
await asyncio.sleep(wait_time)
total_wait_time += wait_time
# Exponential backoff for next attempt
retry_delay = min(retry_delay * 2, MAX_RETRY_DELAY)

View file

@ -211,6 +211,7 @@ class LiteLLMProvider(str, Enum):
DATABRICKS = "DATABRICKS"
COMETAPI = "COMETAPI"
HUGGINGFACE = "HUGGINGFACE"
GITHUB_MODELS = "GITHUB_MODELS"
CUSTOM = "CUSTOM"
@ -272,19 +273,19 @@ INCENTIVE_TASKS_CONFIG = {
IncentiveTaskType.GITHUB_STAR: {
"title": "Star our GitHub repository",
"description": "Show your support by starring SurfSense on GitHub",
"pages_reward": 100,
"pages_reward": 30,
"action_url": "https://github.com/MODSetter/SurfSense",
},
IncentiveTaskType.REDDIT_FOLLOW: {
"title": "Join our Subreddit",
"description": "Join the SurfSense community on Reddit",
"pages_reward": 100,
"pages_reward": 30,
"action_url": "https://www.reddit.com/r/SurfSense/",
},
IncentiveTaskType.DISCORD_JOIN: {
"title": "Join our Discord",
"description": "Join the SurfSense community on Discord",
"pages_reward": 100,
"pages_reward": 40,
"action_url": "https://discord.gg/ejRNvftDp9",
},
# Future tasks can be configured here:
@ -801,9 +802,8 @@ class MemoryCategory(str, Enum):
class UserMemory(BaseModel, TimestampMixin):
"""
Stores facts, preferences, and context about users for personalized AI responses.
Similar to Claude's memory feature - enables the AI to remember user information
across conversations.
Private memory: facts, preferences, context per user per search space.
Used only for private chats (not shared/team chats).
"""
__tablename__ = "user_memories"
@ -847,6 +847,40 @@ class UserMemory(BaseModel, TimestampMixin):
search_space = relationship("SearchSpace", back_populates="user_memories")
class SharedMemory(BaseModel, TimestampMixin):
    """Team-visible memory row, scoped to a search space rather than a user.

    Counterpart to UserMemory for shared/team chats: rows are written and
    read by the shared-memory tools and are visible to the whole space.
    """

    __tablename__ = "shared_memories"

    # Owning search space; rows are removed when the space is deleted.
    search_space_id = Column(
        Integer,
        ForeignKey("searchspaces.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    # User who saved the memory — used for attribution in recall output.
    created_by_id = Column(
        UUID(as_uuid=True),
        ForeignKey("user.id", ondelete="CASCADE"),
        nullable=False,
        index=True,
    )
    memory_text = Column(Text, nullable=False)
    category = Column(
        SQLAlchemyEnum(MemoryCategory),
        nullable=False,
        default=MemoryCategory.fact,
    )
    # pgvector embedding sized to the configured embedding model's dimension.
    embedding = Column(Vector(config.embedding_model_instance.dimension))
    # NOTE(review): redefines updated_at — presumably also provided by
    # TimestampMixin — to add index=True for recency ordering; confirm the
    # override is intentional.
    updated_at = Column(
        TIMESTAMP(timezone=True),
        nullable=False,
        default=lambda: datetime.now(UTC),
        onupdate=lambda: datetime.now(UTC),
        index=True,
    )

    search_space = relationship("SearchSpace", back_populates="shared_memories")
    created_by = relationship("User")
class Document(BaseModel, TimestampMixin):
__tablename__ = "documents"
@ -1215,6 +1249,12 @@ class SearchSpace(BaseModel, TimestampMixin):
order_by="UserMemory.updated_at.desc()",
cascade="all, delete-orphan",
)
shared_memories = relationship(
"SharedMemory",
back_populates="search_space",
order_by="SharedMemory.updated_at.desc()",
cascade="all, delete-orphan",
)
class SearchSourceConnector(BaseModel, TimestampMixin):
@ -1265,7 +1305,7 @@ class NewLLMConfig(BaseModel, TimestampMixin):
- Configurable system instructions (defaults to SURFSENSE_SYSTEM_INSTRUCTIONS)
- Citation toggle (enable/disable citation instructions)
Note: SURFSENSE_TOOLS_INSTRUCTIONS is always used and not configurable.
Note: Tools instructions are built by get_tools_instructions(thread_visibility) (personal vs shared memory).
"""
__tablename__ = "new_llm_configs"

View file

@ -19,6 +19,8 @@ from app.db import (
from app.schemas import (
DocumentRead,
DocumentsCreate,
DocumentStatusBatchResponse,
DocumentStatusItemRead,
DocumentStatusSchema,
DocumentTitleRead,
DocumentTitleSearchResponse,
@ -148,6 +150,7 @@ async def create_documents_file_upload(
tuple[Document, str, str]
] = [] # (document, temp_path, filename)
skipped_duplicates = 0
duplicate_document_ids: list[int] = []
# ===== PHASE 1: Create pending documents for all files =====
# This makes ALL documents visible in the UI immediately with pending status
@ -182,6 +185,7 @@ async def create_documents_file_upload(
# True duplicate — content already indexed, skip
os.unlink(temp_path)
skipped_duplicates += 1
duplicate_document_ids.append(existing.id)
continue
# Existing document is stuck (failed/pending/processing)
@ -255,6 +259,7 @@ async def create_documents_file_upload(
return {
"message": "Files uploaded for processing",
"document_ids": [doc.id for doc in created_documents],
"duplicate_document_ids": duplicate_document_ids,
"total_files": len(files),
"pending_files": len(files_to_process),
"skipped_duplicates": skipped_duplicates,
@ -678,6 +683,74 @@ async def search_document_titles(
) from e
@router.get("/documents/status", response_model=DocumentStatusBatchResponse)
async def get_documents_status(
    search_space_id: int,
    document_ids: str,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Batch status endpoint for documents in a search space.
    Returns lightweight status info for the provided document IDs, intended for
    polling async ETL progress in chat upload flows.

    Query params:
        search_space_id: Space the documents must belong to (also permission scope).
        document_ids: Comma-separated document IDs, e.g. "1,2,3".

    Raises 400 for a non-integer ID, 403-style errors via check_permission,
    and 500 for unexpected failures. IDs that don't exist (or belong to a
    different space) are silently omitted from the response.
    """
    try:
        await check_permission(
            session,
            user,
            search_space_id,
            Permission.DOCUMENTS_READ.value,
            "You don't have permission to read documents in this search space",
        )
        # Parse comma-separated IDs (e.g. "1,2,3")
        parsed_ids = []
        for raw_id in document_ids.split(","):
            value = raw_id.strip()
            # Tolerate empty segments such as trailing commas ("1,2,").
            if not value:
                continue
            try:
                parsed_ids.append(int(value))
            except ValueError:
                raise HTTPException(
                    status_code=400,
                    detail=f"Invalid document id: {value}",
                ) from None
        if not parsed_ids:
            return DocumentStatusBatchResponse(items=[])
        # Scoping by search_space_id prevents reading documents from spaces
        # the permission check didn't cover.
        result = await session.execute(
            select(Document).filter(
                Document.search_space_id == search_space_id,
                Document.id.in_(parsed_ids),
            )
        )
        docs = result.scalars().all()
        items = [
            DocumentStatusItemRead(
                id=doc.id,
                title=doc.title,
                document_type=doc.document_type,
                # doc.status may be None (presumably pre-status-tracking rows)
                # — treated as "ready"; confirm against the writer side.
                status=DocumentStatusSchema(
                    state=(doc.status or {}).get("state", "ready"),
                    reason=(doc.status or {}).get("reason"),
                ),
            )
            for doc in docs
        ]
        return DocumentStatusBatchResponse(items=items)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Failed to fetch document status: {e!s}"
        ) from e
@router.get("/documents/type-counts")
async def get_document_type_counts(
search_space_id: int | None = None,

View file

@ -8,16 +8,11 @@ These endpoints support the ThreadHistoryAdapter pattern from assistant-ui:
- PUT /threads/{thread_id} - Update thread (rename, archive)
- DELETE /threads/{thread_id} - Delete thread
- POST /threads/{thread_id}/messages - Append message
- POST /attachments/process - Process attachments for chat context
"""
import contextlib
import os
import tempfile
import uuid
from datetime import UTC, datetime
from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile
from fastapi import APIRouter, Depends, HTTPException, Request
from fastapi.responses import StreamingResponse
from sqlalchemy import func, or_
from sqlalchemy.exc import IntegrityError, OperationalError
@ -1045,12 +1040,13 @@ async def handle_new_chat(
search_space_id=request.search_space_id,
chat_id=request.chat_id,
session=session,
user_id=str(user.id), # Pass user ID for memory tools and session state
user_id=str(user.id),
llm_config_id=llm_config_id,
attachments=request.attachments,
mentioned_document_ids=request.mentioned_document_ids,
mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids,
needs_history_bootstrap=thread.needs_history_bootstrap,
thread_visibility=thread.visibility,
current_user_display_name=user.display_name or "A team member",
),
media_type="text/event-stream",
headers={
@ -1276,11 +1272,12 @@ async def regenerate_response(
session=session,
user_id=str(user.id),
llm_config_id=llm_config_id,
attachments=request.attachments,
mentioned_document_ids=request.mentioned_document_ids,
mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids,
checkpoint_id=target_checkpoint_id,
needs_history_bootstrap=thread.needs_history_bootstrap,
thread_visibility=thread.visibility,
current_user_display_name=user.display_name or "A team member",
):
yield chunk
# If we get here, streaming completed successfully
@ -1329,185 +1326,3 @@ async def regenerate_response(
status_code=500,
detail=f"An unexpected error occurred during regeneration: {e!s}",
) from None
# =============================================================================
# Attachment Processing Endpoint
# =============================================================================
@router.post("/attachments/process")
async def process_attachment(
    file: UploadFile = File(...),
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Process an attachment file and extract its content as markdown.
    This endpoint uses the configured ETL service to parse files and return
    the extracted content that can be used as context in chat messages.
    Supported file types depend on the configured ETL_SERVICE:
    - Markdown/Text files: .md, .markdown, .txt (always supported)
    - Audio files: .mp3, .mp4, .mpeg, .mpga, .m4a, .wav, .webm (if STT configured)
    - Documents: .pdf, .docx, .doc, .pptx, .xlsx (depends on ETL service)
    Returns:
        JSON with attachment id, name, type, and extracted content
    """
    from app.config import config as app_config

    if not file.filename:
        raise HTTPException(status_code=400, detail="No filename provided")

    filename = file.filename
    attachment_id = str(uuid.uuid4())
    # Initialize before the try so the cleanup paths below can't hit a
    # NameError when tempfile creation (or the upload read) itself fails.
    temp_path: str | None = None

    try:
        # Save file to a temporary location
        file_ext = os.path.splitext(filename)[1].lower()
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_ext) as temp_file:
            temp_path = temp_file.name
            content = await file.read()
            temp_file.write(content)

        extracted_content = ""

        # Process based on file type
        if file_ext in (".md", ".markdown", ".txt"):
            # For text/markdown files, read content directly
            with open(temp_path, encoding="utf-8") as f:
                extracted_content = f.read()
        elif file_ext in (".mp3", ".mp4", ".mpeg", ".mpga", ".m4a", ".wav", ".webm"):
            # Audio files - transcribe if STT service is configured
            if not app_config.STT_SERVICE:
                raise HTTPException(
                    status_code=422,
                    detail="Audio transcription is not configured. Please set STT_SERVICE.",
                )
            stt_service_type = (
                "local" if app_config.STT_SERVICE.startswith("local/") else "external"
            )
            if stt_service_type == "local":
                from app.services.stt_service import stt_service

                result = stt_service.transcribe_file(temp_path)
                extracted_content = result.get("text", "")
            else:
                from litellm import atranscription

                with open(temp_path, "rb") as audio_file:
                    transcription_kwargs = {
                        "model": app_config.STT_SERVICE,
                        "file": audio_file,
                        "api_key": app_config.STT_SERVICE_API_KEY,
                    }
                    if app_config.STT_SERVICE_API_BASE:
                        transcription_kwargs["api_base"] = (
                            app_config.STT_SERVICE_API_BASE
                        )
                    transcription_response = await atranscription(
                        **transcription_kwargs
                    )
                extracted_content = transcription_response.get("text", "")
            if extracted_content:
                # Include the real filename in the heading (was a literal
                # "(unknown)" placeholder).
                extracted_content = (
                    f"# Transcription of {filename}\n\n{extracted_content}"
                )
        else:
            # Document files - use configured ETL service
            if app_config.ETL_SERVICE == "UNSTRUCTURED":
                from langchain_unstructured import UnstructuredLoader

                from app.utils.document_converters import convert_document_to_markdown

                loader = UnstructuredLoader(
                    temp_path,
                    mode="elements",
                    post_processors=[],
                    languages=["eng"],
                    include_orig_elements=False,
                    include_metadata=False,
                    strategy="auto",
                )
                docs = await loader.aload()
                extracted_content = await convert_document_to_markdown(docs)
            elif app_config.ETL_SERVICE == "LLAMACLOUD":
                from llama_cloud_services import LlamaParse
                from llama_cloud_services.parse.utils import ResultType

                parser = LlamaParse(
                    api_key=app_config.LLAMA_CLOUD_API_KEY,
                    num_workers=1,
                    verbose=False,
                    language="en",
                    result_type=ResultType.MD,
                )
                result = await parser.aparse(temp_path)
                markdown_documents = await result.aget_markdown_documents(
                    split_by_page=False
                )
                if markdown_documents:
                    extracted_content = "\n\n".join(
                        doc.text for doc in markdown_documents
                    )
            elif app_config.ETL_SERVICE == "DOCLING":
                from app.services.docling_service import create_docling_service

                docling_service = create_docling_service()
                result = await docling_service.process_document(temp_path, filename)
                extracted_content = result.get("content", "")
            else:
                raise HTTPException(
                    status_code=422,
                    detail=f"ETL service not configured or unsupported file type: {file_ext}",
                )

        # Clean up temp file
        if temp_path:
            with contextlib.suppress(Exception):
                os.unlink(temp_path)

        if not extracted_content:
            raise HTTPException(
                status_code=422,
                detail=f"Could not extract content from file: {filename}",
            )

        # Determine attachment type (must be one of: "image", "document", "file")
        # assistant-ui only supports these three types
        if file_ext in (".png", ".jpg", ".jpeg", ".gif", ".webp"):
            attachment_type = "image"
        else:
            # All other files (including audio, documents, text) are treated as "document"
            attachment_type = "document"

        return {
            "id": attachment_id,
            "name": filename,
            "type": attachment_type,
            "contentLength": len(extracted_content),
            "content": extracted_content,
        }
    except HTTPException:
        # Don't leak the temp file when a handled HTTP error (e.g. missing
        # STT config) is raised mid-parse — the old code only cleaned up on
        # unexpected exceptions.
        if temp_path:
            with contextlib.suppress(Exception):
                os.unlink(temp_path)
        raise
    except Exception as e:
        # Clean up temp file on error
        if temp_path:
            with contextlib.suppress(Exception):
                os.unlink(temp_path)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to process attachment: {e!s}",
        ) from e

View file

@ -19,7 +19,7 @@ Non-OAuth connectors (BookStack, GitHub, etc.) are limited to one per search spa
"""
import logging
import os
from contextlib import suppress
from datetime import UTC, datetime, timedelta
from typing import Any
@ -32,6 +32,7 @@ from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.config import config
from app.connectors.github_connector import GitHubConnector
from app.db import (
Permission,
@ -70,6 +71,10 @@ from app.tasks.connector_indexers import (
index_slack_messages,
)
from app.users import current_active_user
from app.utils.indexing_locks import (
acquire_connector_indexing_lock,
release_connector_indexing_lock,
)
from app.utils.periodic_scheduler import (
create_periodic_schedule,
delete_periodic_schedule,
@ -91,11 +96,9 @@ def get_heartbeat_redis_client() -> redis.Redis:
"""Get or create Redis client for heartbeat tracking."""
global _heartbeat_redis_client
if _heartbeat_redis_client is None:
redis_url = os.getenv(
"REDIS_APP_URL",
os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0"),
_heartbeat_redis_client = redis.from_url(
config.REDIS_APP_URL, decode_responses=True
)
_heartbeat_redis_client = redis.from_url(redis_url, decode_responses=True)
return _heartbeat_redis_client
@ -1229,10 +1232,19 @@ async def _run_indexing_with_notifications(
from celery.exceptions import SoftTimeLimitExceeded
notification = None
connector_lock_acquired = False
# Track indexed count for retry notifications and heartbeat
current_indexed_count = 0
try:
connector_lock_acquired = acquire_connector_indexing_lock(connector_id)
if not connector_lock_acquired:
logger.info(
f"Skipping indexing for connector {connector_id} "
"(another worker already holds Redis connector lock)"
)
return
# Get connector info for notification
connector_result = await session.execute(
select(SearchSourceConnector).where(
@ -1558,6 +1570,9 @@ async def _run_indexing_with_notifications(
get_heartbeat_redis_client().delete(heartbeat_key)
except Exception:
pass # Ignore cleanup errors - key will expire anyway
if connector_lock_acquired:
with suppress(Exception):
release_connector_indexing_lock(connector_id)
async def run_notion_indexing_with_new_session(

View file

@ -11,6 +11,8 @@ from .documents import (
DocumentBase,
DocumentRead,
DocumentsCreate,
DocumentStatusBatchResponse,
DocumentStatusItemRead,
DocumentStatusSchema,
DocumentTitleRead,
DocumentTitleSearchResponse,
@ -105,6 +107,8 @@ __all__ = [
# Document schemas
"DocumentBase",
"DocumentRead",
"DocumentStatusBatchResponse",
"DocumentStatusItemRead",
"DocumentStatusSchema",
"DocumentTitleRead",
"DocumentTitleSearchResponse",

View file

@ -99,3 +99,20 @@ class DocumentTitleSearchResponse(BaseModel):
items: list[DocumentTitleRead]
has_more: bool
class DocumentStatusItemRead(BaseModel):
    """Lightweight document status payload for batch status polling."""

    # Primary key of the document row.
    id: int
    # Document title as stored/displayed.
    title: str
    # Source/connector type of the document.
    document_type: DocumentType
    # Current processing status (pending → processing → ready/failed).
    status: DocumentStatusSchema

    # Allow construction directly from ORM objects (attribute access).
    model_config = ConfigDict(from_attributes=True)
class DocumentStatusBatchResponse(BaseModel):
    """Batch status response for a set of document IDs."""

    # Status entries for the requested documents.
    items: list[DocumentStatusItemRead]

View file

@ -159,15 +159,6 @@ class ChatMessage(BaseModel):
content: str
class ChatAttachment(BaseModel):
    """An attachment with its extracted content for chat context.

    Carries the text already extracted from an uploaded file so it can be
    injected into the agent's prompt context.
    """

    id: str  # Unique attachment ID
    name: str  # Original filename
    type: str  # Attachment type: document, image, audio
    content: str  # Extracted markdown content from the file
class NewChatRequest(BaseModel):
"""Request schema for the deep agent chat endpoint."""
@ -175,9 +166,6 @@ class NewChatRequest(BaseModel):
user_query: str
search_space_id: int
messages: list[ChatMessage] | None = None # Optional chat history from frontend
attachments: list[ChatAttachment] | None = (
None # Optional attachments with extracted content
)
mentioned_document_ids: list[int] | None = (
None # Optional document IDs mentioned with @ in the chat
)
@ -201,7 +189,6 @@ class RegenerateRequest(BaseModel):
user_query: str | None = (
None # New user query (for edit). None = reload with same query
)
attachments: list[ChatAttachment] | None = None
mentioned_document_ids: list[int] | None = None
mentioned_surfsense_doc_ids: list[int] | None = None

View file

@ -56,6 +56,7 @@ PROVIDER_MAP = {
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"GITHUB_MODELS": "github",
"HUGGINGFACE": "huggingface",
"CUSTOM": "custom",
}

View file

@ -119,6 +119,7 @@ async def validate_llm_config(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai", # GLM needs special handling
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(provider, provider.lower())
model_string = f"{provider_prefix}/{model_name}"
@ -335,6 +336,7 @@ async def get_search_space_llm_instance(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(
llm_config.provider.value, llm_config.provider.value.lower()

View file

@ -36,11 +36,9 @@ def _get_doc_heartbeat_redis():
global _doc_heartbeat_redis
if _doc_heartbeat_redis is None:
redis_url = os.getenv(
"REDIS_APP_URL",
os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0"),
_doc_heartbeat_redis = redis.from_url(
config.REDIS_APP_URL, decode_responses=True
)
_doc_heartbeat_redis = redis.from_url(redis_url, decode_responses=True)
return _doc_heartbeat_redis

View file

@ -46,16 +46,10 @@ def get_celery_session_maker():
def _clear_generating_podcast(search_space_id: int) -> None:
"""Clear the generating podcast marker from Redis when task completes."""
import os
import redis
try:
redis_url = os.getenv(
"REDIS_APP_URL",
os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0"),
)
client = redis.from_url(redis_url, decode_responses=True)
client = redis.from_url(config.REDIS_APP_URL, decode_responses=True)
key = f"podcast:generating:{search_space_id}"
client.delete(key)
logger.info(

View file

@ -9,7 +9,8 @@ from sqlalchemy.pool import NullPool
from app.celery_app import celery_app
from app.config import config
from app.db import SearchSourceConnector, SearchSourceConnectorType
from app.db import Notification, SearchSourceConnector, SearchSourceConnectorType
from app.utils.indexing_locks import is_connector_indexing_locked
logger = logging.getLogger(__name__)
@ -107,6 +108,32 @@ async def _check_and_trigger_schedules():
# Trigger indexing for each due connector
for connector in due_connectors:
# Primary guard: Redis lock indicates a task is currently running.
if is_connector_indexing_locked(connector.id):
logger.info(
f"Skipping periodic indexing for connector {connector.id} "
"(Redis lock indicates indexing is already in progress)"
)
continue
# Skip scheduling if a sync for this connector is already in progress.
# This prevents duplicate tasks from piling up under slow/rate-limited providers.
in_progress_result = await session.execute(
select(Notification.id).where(
Notification.type == "connector_indexing",
Notification.notification_metadata["connector_id"].astext
== str(connector.id),
Notification.notification_metadata["status"].astext
== "in_progress",
)
)
if in_progress_result.first():
logger.info(
f"Skipping periodic indexing for connector {connector.id} "
"(already has in-progress indexing notification)"
)
continue
task = task_map.get(connector.connector_type)
if task:
logger.info(

View file

@ -25,7 +25,6 @@ Detection mechanism:
import contextlib
import json
import logging
import os
from datetime import UTC, datetime
import redis
@ -52,11 +51,7 @@ def get_redis_client() -> redis.Redis:
"""Get or create Redis client for heartbeat checking."""
global _redis_client
if _redis_client is None:
redis_url = os.getenv(
"REDIS_APP_URL",
os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0"),
)
_redis_client = redis.from_url(redis_url, decode_responses=True)
_redis_client = redis.from_url(config.REDIS_APP_URL, decode_responses=True)
return _redis_client

View file

@ -26,9 +26,8 @@ from app.agents.new_chat.llm_config import (
load_agent_config,
load_llm_config_from_yaml,
)
from app.db import Document, SurfsenseDocsDocument
from app.db import ChatVisibility, Document, SurfsenseDocsDocument
from app.prompts import TITLE_GENERATION_PROMPT_TEMPLATE
from app.schemas.new_chat import ChatAttachment
from app.services.chat_session_state_service import (
clear_ai_responding,
set_ai_responding,
@ -38,23 +37,6 @@ from app.services.new_streaming_service import VercelStreamingService
from app.utils.content_utils import bootstrap_history_from_db
def format_attachments_as_context(attachments: list[ChatAttachment]) -> str:
    """Render user attachments as an XML-like context block for the agent.

    Each attachment becomes an ``<attachment>`` element carrying its 1-based
    index, name, and type, with the extracted content wrapped in a CDATA
    section. Returns an empty string when there are no attachments.
    """
    if not attachments:
        return ""

    lines = ["<user_attachments>"]
    index = 1
    for item in attachments:
        lines.append(
            f"<attachment index='{index}' name='{item.name}' type='{item.type}'>"
        )
        lines.append(f"<![CDATA[{item.content}]]>")
        lines.append("</attachment>")
        index += 1
    lines.append("</user_attachments>")
    return "\n".join(lines)
def format_mentioned_documents_as_context(documents: list[Document]) -> str:
"""
Format mentioned documents as context for the agent.
@ -203,11 +185,12 @@ async def stream_new_chat(
session: AsyncSession,
user_id: str | None = None,
llm_config_id: int = -1,
attachments: list[ChatAttachment] | None = None,
mentioned_document_ids: list[int] | None = None,
mentioned_surfsense_doc_ids: list[int] | None = None,
checkpoint_id: str | None = None,
needs_history_bootstrap: bool = False,
thread_visibility: ChatVisibility | None = None,
current_user_display_name: str | None = None,
) -> AsyncGenerator[str, None]:
"""
Stream chat responses from the new SurfSense deep agent.
@ -222,7 +205,6 @@ async def stream_new_chat(
session: The database session
user_id: The current user's UUID string (for memory tools and session state)
llm_config_id: The LLM configuration ID (default: -1 for first global config)
attachments: Optional attachments with extracted content
needs_history_bootstrap: If True, load message history from DB (for cloned chats)
mentioned_document_ids: Optional list of document IDs mentioned with @ in the chat
mentioned_surfsense_doc_ids: Optional list of SurfSense doc IDs mentioned with @ in the chat
@ -295,17 +277,18 @@ async def stream_new_chat(
# Get the PostgreSQL checkpointer for persistent conversation memory
checkpointer = await get_checkpointer()
# Create the deep agent with checkpointer and configurable prompts
visibility = thread_visibility or ChatVisibility.PRIVATE
agent = await create_surfsense_deep_agent(
llm=llm,
search_space_id=search_space_id,
db_session=session,
connector_service=connector_service,
checkpointer=checkpointer,
user_id=user_id, # Pass user ID for memory tools
thread_id=chat_id, # Pass chat ID for podcast association
agent_config=agent_config, # Pass prompt configuration
firecrawl_api_key=firecrawl_api_key, # Pass Firecrawl API key if configured
user_id=user_id,
thread_id=chat_id,
agent_config=agent_config,
firecrawl_api_key=firecrawl_api_key,
thread_visibility=visibility,
)
# Build input with message history
@ -313,7 +296,9 @@ async def stream_new_chat(
# Bootstrap history for cloned chats (no LangGraph checkpoint exists yet)
if needs_history_bootstrap:
langchain_messages = await bootstrap_history_from_db(session, chat_id)
langchain_messages = await bootstrap_history_from_db(
session, chat_id, thread_visibility=visibility
)
# Clear the flag so we don't bootstrap again on next message
from app.db import NewChatThread
@ -355,13 +340,10 @@ async def stream_new_chat(
)
mentioned_surfsense_docs = list(result.scalars().all())
# Format the user query with context (attachments + mentioned documents + surfsense docs)
# Format the user query with context (mentioned documents + SurfSense docs)
final_query = user_query
context_parts = []
if attachments:
context_parts.append(format_attachments_as_context(attachments))
if mentioned_documents:
context_parts.append(
format_mentioned_documents_as_context(mentioned_documents)
@ -376,6 +358,9 @@ async def stream_new_chat(
context = "\n\n".join(context_parts)
final_query = f"{context}\n\n<user_query>{user_query}</user_query>"
if visibility == ChatVisibility.SEARCH_SPACE and current_user_display_name:
final_query = f"**[{current_user_display_name}]:** {final_query}"
# if messages:
# # Convert frontend messages to LangChain format
# for msg in messages:
@ -451,39 +436,20 @@ async def stream_new_chat(
last_active_step_id = analyze_step_id
# Determine step title and action verb based on context
if attachments and (mentioned_documents or mentioned_surfsense_docs):
last_active_step_title = "Analyzing your content"
action_verb = "Reading"
elif attachments:
last_active_step_title = "Reading your content"
action_verb = "Reading"
elif mentioned_documents or mentioned_surfsense_docs:
if mentioned_documents or mentioned_surfsense_docs:
last_active_step_title = "Analyzing referenced content"
action_verb = "Analyzing"
else:
last_active_step_title = "Understanding your request"
action_verb = "Processing"
# Build the message with inline context about attachments/documents
# Build the message with inline context about referenced documents
processing_parts = []
# Add the user query
query_text = user_query[:80] + ("..." if len(user_query) > 80 else "")
processing_parts.append(query_text)
# Add file attachment names inline
if attachments:
attachment_names = []
for attachment in attachments:
name = attachment.name
if len(name) > 30:
name = name[:27] + "..."
attachment_names.append(name)
if len(attachment_names) == 1:
processing_parts.append(f"[{attachment_names[0]}]")
else:
processing_parts.append(f"[{len(attachment_names)} files]")
# Add mentioned document names inline
if mentioned_documents:
doc_names = []

View file

@ -52,10 +52,22 @@ def safe_set_chunks(document: Document, chunks: list) -> None:
# Instead of: document.chunks = chunks (DANGEROUS!)
safe_set_chunks(document, chunks) # Always safe
"""
from sqlalchemy.orm import object_session
from sqlalchemy.orm.attributes import set_committed_value
# Keep relationship assignment lazy-load-safe.
set_committed_value(document, "chunks", chunks)
# Ensure chunk rows are actually persisted.
# set_committed_value bypasses normal unit-of-work tracking, so we need to
# explicitly attach chunk objects to the current session.
session = object_session(document)
if session is not None:
if document.id is not None:
for chunk in chunks:
chunk.document_id = document.id
session.add_all(chunks)
def parse_date_flexible(date_str: str) -> datetime:
"""

View file

@ -1,7 +1,10 @@
"""
Microsoft Teams connector indexer.
Implements 2-phase document status updates for real-time UI feedback:
Implements batch indexing: groups up to TEAMS_BATCH_SIZE messages per channel
into a single document for efficient indexing and better conversational context.
Uses 2-phase document status updates for real-time UI feedback:
- Phase 1: Create all documents with 'pending' status (visible in UI immediately)
- Phase 2: Process each document: pending → processing → ready/failed
"""
@ -41,6 +44,72 @@ HeartbeatCallbackType = Callable[[int], Awaitable[None]]
# Heartbeat interval in seconds - update notification every 30 seconds
HEARTBEAT_INTERVAL_SECONDS = 30
# Number of messages to combine into a single document for batch indexing.
# Grouping messages improves conversational context in embeddings/chunks and
# drastically reduces the number of documents, embedding calls, and DB overhead.
TEAMS_BATCH_SIZE = 100
def _build_batch_document_string(
    team_name: str,
    team_id: str,
    channel_name: str,
    channel_id: str,
    messages: list[dict],
) -> str:
    """
    Combine multiple Teams messages into a single document string.

    Each message is formatted with its timestamp and author, and all messages
    are concatenated into a conversation-style document. The chunker will
    later split this into overlapping windows of consecutive messages,
    preserving conversational context in each chunk's embedding.

    Args:
        team_name: Name of the Microsoft Team
        team_id: ID of the Microsoft Team
        channel_name: Name of the channel
        channel_id: ID of the channel
        messages: List of formatted message dicts with 'user_name',
            'created_datetime', 'content'. May be empty, in which case a
        document with MESSAGE_COUNT 0 and no conversation lines is produced.

    Returns:
        Formatted document string with metadata and conversation content
    """
    # Guard against an empty batch so the first/last lookups below cannot
    # raise IndexError (normal callers pass at least one message per batch).
    if messages:
        first_msg_time = messages[0].get("created_datetime", "Unknown")
        last_msg_time = messages[-1].get("created_datetime", "Unknown")
    else:
        first_msg_time = "Unknown"
        last_msg_time = "Unknown"

    metadata_lines = [
        f"TEAM_NAME: {team_name}",
        f"TEAM_ID: {team_id}",
        f"CHANNEL_NAME: {channel_name}",
        f"CHANNEL_ID: {channel_id}",
        f"MESSAGE_COUNT: {len(messages)}",
        f"FIRST_MESSAGE_TIME: {first_msg_time}",
        f"LAST_MESSAGE_TIME: {last_msg_time}",
    ]

    # One "[timestamp] author: content" line per message keeps the whole
    # conversation readable inside a single document body.
    conversation_lines = []
    for msg in messages:
        author = msg.get("user_name", "Unknown User")
        timestamp = msg.get("created_datetime", "Unknown Time")
        content = msg.get("content", "")
        conversation_lines.append(f"[{timestamp}] {author}: {content}")

    metadata_sections = [
        ("METADATA", metadata_lines),
        (
            "CONTENT",
            [
                "FORMAT: markdown",
                "TEXT_START",
                "\n".join(conversation_lines),
                "TEXT_END",
            ],
        ),
    ]

    return build_document_metadata_markdown(metadata_sections)
async def index_teams_messages(
session: AsyncSession,
@ -55,6 +124,12 @@ async def index_teams_messages(
"""
Index Microsoft Teams messages from all accessible teams and channels.
Messages are grouped into batches of TEAMS_BATCH_SIZE per channel,
so each document contains up to 100 consecutive messages with full
conversational context. This reduces document count, embedding calls,
and DB overhead by ~100x while improving search quality through
context-aware chunk embeddings.
Implements 2-phase document status updates for real-time UI feedback:
- Phase 1: Create all documents with 'pending' status (visible in UI immediately)
- Phase 2: Process each document: pending → processing → ready/failed
@ -184,6 +259,7 @@ async def index_teams_messages(
documents_skipped = 0
documents_failed = 0
duplicate_content_count = 0
total_messages_collected = 0
skipped_channels = []
# Heartbeat tracking - update notification periodically to prevent appearing stuck
@ -199,21 +275,21 @@ async def index_teams_messages(
start_datetime = None
end_datetime = None
if start_date_str:
# Parse as naive datetime and make it timezone-aware (UTC)
start_datetime = datetime.strptime(start_date_str, "%Y-%m-%d").replace(
tzinfo=UTC
)
if end_date_str:
# Parse as naive datetime, set to end of day, and make it timezone-aware (UTC)
end_datetime = datetime.strptime(end_date_str, "%Y-%m-%d").replace(
hour=23, minute=59, second=59, tzinfo=UTC
)
# =======================================================================
# PHASE 1: Collect all messages and create pending documents
# This makes ALL documents visible in the UI immediately with pending status
# PHASE 1: Collect messages, group into batches, and create pending documents
# Messages are grouped into batches of TEAMS_BATCH_SIZE per channel.
# Each batch becomes a single document with full conversational context.
# All documents are visible in the UI immediately with pending status.
# =======================================================================
messages_to_process = [] # List of dicts with document and message data
batches_to_process = [] # List of dicts with document and batch data
new_documents_created = False
for team in teams:
@ -251,65 +327,72 @@ async def index_teams_messages(
)
continue
# Process each message
# Format messages for batching
formatted_messages = []
for msg in messages:
# Skip deleted messages or empty content
if msg.get("deletedDateTime"):
continue
# Extract message details
message_id = msg.get("id", "")
created_datetime = msg.get("createdDateTime", "")
from_user = msg.get("from", {})
user_name = from_user.get("user", {}).get(
"displayName", "Unknown User"
)
user_email = from_user.get("user", {}).get(
"userPrincipalName", "Unknown Email"
)
# Extract message content
body = msg.get("body", {})
content_type = body.get("contentType", "text")
msg_text = body.get("content", "")
# Skip empty messages
if not msg_text or msg_text.strip() == "":
continue
# Format document metadata
metadata_sections = [
(
"METADATA",
[
f"TEAM_NAME: {team_name}",
f"TEAM_ID: {team_id}",
f"CHANNEL_NAME: {channel_name}",
f"CHANNEL_ID: {channel_id}",
f"MESSAGE_TIMESTAMP: {created_datetime}",
f"MESSAGE_USER_NAME: {user_name}",
f"MESSAGE_USER_EMAIL: {user_email}",
f"CONTENT_TYPE: {content_type}",
],
),
(
"CONTENT",
[
f"FORMAT: {content_type}",
"TEXT_START",
msg_text,
"TEXT_END",
],
),
]
# Build the document string
combined_document_string = build_document_metadata_markdown(
metadata_sections
formatted_messages.append(
{
"message_id": msg.get("id", ""),
"created_datetime": msg.get("createdDateTime", ""),
"user_name": user_name,
"content": msg_text,
}
)
# Generate unique identifier hash for this Teams message
unique_identifier = f"{team_id}_{channel_id}_{message_id}"
if not formatted_messages:
logger.info(
"No valid messages found in channel %s of team %s after filtering.",
channel_name,
team_name,
)
documents_skipped += 1
continue
total_messages_collected += len(formatted_messages)
# =======================================================
# Group messages into batches of TEAMS_BATCH_SIZE
# Each batch becomes a single document with conversation context
# =======================================================
for batch_start in range(
0, len(formatted_messages), TEAMS_BATCH_SIZE
):
batch = formatted_messages[
batch_start : batch_start + TEAMS_BATCH_SIZE
]
# Build combined document string from all messages in this batch
combined_document_string = _build_batch_document_string(
team_name=team_name,
team_id=team_id,
channel_name=channel_name,
channel_id=channel_id,
messages=batch,
)
# Generate unique identifier for this batch using
# team_id + channel_id + first message id + last message id
first_msg_id = batch[0].get("message_id", "")
last_msg_id = batch[-1].get("message_id", "")
unique_identifier = (
f"{team_id}_{channel_id}_{first_msg_id}_{last_msg_id}"
)
unique_identifier_hash = generate_unique_identifier_hash(
DocumentType.TEAMS_CONNECTOR,
unique_identifier,
@ -331,7 +414,6 @@ async def index_teams_messages(
if existing_document:
# Document exists - check if content has changed
if existing_document.content_hash == content_hash:
# Ensure status is ready (might have been stuck in processing/pending)
if not DocumentStatus.is_state(
existing_document.status, DocumentStatus.READY
):
@ -342,7 +424,7 @@ async def index_teams_messages(
continue
# Queue existing document for update (will be set to processing in Phase 2)
messages_to_process.append(
batches_to_process.append(
{
"document": existing_document,
"is_new": False,
@ -352,14 +434,21 @@ async def index_teams_messages(
"team_id": team_id,
"channel_name": channel_name,
"channel_id": channel_id,
"message_id": message_id,
"first_message_id": first_msg_id,
"last_message_id": last_msg_id,
"first_message_time": batch[0].get(
"created_datetime", "Unknown"
),
"last_message_time": batch[-1].get(
"created_datetime", "Unknown"
),
"message_count": len(batch),
"start_date": start_date_str,
"end_date": end_date_str,
}
)
continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = (
@ -370,9 +459,10 @@ async def index_teams_messages(
if duplicate_by_content:
logger.info(
"Teams message %s in channel %s already indexed by another connector "
"Teams batch (%s msgs) in %s/%s already indexed by another connector "
"(existing document ID: %s, type: %s). Skipping.",
message_id,
len(batch),
team_name,
channel_name,
duplicate_by_content.id,
duplicate_by_content.document_type,
@ -391,6 +481,9 @@ async def index_teams_messages(
"team_id": team_id,
"channel_name": channel_name,
"channel_id": channel_id,
"first_message_id": first_msg_id,
"last_message_id": last_msg_id,
"message_count": len(batch),
"connector_id": connector_id,
},
content="Pending...", # Placeholder until processed
@ -406,7 +499,7 @@ async def index_teams_messages(
session.add(document)
new_documents_created = True
messages_to_process.append(
batches_to_process.append(
{
"document": document,
"is_new": True,
@ -416,12 +509,30 @@ async def index_teams_messages(
"team_id": team_id,
"channel_name": channel_name,
"channel_id": channel_id,
"message_id": message_id,
"first_message_id": first_msg_id,
"last_message_id": last_msg_id,
"first_message_time": batch[0].get(
"created_datetime", "Unknown"
),
"last_message_time": batch[-1].get(
"created_datetime", "Unknown"
),
"message_count": len(batch),
"start_date": start_date_str,
"end_date": end_date_str,
}
)
logger.info(
"Phase 1: Collected %s messages from %s/%s, "
"grouped into %s batch(es)",
len(formatted_messages),
team_name,
channel_name,
(len(formatted_messages) + TEAMS_BATCH_SIZE - 1)
// TEAMS_BATCH_SIZE,
)
except Exception as e:
logger.error(
"Error processing channel %s in team %s: %s",
@ -441,17 +552,20 @@ async def index_teams_messages(
# Commit all pending documents - they all appear in UI now
if new_documents_created:
logger.info(
f"Phase 1: Committing {len([m for m in messages_to_process if m['is_new']])} pending documents"
"Phase 1: Committing %s pending batch documents "
"(%s total messages across all channels)",
len([b for b in batches_to_process if b["is_new"]]),
total_messages_collected,
)
await session.commit()
# =======================================================================
# PHASE 2: Process each document one by one
# PHASE 2: Process each batch document one by one
# Each document transitions: pending → processing → ready/failed
# =======================================================================
logger.info(f"Phase 2: Processing {len(messages_to_process)} documents")
logger.info("Phase 2: Processing %s batch documents", len(batches_to_process))
for item in messages_to_process:
for item in batches_to_process:
# Send heartbeat periodically
if on_heartbeat_callback:
current_time = time.time()
@ -481,6 +595,11 @@ async def index_teams_messages(
"team_id": item["team_id"],
"channel_name": item["channel_name"],
"channel_id": item["channel_id"],
"first_message_id": item["first_message_id"],
"last_message_id": item["last_message_id"],
"first_message_time": item["first_message_time"],
"last_message_time": item["last_message_time"],
"message_count": item["message_count"],
"start_date": item["start_date"],
"end_date": item["end_date"],
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
@ -495,20 +614,25 @@ async def index_teams_messages(
# Batch commit every 10 documents (for ready status updates)
if documents_indexed % 10 == 0:
logger.info(
"Committing batch: %s Teams messages processed so far",
"Committing batch: %s Teams batch documents processed so far",
documents_indexed,
)
await session.commit()
except Exception as e:
logger.error(f"Error processing Teams message: {e!s}", exc_info=True)
logger.error(
"Error processing Teams batch document: %s",
str(e),
exc_info=True,
)
# Mark document as failed with reason (visible in UI)
try:
document.status = DocumentStatus.failed(str(e))
document.updated_at = get_current_timestamp()
except Exception as status_error:
logger.error(
f"Failed to update document status to failed: {status_error}"
"Failed to update document status to failed: %s",
str(status_error),
)
documents_failed += 1
continue
@ -518,7 +642,9 @@ async def index_teams_messages(
# Final commit for any remaining documents not yet committed in batches
logger.info(
"Final commit: Total %s Teams messages processed", documents_indexed
"Final commit: Total %s Teams batch documents processed (from %s messages)",
documents_indexed,
total_messages_collected,
)
try:
await session.commit()
@ -530,8 +656,9 @@ async def index_teams_messages(
or "uniqueviolationerror" in str(e).lower()
):
logger.warning(
f"Duplicate content_hash detected during final commit. "
f"Rolling back and continuing. Error: {e!s}"
"Duplicate content_hash detected during final commit. "
"Rolling back and continuing. Error: %s",
str(e),
)
await session.rollback()
else:
@ -557,13 +684,16 @@ async def index_teams_messages(
"documents_failed": documents_failed,
"duplicate_content_count": duplicate_content_count,
"skipped_channels_count": len(skipped_channels),
"total_messages_collected": total_messages_collected,
"batch_size": TEAMS_BATCH_SIZE,
},
)
logger.info(
"Teams indexing completed: %s ready, %s skipped, %s failed "
"(%s duplicate content)",
"Teams indexing completed: %s batch docs ready (from %s messages), "
"%s skipped, %s failed (%s duplicate content)",
documents_indexed,
total_messages_collected,
documents_skipped,
documents_failed,
duplicate_content_count,

View file

@ -38,10 +38,22 @@ def safe_set_chunks(document: Document, chunks: list) -> None:
# Instead of: document.chunks = chunks (DANGEROUS!)
safe_set_chunks(document, chunks) # Always safe
"""
from sqlalchemy.orm import object_session
from sqlalchemy.orm.attributes import set_committed_value
# Keep relationship assignment lazy-load-safe.
set_committed_value(document, "chunks", chunks)
# Ensure chunk rows are actually persisted.
# set_committed_value bypasses normal unit-of-work tracking, so we need to
# explicitly attach chunk objects to the current session.
session = object_session(document)
if session is not None:
if document.id is not None:
for chunk in chunks:
chunk.document_id = document.id
session.add_all(chunks)
def get_current_timestamp() -> datetime:
"""

View file

@ -47,6 +47,14 @@ if config.AUTH_TYPE == "GOOGLE":
class UserManager(UUIDIDMixin, BaseUserManager[User, uuid.UUID]):
"""
Custom user manager extending fastapi-users BaseUserManager.
Authentication returns a generic error for both non-existent accounts
and incorrect passwords to comply with OWASP WSTG-IDNT-04 and
prevent user enumeration attacks.
"""
reset_password_token_secret = SECRET
verification_token_secret = SECRET

View file

@ -9,9 +9,17 @@ Message content in new_chat_messages can be stored in various formats:
These utilities help extract and transform content for different use cases.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.messages import AIMessage, HumanMessage
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
if TYPE_CHECKING:
from app.db import ChatVisibility
def extract_text_content(content: str | dict | list) -> str:
@ -38,6 +46,7 @@ def extract_text_content(content: str | dict | list) -> str:
async def bootstrap_history_from_db(
session: AsyncSession,
thread_id: int,
thread_visibility: ChatVisibility | None = None,
) -> list[HumanMessage | AIMessage]:
"""
Load message history from database and convert to LangChain format.
@ -45,20 +54,28 @@ async def bootstrap_history_from_db(
Used for cloned chats where the LangGraph checkpointer has no state,
but we have messages in the database that should be used as context.
When thread_visibility is SEARCH_SPACE, user messages are prefixed with
the author's display name so the LLM sees who said what.
Args:
session: Database session
thread_id: The chat thread ID
thread_visibility: When SEARCH_SPACE, user messages get author prefix
Returns:
List of LangChain messages (HumanMessage/AIMessage)
"""
from app.db import NewChatMessage
from app.db import ChatVisibility, NewChatMessage
result = await session.execute(
is_shared = thread_visibility == ChatVisibility.SEARCH_SPACE
stmt = (
select(NewChatMessage)
.filter(NewChatMessage.thread_id == thread_id)
.order_by(NewChatMessage.created_at)
)
if is_shared:
stmt = stmt.options(selectinload(NewChatMessage.author))
result = await session.execute(stmt)
db_messages = result.scalars().all()
langchain_messages: list[HumanMessage | AIMessage] = []
@ -68,6 +85,11 @@ async def bootstrap_history_from_db(
if not text_content:
continue
if msg.role == "user":
if is_shared:
author_name = (
msg.author.display_name if msg.author else None
) or "A team member"
text_content = f"**[{author_name}]:** {text_content}"
langchain_messages.append(HumanMessage(content=text_content))
elif msg.role == "assistant":
langchain_messages.append(AIMessage(content=text_content))

View file

@ -0,0 +1,46 @@
"""Redis-based connector indexing locks to prevent duplicate sync tasks."""
import redis
from app.config import config
_redis_client: redis.Redis | None = None
LOCK_TTL_SECONDS = config.CONNECTOR_INDEXING_LOCK_TTL_SECONDS
def get_indexing_lock_redis_client() -> redis.Redis:
    """Return the process-wide Redis client used for connector indexing locks.

    The client is created lazily on first call and cached in the module-level
    ``_redis_client`` singleton, so all lock helpers share one connection pool.
    ``decode_responses=True`` makes the client return ``str`` instead of bytes.
    """
    global _redis_client
    if _redis_client is not None:
        return _redis_client
    _redis_client = redis.from_url(config.REDIS_APP_URL, decode_responses=True)
    return _redis_client
def _get_connector_lock_key(connector_id: int) -> str:
"""Generate Redis key for a connector indexing lock."""
return f"indexing:connector_lock:{connector_id}"
def acquire_connector_indexing_lock(connector_id: int) -> bool:
    """Try to take the indexing lock for ``connector_id``.

    Uses a single atomic ``SET key 1 NX EX ttl`` so only one worker can hold
    the lock at a time; the TTL guarantees the lock eventually expires even if
    the holder crashes without releasing it.

    Returns:
        True when this caller obtained the lock, False when another indexing
        run already holds it.
    """
    client = get_indexing_lock_redis_client()
    acquired = client.set(
        _get_connector_lock_key(connector_id),
        "1",
        nx=True,
        ex=LOCK_TTL_SECONDS,
    )
    return bool(acquired)
def release_connector_indexing_lock(connector_id: int) -> None:
    """Drop the indexing lock for ``connector_id`` (no-op if not held).

    NOTE(review): the DEL is unconditional, so a worker whose lock already
    expired could delete a lock freshly acquired by another worker. Presumably
    acceptable because the TTL is long relative to task runtime — confirm.
    """
    client = get_indexing_lock_redis_client()
    client.delete(_get_connector_lock_key(connector_id))
def is_connector_indexing_locked(connector_id: int) -> bool:
    """Report whether an indexing lock currently exists for ``connector_id``."""
    client = get_indexing_lock_redis_client()
    return client.exists(_get_connector_lock_key(connector_id)) > 0

View file

@ -1,6 +1,6 @@
[project]
name = "surf-new-backend"
version = "0.0.12"
version = "0.0.13"
description = "SurfSense Backend"
requires-python = ">=3.12"
dependencies = [
@ -62,6 +62,7 @@ dependencies = [
"unstructured[all-docs]>=0.18.31",
"unstructured-client>=0.42.3",
"langchain-unstructured>=1.0.1",
"slowapi>=0.1.9",
]
[dependency-groups]

6443
surfsense_backend/uv.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
{
"name": "surfsense_browser_extension",
"displayName": "Surfsense Browser Extension",
"version": "0.0.12",
"version": "0.0.13",
"description": "Extension to collect Browsing History for SurfSense.",
"author": "https://github.com/MODSetter",
"engines": {

View file

@ -9,7 +9,7 @@ import { useEffect, useState } from "react";
import { toast } from "sonner";
import { loginMutationAtom } from "@/atoms/auth/auth-mutation.atoms";
import { Spinner } from "@/components/ui/spinner";
import { getAuthErrorDetails, isNetworkError, shouldRetry } from "@/lib/auth-errors";
import { getAuthErrorDetails, isNetworkError } from "@/lib/auth-errors";
import { AUTH_TYPE } from "@/lib/env-config";
import { ValidationError } from "@/lib/error";
import { trackLoginAttempt, trackLoginFailure, trackLoginSuccess } from "@/lib/posthog/events";
@ -65,10 +65,6 @@ export function LocalLoginForm() {
if (err instanceof ValidationError) {
trackLoginFailure("local", err.message);
setError({ title: err.name, message: err.message });
toast.error(err.name, {
description: err.message,
duration: 6000,
});
return;
}
@ -92,22 +88,6 @@ export function LocalLoginForm() {
title: errorDetails.title,
message: errorDetails.description,
});
// Show error toast with conditional retry action
const toastOptions: any = {
description: errorDetails.description,
duration: 6000,
};
// Add retry action if the error is retryable
if (shouldRetry(errorCode)) {
toastOptions.action = {
label: "Retry",
onClick: () => handleSubmit(e),
};
}
toast.error(errorDetails.title, toastOptions);
}
};

View file

@ -1,19 +1,51 @@
"use client";
import type React from "react";
import { useRef, useState, useEffect } from "react";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import { useEffect, useRef, useState } from "react";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
export function getDocumentTypeIcon(type: string, className?: string): React.ReactNode {
return getConnectorIcon(type, className);
}
export function getDocumentTypeLabel(type: string): string {
return type
.split("_")
.map((word) => word.charAt(0) + word.slice(1).toLowerCase())
.join(" ");
const labelMap: Record<string, string> = {
EXTENSION: "Extension",
CRAWLED_URL: "Web Page",
FILE: "File",
SLACK_CONNECTOR: "Slack",
TEAMS_CONNECTOR: "Microsoft Teams",
NOTION_CONNECTOR: "Notion",
YOUTUBE_VIDEO: "YouTube Video",
GITHUB_CONNECTOR: "GitHub",
LINEAR_CONNECTOR: "Linear",
DISCORD_CONNECTOR: "Discord",
JIRA_CONNECTOR: "Jira",
CONFLUENCE_CONNECTOR: "Confluence",
CLICKUP_CONNECTOR: "ClickUp",
GOOGLE_CALENDAR_CONNECTOR: "Google Calendar",
GOOGLE_GMAIL_CONNECTOR: "Gmail",
GOOGLE_DRIVE_FILE: "Google Drive",
AIRTABLE_CONNECTOR: "Airtable",
LUMA_CONNECTOR: "Luma",
ELASTICSEARCH_CONNECTOR: "Elasticsearch",
BOOKSTACK_CONNECTOR: "BookStack",
CIRCLEBACK: "Circleback",
OBSIDIAN_CONNECTOR: "Obsidian",
SURFSENSE_DOCS: "SurfSense Docs",
NOTE: "Note",
COMPOSIO_GOOGLE_DRIVE_CONNECTOR: "Composio Google Drive",
COMPOSIO_GMAIL_CONNECTOR: "Composio Gmail",
COMPOSIO_GOOGLE_CALENDAR_CONNECTOR: "Composio Google Calendar",
};
return (
labelMap[type] ||
type
.split("_")
.map((word) => word.charAt(0) + word.slice(1).toLowerCase())
.join(" ")
);
}
export function DocumentTypeChip({ type, className }: { type: string; className?: string }) {

View file

@ -11,14 +11,13 @@ import {
Clock,
FileText,
FileX,
Loader2,
Network,
Plus,
User,
} from "lucide-react";
import { motion } from "motion/react";
import { useTranslations } from "next-intl";
import React, { useRef, useState, useEffect, useCallback } from "react";
import React, { useCallback, useEffect, useRef, useState } from "react";
import { useDocumentUploadDialog } from "@/components/assistant-ui/document-upload-popup";
import { JsonMetadataViewer } from "@/components/json-metadata-viewer";
import { MarkdownViewer } from "@/components/markdown-viewer";
@ -354,11 +353,11 @@ export function DocumentsTableShell({
<Skeleton className="h-4 w-4 rounded" />
</div>
</TableHead>
<TableHead className="w-[35%] max-w-0 border-r border-border/40">
<TableHead className="w-[40%] max-w-0 border-r border-border/40">
<Skeleton className="h-3 w-20" />
</TableHead>
{columnVisibility.document_type && (
<TableHead className="w-[20%] min-w-[120px] max-w-[200px] border-r border-border/40">
<TableHead className="w-[15%] min-w-[100px] max-w-[170px] border-r border-border/40">
<Skeleton className="h-3 w-14" />
</TableHead>
)}
@ -396,11 +395,11 @@ export function DocumentsTableShell({
<Skeleton className="h-4 w-4 rounded" />
</div>
</TableCell>
<TableCell className="w-[35%] py-2.5 max-w-0 border-r border-border/40">
<TableCell className="w-[40%] py-2.5 max-w-0 border-r border-border/40">
<Skeleton className="h-4" style={{ width: `${widthPercent}%` }} />
</TableCell>
{columnVisibility.document_type && (
<TableCell className="w-[20%] min-w-[120px] max-w-[200px] py-2.5 border-r border-border/40 overflow-hidden">
<TableCell className="w-[15%] min-w-[100px] max-w-[170px] py-2.5 border-r border-border/40 overflow-hidden">
<Skeleton className="h-5 w-24 rounded" />
</TableCell>
)}
@ -499,7 +498,7 @@ export function DocumentsTableShell({
/>
</div>
</TableHead>
<TableHead className="w-[35%] border-r border-border/40">
<TableHead className="w-[40%] border-r border-border/40">
<SortableHeader
sortKey="title"
currentSortKey={sortKey}
@ -511,7 +510,7 @@ export function DocumentsTableShell({
</SortableHeader>
</TableHead>
{columnVisibility.document_type && (
<TableHead className="w-[20%] min-w-[120px] max-w-[200px] border-r border-border/40">
<TableHead className="w-[15%] min-w-[100px] max-w-[170px] border-r border-border/40">
<SortableHeader
sortKey="document_type"
currentSortKey={sortKey}
@ -594,7 +593,7 @@ export function DocumentsTableShell({
/>
</div>
</TableCell>
<TableCell className="w-[35%] py-2.5 max-w-0 border-r border-border/40">
<TableCell className="w-[40%] py-2.5 max-w-0 border-r border-border/40">
<button
type="button"
className="block w-full text-left text-sm text-foreground hover:text-foreground transition-colors cursor-pointer bg-transparent border-0 p-0 truncate"
@ -624,7 +623,7 @@ export function DocumentsTableShell({
</button>
</TableCell>
{columnVisibility.document_type && (
<TableCell className="w-[20%] min-w-[120px] max-w-[200px] py-2.5 border-r border-border/40 overflow-hidden">
<TableCell className="w-[15%] min-w-[100px] max-w-[170px] py-2.5 border-r border-border/40 overflow-hidden">
<DocumentTypeChip type={doc.document_type} />
</TableCell>
)}
@ -773,7 +772,7 @@ export function DocumentsTableShell({
<div className="mt-4">
{viewingLoading ? (
<div className="flex items-center justify-center py-12">
<Loader2 className="h-8 w-8 animate-spin text-muted-foreground" />
<Spinner size="lg" className="text-muted-foreground" />
</div>
) : (
<MarkdownViewer content={viewingContent} />

View file

@ -2,7 +2,7 @@
import { IconCalendar, IconMailFilled } from "@tabler/icons-react";
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
import { Check, ExternalLink, Gift, Loader2, Mail, Star } from "lucide-react";
import { Check, ExternalLink, Gift, Mail, Star } from "lucide-react";
import { motion } from "motion/react";
import Link from "next/link";
import { useEffect } from "react";
@ -19,6 +19,7 @@ import {
} from "@/components/ui/dialog";
import { Separator } from "@/components/ui/separator";
import { Skeleton } from "@/components/ui/skeleton";
import { Spinner } from "@/components/ui/spinner";
import type { IncentiveTaskInfo } from "@/contracts/types/incentive-tasks.types";
import { incentiveTasksApiService } from "@/lib/apis/incentive-tasks-api.service";
import {
@ -144,7 +145,7 @@ export default function MorePagesPage() {
className="gap-1"
>
{completeMutation.isPending ? (
<Loader2 className="h-3 w-3 animate-spin" />
<Spinner size="xs" />
) : (
<>
Go

View file

@ -44,7 +44,6 @@ import { useMessagesElectric } from "@/hooks/use-messages-electric";
import { documentsApiService } from "@/lib/apis/documents-api.service";
// import { WriteTodosToolUI } from "@/components/tool-ui/write-todos";
import { getBearerToken } from "@/lib/auth-utils";
import { createAttachmentAdapter, extractAttachmentContent } from "@/lib/chat/attachment-adapter";
import { convertToThreadMessage } from "@/lib/chat/message-utils";
import {
isPodcastGenerating,
@ -216,9 +215,6 @@ export default function NewChatPage() {
useMessagesElectric(threadId, handleElectricMessagesUpdate);
// Create the attachment adapter for file processing
const attachmentAdapter = useMemo(() => createAttachmentAdapter(), []);
// Extract search_space_id from URL params
const searchSpaceId = useMemo(() => {
const id = params.search_space_id;
@ -409,16 +405,7 @@ export default function NewChatPage() {
}
}
// Extract attachments from message
// AppendMessage.attachments contains the processed attachment objects (from adapter.send())
const messageAttachments: Array<Record<string, unknown>> = [];
if (message.attachments && message.attachments.length > 0) {
for (const att of message.attachments) {
messageAttachments.push(att as unknown as Record<string, unknown>);
}
}
if (!userQuery.trim() && messageAttachments.length === 0) return;
if (!userQuery.trim()) return;
// Check if podcast is already generating
if (isPodcastGenerating() && looksLikePodcastRequest(userQuery)) {
@ -485,14 +472,13 @@ export default function NewChatPage() {
role: "user",
content: message.content,
createdAt: new Date(),
attachments: message.attachments || [],
metadata: authorMetadata,
};
setMessages((prev) => [...prev, userMessage]);
// Track message sent
trackChatMessageSent(searchSpaceId, currentThreadId, {
hasAttachments: messageAttachments.length > 0,
hasAttachments: false,
hasMentionedDocuments:
mentionedDocumentIds.surfsense_doc_ids.length > 0 ||
mentionedDocumentIds.document_ids.length > 0,
@ -512,7 +498,7 @@ export default function NewChatPage() {
}));
}
// Persist user message with mentioned documents and attachments (don't await, fire and forget)
// Persist user message with mentioned documents (don't await, fire and forget)
const persistContent: unknown[] = [...message.content];
// Add mentioned documents for persistence
@ -527,23 +513,6 @@ export default function NewChatPage() {
});
}
// Add attachments for persistence (so they survive page reload)
if (message.attachments && message.attachments.length > 0) {
persistContent.push({
type: "attachments",
items: message.attachments.map((att) => ({
id: att.id,
name: att.name,
type: att.type,
contentType: (att as { contentType?: string }).contentType,
// Include imageDataUrl for images so they can be displayed after reload
imageDataUrl: (att as { imageDataUrl?: string }).imageDataUrl,
// Include extractedContent for context (already extracted, no re-processing needed)
extractedContent: (att as { extractedContent?: string }).extractedContent,
})),
});
}
appendMessage(currentThreadId, {
role: "user",
content: persistContent,
@ -688,9 +657,6 @@ export default function NewChatPage() {
})
.filter((m) => m.content.length > 0);
// Extract attachment content to send with the request
const attachments = extractAttachmentContent(messageAttachments);
// Get mentioned document IDs for context (separate fields for backend)
const hasDocumentIds = mentionedDocumentIds.document_ids.length > 0;
const hasSurfsenseDocIds = mentionedDocumentIds.surfsense_doc_ids.length > 0;
@ -715,7 +681,6 @@ export default function NewChatPage() {
user_query: userQuery.trim(),
search_space_id: searchSpaceId,
messages: messageHistory,
attachments: attachments.length > 0 ? attachments : undefined,
mentioned_document_ids: hasDocumentIds ? mentionedDocumentIds.document_ids : undefined,
mentioned_surfsense_doc_ids: hasSurfsenseDocIds
? mentionedDocumentIds.surfsense_doc_ids
@ -1010,7 +975,6 @@ export default function NewChatPage() {
// Extract the original user query BEFORE removing messages (for reload mode)
let userQueryToDisplay = newUserQuery;
let originalUserMessageContent: ThreadMessageLike["content"] | null = null;
let originalUserMessageAttachments: ThreadMessageLike["attachments"] | undefined;
let originalUserMessageMetadata: ThreadMessageLike["metadata"] | undefined;
if (!newUserQuery) {
@ -1018,7 +982,6 @@ export default function NewChatPage() {
const lastUserMessage = [...messages].reverse().find((m) => m.role === "user");
if (lastUserMessage) {
originalUserMessageContent = lastUserMessage.content;
originalUserMessageAttachments = lastUserMessage.attachments;
originalUserMessageMetadata = lastUserMessage.metadata;
// Extract text for the API request
for (const part of lastUserMessage.content) {
@ -1144,7 +1107,6 @@ export default function NewChatPage() {
? [{ type: "text", text: newUserQuery }]
: originalUserMessageContent || [{ type: "text", text: userQueryToDisplay || "" }],
createdAt: new Date(),
attachments: newUserQuery ? undefined : originalUserMessageAttachments,
metadata: newUserQuery ? undefined : originalUserMessageMetadata,
};
setMessages((prev) => [...prev, userMessage]);
@ -1391,7 +1353,7 @@ export default function NewChatPage() {
await handleRegenerate(null);
}, [handleRegenerate]);
// Create external store runtime with attachment support
// Create external store runtime
const runtime = useExternalStoreRuntime({
messages,
isRunning,
@ -1400,9 +1362,6 @@ export default function NewChatPage() {
onReload,
convertMessage,
onCancel: cancelRun,
adapters: {
attachments: attachmentAdapter,
},
});
// Show loading state only when loading an existing thread

View file

@ -187,5 +187,24 @@ button {
background-color: hsl(var(--muted-foreground) / 0.4);
}
/* Integrations section — vertical column auto-scroll */
@keyframes integrations-scroll-up {
0% {
transform: translateY(0);
}
100% {
transform: translateY(-50%);
}
}
@keyframes integrations-scroll-down {
0% {
transform: translateY(-50%);
}
100% {
transform: translateY(0);
}
}
@source '../node_modules/@llamaindex/chat-ui/**/*.{ts,tsx}';
@source '../node_modules/streamdown/dist/*.js';

View file

@ -1,16 +1,14 @@
import { atomWithQuery } from "jotai-tanstack-query";
import { userApiService } from "@/lib/apis/user-api.service";
import { getBearerToken } from "@/lib/auth-utils";
import { getBearerToken, isPublicRoute } from "@/lib/auth-utils";
import { cacheKeys } from "@/lib/query-client/cache-keys";
export const currentUserAtom = atomWithQuery(() => {
const pathname = typeof window !== "undefined" ? window.location.pathname : null;
return {
queryKey: cacheKeys.user.current(),
staleTime: 5 * 60 * 1000, // 5 minutes
// Only fetch user data when a bearer token is present
enabled: !!getBearerToken(),
queryFn: async () => {
return userApiService.getMe();
},
enabled: !!getBearerToken() && pathname !== null && !isPublicRoute(pathname),
queryFn: async () => userApiService.getMe(),
};
});

View file

@ -0,0 +1,72 @@
---
title: "SurfSense v0.0.13 - Public Sharing, Image Generation & Redesigned Documents"
description: "SurfSense v0.0.13 introduces public chat sharing with permissions, image generation support, an auto load-balanced model mode, a redesigned Documents page, and numerous bug fixes across connectors and UI."
date: "2026-02-09"
tags: ["Public Sharing", "Image Generation", "Documents", "UI", "Bug Fixes"]
version: "0.0.13"
---
## What's New in v0.0.13
This update brings **public sharing, image generation**, a redesigned Documents page, and numerous bug fixes.
### Features & Improvements
#### Image Generation
- **Image Generation**: Generate images directly in chat with custom model and provider configurations.
#### Public Sharing
- **Public Chat Links**: Share snapshots of chats via public links.
- **Sharing Permissions**: Search Space owners control who can create and manage public links.
- **Link Management Page**: View and revoke all public chat links from Search Space Settings.
#### Auto (Load Balanced) Mode
- **Auto Model Selection**: The default cloud model now automatically picks the lowest-load model for faster responses.
#### Redesigned Documents Page
- **Unified Page**: Merged Logs and Documents into one page with real-time statuses.
- **Inline Connector Management**: Configure and monitor connectors alongside your documents.
#### UI & UX Polish
- **Inbox Refresh**: Cleaner inbox layout for easier scanning.
- **Streamlined Login**: Consolidated loading screens into a single unified flow.
- **Chat UI Tweaks**: Small refinements for a cleaner, more consistent chat experience.
- **Prompt Suggestions**: Empty chat windows now show contextual suggestions.
#### Documentation
- **New Connector Docs**: Added docs for Luma, Circleback, Elasticsearch, Bookstack, and Obsidian connectors.
<Accordion type="multiple" className="w-full not-prose">
<AccordionItem value="item-1">
<AccordionTrigger>Bug Fixes</AccordionTrigger>
<AccordionContent className="flex flex-col gap-4 text-balance">
<ul className="list-disc space-y-2 pl-4">
<li>Fixed cloud scaling issues where document queue congestion occurred under high load</li>
<li>Documents now correctly attribute to the uploading user and de-index on disconnect or deletion</li>
<li>Fixed common backend errors in indexing and large file handling</li>
<li>Fixed Notion indexing failures caused by transcription blocks</li>
<li>Chat refresh button now correctly regenerates AI responses</li>
<li>Restored the previously disabled Role Editor</li>
<li>Fixed Mentions tab appearing empty when document notifications pushed mentions out of the pagination window</li>
<li>Bundled git in the Docker image to fix GitHub connector failures with gitingest</li>
<li>Fixed Google Calendar default date range errors and aligned backend defaults with the frontend</li>
</ul>
</AccordionContent>
</AccordionItem>
<AccordionItem value="item-2">
<AccordionTrigger>Technical Improvements</AccordionTrigger>
<AccordionContent className="flex flex-col gap-4 text-balance">
<ul className="list-disc space-y-2 pl-4">
<li>Rebuilt the GitHub connector on gitingest for more efficient, lower-cost repository fetching</li>
</ul>
</AccordionContent>
</AccordionItem>
</Accordion>
SurfSense is your AI-powered federated search solution, connecting all your knowledge sources in one place.

View file

@ -1,6 +1,6 @@
"use client";
import { BadgeCheck, Loader2, LogOut } from "lucide-react";
import { BadgeCheck, LogOut } from "lucide-react";
import { useRouter } from "next/navigation";
import { useState } from "react";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
@ -14,6 +14,7 @@ import {
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Spinner } from "@/components/ui/spinner";
import { logout } from "@/lib/auth-utils";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
@ -98,7 +99,7 @@ export function UserDropdown({
disabled={isLoggingOut}
>
{isLoggingOut ? (
<Loader2 className="mr-2 h-3.5 w-3.5 md:h-4 md:w-4 animate-spin" />
<Spinner size="sm" className="mr-2" />
) : (
<LogOut className="mr-2 h-3.5 w-3.5 md:h-4 md:w-4" />
)}

View file

@ -1,377 +0,0 @@
"use client";
import {
AttachmentPrimitive,
ComposerPrimitive,
MessagePrimitive,
useAssistantApi,
useAssistantState,
} from "@assistant-ui/react";
import { FileText, Paperclip, PlusIcon, Upload, XIcon } from "lucide-react";
import Image from "next/image";
import { type FC, type PropsWithChildren, useEffect, useRef, useState } from "react";
import { useShallow } from "zustand/shallow";
import { TooltipIconButton } from "@/components/assistant-ui/tooltip-icon-button";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import { Dialog, DialogContent, DialogTitle, DialogTrigger } from "@/components/ui/dialog";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Spinner } from "@/components/ui/spinner";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import { useDocumentUploadDialog } from "./document-upload-popup";
/**
 * Hook that turns a File into a displayable object URL.
 *
 * Creates the URL whenever `file` changes and revokes it in the effect
 * cleanup (on file change or unmount) so blob URLs are not leaked.
 * Returns undefined while no file is available.
 */
const useFileSrc = (file: File | undefined) => {
	const [src, setSrc] = useState<string | undefined>(undefined);
	useEffect(() => {
		// No file: clear any previous URL (the prior effect's cleanup
		// handles revocation) and register no cleanup of our own.
		if (!file) {
			setSrc(undefined);
			return;
		}
		const objectUrl = URL.createObjectURL(file);
		setSrc(objectUrl);
		// Revoke when the file changes or the component unmounts.
		return () => {
			URL.revokeObjectURL(objectUrl);
		};
	}, [file]);
	return src;
};
/**
 * Resolve the image source for the current attachment in assistant-ui state.
 *
 * Resolution order:
 *   1. the in-memory File (fresh uploads), converted via useFileSrc;
 *   2. a persisted `imageDataUrl` (custom field on stored attachments);
 *   3. the first image entry of the standard `content` array.
 * Non-image attachments (and missing attachments) yield undefined.
 */
const useAttachmentSrc = () => {
	const { file, src } = useAssistantState(
		useShallow(({ attachment }): { file?: File; src?: string } => {
			if (!attachment || attachment.type !== "image") return {};
			// First priority: use File object if available (for new uploads)
			if (attachment.file) return { file: attachment.file };
			// Second priority: use stored imageDataUrl (for persisted messages)
			// This is stored in our custom ChatAttachment interface
			const customAttachment = attachment as { imageDataUrl?: string };
			if (customAttachment.imageDataUrl) {
				return { src: customAttachment.imageDataUrl };
			}
			// Third priority: try to extract from content array (standard assistant-ui format)
			if (Array.isArray(attachment.content)) {
				const contentSrc = attachment.content.filter((c) => c.type === "image")[0]?.image;
				if (contentSrc) return { src: contentSrc };
			}
			return {};
		})
	);
	// useFileSrc yields undefined when no File was selected; fall back to src.
	return useFileSrc(file) ?? src;
};
type AttachmentPreviewProps = {
	src: string;
};

/**
 * Full-size image shown inside the preview dialog.
 * Kept hidden until the image has finished loading to avoid layout jumps.
 */
const AttachmentPreview: FC<AttachmentPreviewProps> = ({ src }) => {
	const [loaded, setLoaded] = useState(false);
	const imageClass = loaded
		? "aui-attachment-preview-image-loaded block h-auto max-h-[80vh] w-auto max-w-full object-contain"
		: "aui-attachment-preview-image-loading hidden";
	return (
		<Image
			src={src}
			alt="Image Preview"
			width={1}
			height={1}
			className={imageClass}
			onLoadingComplete={() => setLoaded(true)}
			priority={false}
		/>
	);
};
/**
 * Wraps children in a click-to-open dialog showing the full-size image.
 * When no image source can be resolved for the current attachment, the
 * children are rendered as-is (no dialog), so non-image attachments stay inert.
 */
const AttachmentPreviewDialog: FC<PropsWithChildren> = ({ children }) => {
	const src = useAttachmentSrc();
	if (!src) return children;
	return (
		<Dialog>
			<DialogTrigger
				className="aui-attachment-preview-trigger cursor-pointer transition-colors hover:bg-accent/50"
				asChild
			>
				{children}
			</DialogTrigger>
			<DialogContent className="aui-attachment-preview-dialog-content p-2 sm:max-w-3xl [&>button]:rounded-full [&>button]:bg-foreground/60 [&>button]:p-1 [&>button]:opacity-100 [&>button]:ring-0! [&_svg]:text-background [&>button]:hover:[&_svg]:text-destructive">
				{/* Screen-reader-only title keeps the dialog accessible. */}
				<DialogTitle className="aui-sr-only sr-only">Image Attachment Preview</DialogTitle>
				<div className="aui-attachment-preview relative mx-auto flex max-h-[80dvh] w-full items-center justify-center overflow-hidden bg-background">
					<AttachmentPreview src={src} />
				</div>
			</DialogContent>
		</Dialog>
	);
};
/**
 * Thumbnail rendered inside an attachment tile: a spinner while the
 * attachment is still being processed, otherwise the image preview with a
 * generic file icon fallback for non-images.
 */
const AttachmentThumb: FC = () => {
	const isImage = useAssistantState(({ attachment }) => attachment?.type === "image");
	// Check if actively processing (running AND progress < 100)
	// When progress is 100, processing is done but waiting for send()
	const isProcessing = useAssistantState(({ attachment }) => {
		const status = attachment?.status;
		if (status?.type !== "running") return false;
		// If progress is defined and equals 100, processing is complete
		const progress = (status as { type: "running"; progress?: number }).progress;
		return progress === undefined || progress < 100;
	});
	const src = useAttachmentSrc();
	// Show loading spinner only when actively processing (not when done and waiting for send)
	if (isProcessing) {
		return (
			<div className="flex h-full w-full items-center justify-center bg-muted">
				<Spinner size="md" className="text-muted-foreground" />
			</div>
		);
	}
	return (
		<Avatar className="aui-attachment-tile-avatar h-full w-full rounded-none">
			<AvatarImage
				src={src}
				alt="Attachment preview"
				className="aui-attachment-tile-image object-cover"
			/>
			{/* For images, delay the fallback briefly so the preview can load first. */}
			<AvatarFallback delayMs={isImage ? 200 : 0}>
				<FileText className="aui-attachment-tile-fallback-icon size-8 text-muted-foreground" />
			</AvatarFallback>
		</Avatar>
	);
};
/**
 * Full attachment tile used in both the composer and messages: a clickable
 * thumbnail (opens the image preview dialog when a source exists), a remove
 * button while composing, and a tooltip showing either the attachment name
 * or a "Processing..." indicator.
 */
const AttachmentUI: FC = () => {
	const api = useAssistantApi();
	// Composer-sourced attachments get a remove button and tile styling.
	const isComposer = api.attachment.source === "composer";
	const isImage = useAssistantState(({ attachment }) => attachment?.type === "image");
	// Check if actively processing (running AND progress < 100)
	// When progress is 100, processing is done but waiting for send()
	const isProcessing = useAssistantState(({ attachment }) => {
		const status = attachment?.status;
		if (status?.type !== "running") return false;
		const progress = (status as { type: "running"; progress?: number }).progress;
		return progress === undefined || progress < 100;
	});
	// Human-readable label used only for the accessible aria-label below.
	const typeLabel = useAssistantState(({ attachment }) => {
		const type = attachment?.type;
		switch (type) {
			case "image":
				return "Image";
			case "document":
				return "Document";
			case "file":
				return "File";
			default:
				return "File"; // Default fallback for unknown types
		}
	});
	return (
		<Tooltip>
			<AttachmentPrimitive.Root
				className={cn(
					"aui-attachment-root relative",
					isImage && "aui-attachment-root-composer only:[&>#attachment-tile]:size-24"
				)}
			>
				<AttachmentPreviewDialog>
					<TooltipTrigger asChild>
						<button
							type="button"
							className={cn(
								"aui-attachment-tile size-14 cursor-pointer overflow-hidden rounded-[14px] border bg-muted transition-opacity hover:opacity-75",
								isComposer && "aui-attachment-tile-composer border-foreground/20",
								isProcessing && "animate-pulse"
							)}
							id="attachment-tile"
							aria-label={isProcessing ? "Processing attachment..." : `${typeLabel} attachment`}
						>
							<AttachmentThumb />
						</button>
					</TooltipTrigger>
				</AttachmentPreviewDialog>
				{/* Remove button only while composing and once processing finished. */}
				{isComposer && !isProcessing && <AttachmentRemove />}
			</AttachmentPrimitive.Root>
			<TooltipContent
				side="top"
				className="bg-black text-white font-medium shadow-xl px-3 py-1.5 dark:bg-zinc-800 dark:text-zinc-50 border-none"
			>
				{isProcessing ? (
					<span className="flex items-center gap-1.5">
						<Spinner size="xs" />
						Processing...
					</span>
				) : (
					<AttachmentPrimitive.Name />
				)}
			</TooltipContent>
		</Tooltip>
	);
};
/** Small "x" button overlaid on a composer attachment tile to remove it. */
const AttachmentRemove: FC = () => (
	<AttachmentPrimitive.Remove asChild>
		<TooltipIconButton
			tooltip="Remove file"
			className="aui-attachment-tile-remove absolute top-1.5 right-1.5 size-3.5 rounded-full bg-white text-muted-foreground opacity-100 shadow-sm hover:bg-white! [&_svg]:text-black hover:[&_svg]:text-destructive"
			side="top"
		>
			<XIcon className="aui-attachment-remove-icon size-3 dark:stroke-[2.5px]" />
		</TooltipIconButton>
	</AttachmentPrimitive.Remove>
);
/**
 * Image attachment in a user message: a small thumbnail that expands to a
 * full-size preview dialog on click; hovering reveals a filename overlay.
 * Renders nothing when no image source can be resolved.
 */
const MessageImageAttachment: FC = () => {
	const attachmentName = useAssistantState(({ attachment }) => attachment?.name || "Image");
	const src = useAttachmentSrc();
	if (!src) return null;
	return (
		<AttachmentPreviewDialog>
			<div
				className="relative group cursor-pointer overflow-hidden rounded-xl border border-border/50 bg-muted transition-all hover:border-primary/30 hover:shadow-md"
				title={`Click to expand: ${attachmentName}`}
			>
				<Image
					src={src}
					alt={attachmentName}
					width={120}
					height={90}
					className="object-cover w-[120px] h-[90px] transition-transform group-hover:scale-105"
				/>
				{/* Hover overlay with filename */}
				<div className="absolute inset-0 bg-gradient-to-t from-black/60 via-transparent to-transparent opacity-0 group-hover:opacity-100 transition-opacity">
					<div className="absolute bottom-1.5 left-1.5 right-1.5">
						<span className="text-[10px] text-white/90 font-medium truncate block">
							{attachmentName}
						</span>
					</div>
				</div>
			</div>
		</AttachmentPreviewDialog>
	);
};
/**
 * Document/file attachment rendered as a compact chip (mirrors the style of
 * mentioned-document chips); clicking opens the preview dialog when one exists.
 */
const MessageDocumentAttachment: FC = () => {
	const name = useAssistantState(({ attachment }) => attachment?.name || "Attachment");
	return (
		<AttachmentPreviewDialog>
			<span
				className="inline-flex items-center gap-1 px-2 py-0.5 rounded-full bg-primary/10 text-xs font-medium text-primary border border-primary/20 cursor-pointer hover:bg-primary/20 transition-colors"
				title={name}
			>
				<FileText className="size-3" />
				<span className="max-w-[150px] truncate">{name}</span>
			</span>
		</AttachmentPreviewDialog>
	);
};
/**
* Attachment component for user messages
* Shows image preview for images, chip for documents
*/
const MessageAttachmentChip: FC = () => {
const isImage = useAssistantState(({ attachment }) => attachment?.type === "image");
if (isImage) {
return <MessageImageAttachment />;
}
return <MessageDocumentAttachment />;
};
export const UserMessageAttachments: FC = () => {
return <MessagePrimitive.Attachments components={{ Attachment: MessageAttachmentChip }} />;
};
export const ComposerAttachments: FC = () => {
return (
<div className="aui-composer-attachments mb-2 flex w-full flex-row items-center gap-2 overflow-x-auto px-1.5 pt-0.5 pb-1 empty:hidden">
<ComposerPrimitive.Attachments components={{ Attachment: AttachmentUI }} />
</div>
);
};
/**
 * "+" button in the composer that offers two upload paths via a dropdown:
 * attaching files to the current chat (hidden file input wired into
 * assistant-ui) or opening the Search Space document upload dialog.
 */
export const ComposerAddAttachment: FC = () => {
	const chatAttachmentInputRef = useRef<HTMLInputElement>(null);
	const { openDialog } = useDocumentUploadDialog();
	// "Upload documents to Search Space" -> shared upload dialog.
	const handleFileUpload = () => {
		openDialog();
	};
	// "Add attachment to this chat" -> trigger the hidden file input.
	const handleChatAttachment = () => {
		chatAttachmentInputRef.current?.click();
	};
	// Prevent event bubbling when file input is clicked
	const handleFileInputClick = (e: React.MouseEvent<HTMLInputElement>) => {
		e.stopPropagation();
	};
	return (
		<>
			<DropdownMenu>
				<DropdownMenuTrigger asChild>
					<TooltipIconButton
						tooltip="Upload"
						side="bottom"
						variant="ghost"
						size="icon"
						className="aui-composer-add-attachment size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
						aria-label="Upload"
					>
						<PlusIcon className="aui-attachment-add-icon size-5 stroke-[1.5px]" />
					</TooltipIconButton>
				</DropdownMenuTrigger>
				<DropdownMenuContent align="start" className="w-72 bg-background border-border">
					<DropdownMenuItem onSelect={handleChatAttachment} className="cursor-pointer">
						<Paperclip className="size-4" />
						<span>Add attachment to this chat</span>
					</DropdownMenuItem>
					<DropdownMenuItem onClick={handleFileUpload} className="cursor-pointer">
						<Upload className="size-4" />
						<span>Upload documents to Search Space</span>
					</DropdownMenuItem>
				</DropdownMenuContent>
			</DropdownMenu>
			{/* assistant-ui hooks this input into its attachment pipeline. */}
			<ComposerPrimitive.AddAttachment asChild>
				<input
					ref={chatAttachmentInputRef}
					type="file"
					multiple
					className="hidden"
					accept="image/*,application/pdf,.doc,.docx,.txt"
					onClick={handleFileInputClick}
				/>
			</ComposerPrimitive.AddAttachment>
		</>
	);
};

View file

@ -122,10 +122,12 @@ export const IndexingConfigurationView: FC<IndexingConfigurationViewProps> = ({
<div className="flex flex-col">
<span className="text-xl sm:text-2xl font-semibold tracking-tight text-wrap whitespace-normal wrap-break-word">
{getConnectorTypeDisplay(connector?.connector_type || "")} Connected !
</span>{" "}
<span className="text-xl sm:text-xl font-semibold text-muted-foreground tracking-tight text-wrap whitespace-normal wrap-break-word">
{getConnectorDisplayName(connector?.name || "")}
</span>
{connector?.name?.includes(" - ") && (
<span className="text-xl sm:text-xl font-semibold text-muted-foreground tracking-tight text-wrap whitespace-normal wrap-break-word">
{getConnectorDisplayName(connector.name)}
</span>
)}
</div>
<p className="text-xs sm:text-base text-muted-foreground mt-1">
Configure when to start syncing your data

View file

@ -27,6 +27,12 @@ export interface InlineMentionEditorRef {
getText: () => string;
getMentionedDocuments: () => MentionedDocument[];
insertDocumentChip: (doc: Pick<Document, "id" | "title" | "document_type">) => void;
setDocumentChipStatus: (
docId: number,
docType: string | undefined,
statusLabel: string | null,
statusKind?: "pending" | "processing" | "ready" | "failed"
) => void;
}
interface InlineMentionEditorProps {
@ -46,6 +52,7 @@ interface InlineMentionEditorProps {
const CHIP_DATA_ATTR = "data-mention-chip";
const CHIP_ID_ATTR = "data-mention-id";
const CHIP_DOCTYPE_ATTR = "data-mention-doctype";
const CHIP_STATUS_ATTR = "data-mention-status";
/**
* Type guard to check if a node is a chip element
@ -182,6 +189,11 @@ export const InlineMentionEditor = forwardRef<InlineMentionEditorRef, InlineMent
titleSpan.className = "max-w-[120px] truncate";
titleSpan.textContent = doc.title;
titleSpan.title = doc.title;
titleSpan.setAttribute("data-mention-title", "true");
const statusSpan = document.createElement("span");
statusSpan.setAttribute(CHIP_STATUS_ATTR, "true");
statusSpan.className = "text-[10px] font-semibold opacity-80 hidden";
const removeBtn = document.createElement("button");
removeBtn.type = "button";
@ -207,6 +219,7 @@ export const InlineMentionEditor = forwardRef<InlineMentionEditorRef, InlineMent
chip.appendChild(iconSpan);
chip.appendChild(titleSpan);
chip.appendChild(statusSpan);
chip.appendChild(removeBtn);
return chip;
@ -332,6 +345,48 @@ export const InlineMentionEditor = forwardRef<InlineMentionEditorRef, InlineMent
}
}, []);
// Updates (or clears) the small status badge rendered inside a mention chip.
// Matches chips by document id AND document type (missing type is treated as
// "UNKNOWN" — presumably mirroring getChipDocType's default; TODO confirm).
// Passing statusLabel = null hides the badge again.
const setDocumentChipStatus = useCallback(
	(
		docId: number,
		docType: string | undefined,
		statusLabel: string | null,
		statusKind: "pending" | "processing" | "ready" | "failed" = "pending"
	) => {
		if (!editorRef.current) return;
		// All chips currently present in the contenteditable editor.
		const chips = editorRef.current.querySelectorAll<HTMLSpanElement>(
			`span[${CHIP_DATA_ATTR}="true"]`
		);
		for (const chip of chips) {
			const chipId = getChipId(chip);
			const chipType = getChipDocType(chip);
			if (chipId !== docId) continue;
			if ((docType ?? "UNKNOWN") !== chipType) continue;
			const statusEl = chip.querySelector<HTMLSpanElement>(`span[${CHIP_STATUS_ATTR}="true"]`);
			if (!statusEl) continue;
			// No label → reset the badge to its hidden default state.
			if (!statusLabel) {
				statusEl.textContent = "";
				statusEl.className = "text-[10px] font-semibold opacity-80 hidden";
				continue;
			}
			// Color the badge by status kind; "pending" falls through to amber.
			const statusClass =
				statusKind === "failed"
					? "text-destructive"
					: statusKind === "processing"
						? "text-amber-700"
						: statusKind === "ready"
							? "text-emerald-700"
							: "text-amber-700";
			statusEl.textContent = statusLabel;
			statusEl.className = `text-[10px] font-semibold opacity-80 ${statusClass}`;
		}
	},
	[]
);
// Expose methods via ref
useImperativeHandle(ref, () => ({
focus: () => editorRef.current?.focus(),
@ -339,6 +394,7 @@ export const InlineMentionEditor = forwardRef<InlineMentionEditorRef, InlineMent
getText,
getMentionedDocuments,
insertDocumentChip,
setDocumentChipStatus,
}));
// Handle input changes
@ -526,7 +582,7 @@ export const InlineMentionEditor = forwardRef<InlineMentionEditorRef, InlineMent
className={cn(
"min-h-[24px] max-h-32 overflow-y-auto",
"text-sm outline-none",
"whitespace-pre-wrap break-words",
"whitespace-pre-wrap wrap-break-word",
disabled && "opacity-50 cursor-not-allowed",
className
)}

View file

@ -19,13 +19,15 @@ import {
ChevronRightIcon,
CopyIcon,
DownloadIcon,
Loader2,
FileWarning,
Paperclip,
RefreshCwIcon,
SquareIcon,
} from "lucide-react";
import { useParams } from "next/navigation";
import { type FC, useCallback, useContext, useEffect, useMemo, useRef, useState } from "react";
import { createPortal } from "react-dom";
import { toast } from "sonner";
import { chatSessionStateAtom } from "@/atoms/chat/chat-session-state.atom";
import { showCommentsGutterAtom } from "@/atoms/chat/current-thread.atom";
import {
@ -40,7 +42,6 @@ import {
} from "@/atoms/new-llm-config/new-llm-config-query.atoms";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { AssistantMessage } from "@/components/assistant-ui/assistant-message";
import { ComposerAddAttachment, ComposerAttachments } from "@/components/assistant-ui/attachment";
import { ChatSessionStatus } from "@/components/assistant-ui/chat-session-status";
import { ConnectorIndicator } from "@/components/assistant-ui/connector-popup";
import {
@ -61,20 +62,35 @@ import {
} from "@/components/new-chat/document-mention-picker";
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { Button } from "@/components/ui/button";
import { Spinner } from "@/components/ui/spinner";
import type { Document } from "@/contracts/types/document.types";
import { useCommentsElectric } from "@/hooks/use-comments-electric";
import { documentsApiService } from "@/lib/apis/documents-api.service";
import { cn } from "@/lib/utils";
/** Placeholder texts that cycle in new chats when input is empty */
const CYCLING_PLACEHOLDERS = [
"Ask SurfSense anything or @mention docs.",
"Generate a podcast from marketing tips in the company handbook.",
"Sum up our vacation policy from Drive.",
"Generate a podcast from my vacation ideas in Notion.",
"Sum up last week's meeting notes from Drive in a bulleted list.",
"Give me a brief overview of the most urgent tickets in Jira and Linear.",
"Create a concise table of today's top ten emails and calendar events.",
"Briefly, what are today's top ten important emails and calendar events?",
"Check if this week's Slack messages reference any GitHub issues.",
];
const CHAT_UPLOAD_ACCEPT =
".pdf,.doc,.docx,.txt,.md,.markdown,.ppt,.pptx,.xls,.xlsx,.xlsm,.xlsb,.csv,.html,.htm,.xml,.rtf,.epub,.jpg,.jpeg,.png,.bmp,.webp,.tiff,.tif,.mp3,.mp4,.mpeg,.mpga,.m4a,.wav,.webm";
type UploadState = "pending" | "processing" | "ready" | "failed";
interface UploadedMentionDoc {
id: number;
title: string;
document_type: Document["document_type"];
state: UploadState;
reason?: string | null;
}
interface ThreadProps {
messageThinkingSteps?: Map<string, ThinkingStep[]>;
header?: React.ReactNode;
@ -230,8 +246,13 @@ const Composer: FC = () => {
const [mentionedDocuments, setMentionedDocuments] = useAtom(mentionedDocumentsAtom);
const [showDocumentPopover, setShowDocumentPopover] = useState(false);
const [mentionQuery, setMentionQuery] = useState("");
const [uploadedMentionDocs, setUploadedMentionDocs] = useState<
Record<number, UploadedMentionDoc>
>({});
const [isUploadingDocs, setIsUploadingDocs] = useState(false);
const editorRef = useRef<InlineMentionEditorRef>(null);
const editorContainerRef = useRef<HTMLDivElement>(null);
const uploadInputRef = useRef<HTMLInputElement>(null);
const documentPickerRef = useRef<DocumentMentionPickerRef>(null);
const { search_space_id, chat_id } = useParams();
const setMentionedDocumentIds = useSetAtom(mentionedDocumentIdsAtom);
@ -357,9 +378,28 @@ const Composer: FC = () => {
[showDocumentPopover]
);
const uploadedMentionedDocs = useMemo(
() => mentionedDocuments.filter((doc) => uploadedMentionDocs[doc.id]),
[mentionedDocuments, uploadedMentionDocs]
);
const blockingUploadedMentions = useMemo(
() =>
uploadedMentionedDocs.filter((doc) => {
const state = uploadedMentionDocs[doc.id]?.state;
return state === "pending" || state === "processing" || state === "failed";
}),
[uploadedMentionedDocs, uploadedMentionDocs]
);
// Submit message (blocked during streaming, document picker open, or AI responding to another user)
const handleSubmit = useCallback(() => {
if (isThreadRunning || isBlockedByOtherUser) {
if (
isThreadRunning ||
isBlockedByOtherUser ||
isUploadingDocs ||
blockingUploadedMentions.length > 0
) {
return;
}
if (!showDocumentPopover) {
@ -375,6 +415,8 @@ const Composer: FC = () => {
showDocumentPopover,
isThreadRunning,
isBlockedByOtherUser,
isUploadingDocs,
blockingUploadedMentions.length,
composerRuntime,
setMentionedDocuments,
setMentionedDocumentIds,
@ -395,6 +437,11 @@ const Composer: FC = () => {
});
return updated;
});
setUploadedMentionDocs((prev) => {
if (!(docId in prev)) return prev;
const { [docId]: _removed, ...rest } = prev;
return rest;
});
},
[setMentionedDocuments, setMentionedDocumentIds]
);
@ -433,6 +480,139 @@ const Composer: FC = () => {
[mentionedDocuments, setMentionedDocuments, setMentionedDocumentIds]
);
// Fetches the latest processing status for the given document ids, merges the
// results into the uploadedMentionDocs map, and (re-)mentions the documents so
// their chips appear in the editor. No-op without a search space or ids.
// NOTE(review): errors are not caught here — callers are expected to handle
// rejections (the polling effect logs them).
const refreshUploadedDocStatuses = useCallback(
	async (documentIds: number[]) => {
		if (!search_space_id || documentIds.length === 0) return;
		const statusResponse = await documentsApiService.getDocumentsStatus({
			queryParams: {
				search_space_id: Number(search_space_id),
				document_ids: documentIds,
			},
		});
		// Merge fresh statuses over any previously tracked entries.
		setUploadedMentionDocs((prev) => {
			const next = { ...prev };
			for (const item of statusResponse.items) {
				next[item.id] = {
					id: item.id,
					title: item.title,
					document_type: item.document_type,
					state: item.status.state,
					reason: item.status.reason,
				};
			}
			return next;
		});
		// Ensure every refreshed document is mentioned as a chip in the composer.
		handleDocumentsMention(
			statusResponse.items.map((item) => ({
				id: item.id,
				title: item.title,
				document_type: item.document_type,
			}))
		);
	},
	[search_space_id, handleDocumentsMention]
);
// Programmatically opens the hidden file input used for composer uploads.
const handleUploadClick = useCallback(() => uploadInputRef.current?.click(), []);
// Handles file selection from the hidden upload input: uploads the files to the
// search space, mentions both newly created and duplicate-matched documents,
// and surfaces a toast summarizing the outcome. The input value is reset first
// so picking the same file twice re-fires onChange.
const handleUploadInputChange = useCallback(
	async (event: React.ChangeEvent<HTMLInputElement>) => {
		const files = Array.from(event.target.files ?? []);
		// Reset so re-selecting the same file triggers onChange again.
		event.target.value = "";
		if (files.length === 0 || !search_space_id) return;
		// Single pluralization helper instead of repeating the ternary in every message.
		const plural = (count: number) => (count > 1 ? "s" : "");
		setIsUploadingDocs(true);
		try {
			const uploadResponse = await documentsApiService.uploadDocument({
				files,
				search_space_id: Number(search_space_id),
			});
			const uploadedIds = uploadResponse.document_ids ?? [];
			const duplicateIds = uploadResponse.duplicate_document_ids ?? [];
			// De-duplicate in case an id appears in both lists.
			const idsToMention = Array.from(new Set([...uploadedIds, ...duplicateIds]));
			if (idsToMention.length === 0) {
				toast.warning("No documents were created or matched from selected files.");
				return;
			}
			await refreshUploadedDocStatuses(idsToMention);
			if (uploadedIds.length > 0 && duplicateIds.length > 0) {
				toast.success(
					`Uploaded ${uploadedIds.length} file${plural(uploadedIds.length)} and matched ${duplicateIds.length} existing file${plural(duplicateIds.length)}.`
				);
			} else if (uploadedIds.length > 0) {
				toast.success(`Uploaded ${uploadedIds.length} file${plural(uploadedIds.length)}`);
			} else {
				toast.success(
					`Matched ${duplicateIds.length} existing file${plural(duplicateIds.length)} and added mention${plural(duplicateIds.length)}.`
				);
			}
		} catch (error) {
			const message = error instanceof Error ? error.message : "Upload failed";
			toast.error(`Upload failed: ${message}`);
		} finally {
			setIsUploadingDocs(false);
		}
	},
	[search_space_id, refreshUploadedDocStatuses]
);
// Poll status for uploaded mentioned documents until all are ready or removed.
// Polls document statuses every 2.5s while any mentioned uploaded doc is still
// pending/processing; stops once all are ready/failed or un-mentioned.
// NOTE(review): each refresh mutates uploadedMentionDocs, which re-runs this
// effect and recreates the interval — effectively a refresh-then-reschedule
// loop rather than a single long-lived interval; looks intentional, confirm.
useEffect(() => {
	const trackedIds = uploadedMentionedDocs.map((doc) => doc.id);
	const needsPolling = trackedIds.some((id) => {
		const state = uploadedMentionDocs[id]?.state;
		return state === "pending" || state === "processing";
	});
	if (!needsPolling) return;

	const interval = setInterval(() => {
		refreshUploadedDocStatuses(trackedIds).catch((error) => {
			console.error("[Composer] Failed to refresh uploaded mention statuses:", error);
		});
	}, 2500);

	return () => clearInterval(interval);
}, [uploadedMentionedDocs, uploadedMentionDocs, refreshUploadedDocStatuses]);
// Push upload status directly onto mention chips (instead of separate status rows).
// Mirrors each tracked upload's state onto its mention chip as a small badge.
// A null label (state === "ready") hides the badge entirely.
useEffect(() => {
	const BADGE_LABELS: Record<UploadState, string | null> = {
		ready: null,
		failed: "failed",
		processing: "indexing",
		pending: "queued",
	};
	for (const doc of uploadedMentionedDocs) {
		const state = uploadedMentionDocs[doc.id]?.state ?? "pending";
		editorRef.current?.setDocumentChipStatus(doc.id, doc.document_type, BADGE_LABELS[state], state);
	}
}, [uploadedMentionedDocs, uploadedMentionDocs]);
// Prune upload status entries that are no longer mentioned in the composer.
// Drops upload-status entries for documents no longer mentioned in the composer.
useEffect(() => {
	const stillMentioned = new Set(mentionedDocuments.map((doc) => doc.id));
	setUploadedMentionDocs((prev) => {
		const keptEntries = Object.entries(prev).filter(([key]) => stillMentioned.has(Number(key)));
		// Return the previous object untouched when nothing was pruned,
		// so downstream consumers don't re-render needlessly.
		if (keptEntries.length === Object.keys(prev).length) {
			return prev;
		}
		return Object.fromEntries(keptEntries) as Record<number, UploadedMentionDoc>;
	});
}, [mentionedDocuments]);
return (
<ComposerPrimitive.Root className="aui-composer-root relative flex w-full flex-col gap-2">
<ChatSessionStatus
@ -441,8 +621,7 @@ const Composer: FC = () => {
currentUserId={currentUser?.id ?? null}
members={members ?? []}
/>
<ComposerPrimitive.AttachmentDropzone className="aui-composer-attachment-dropzone flex w-full flex-col rounded-2xl border-input bg-muted px-1 pt-2 outline-none transition-shadow data-[dragging=true]:border-ring data-[dragging=true]:border-dashed data-[dragging=true]:bg-accent/50">
<ComposerAttachments />
<div className="aui-composer-attachment-dropzone flex w-full flex-col rounded-2xl border-input bg-muted px-1 pt-2 outline-none transition-shadow">
{/* Inline editor with @mention support */}
<div ref={editorContainerRef} className="aui-composer-input-wrapper px-3 pt-3 pb-6">
<InlineMentionEditor
@ -457,6 +636,14 @@ const Composer: FC = () => {
className="min-h-[24px]"
/>
</div>
<input
ref={uploadInputRef}
type="file"
multiple
accept={CHAT_UPLOAD_ACCEPT}
onChange={handleUploadInputChange}
className="hidden"
/>
{/* Document picker popover (portal to body for proper z-index stacking) */}
{showDocumentPopover &&
@ -483,33 +670,43 @@ const Composer: FC = () => {
/>,
document.body
)}
<ComposerAction isBlockedByOtherUser={isBlockedByOtherUser} />
</ComposerPrimitive.AttachmentDropzone>
<ComposerAction
isBlockedByOtherUser={isBlockedByOtherUser}
onUploadClick={handleUploadClick}
isUploadingDocs={isUploadingDocs}
blockingUploadedMentionsCount={blockingUploadedMentions.length}
hasFailedUploadedMentions={blockingUploadedMentions.some(
(doc) => uploadedMentionDocs[doc.id]?.state === "failed"
)}
/>
</div>
</ComposerPrimitive.Root>
);
};
/** Props for the composer's action row (upload button, status hints, send button). */
interface ComposerActionProps {
	// True while the AI is responding to another collaborator; sending is blocked.
	isBlockedByOtherUser?: boolean;
	// Opens the hidden file input for composer uploads.
	onUploadClick: () => void;
	// True while an upload request is in flight.
	isUploadingDocs: boolean;
	// Count of mentioned uploads still pending/processing/failed (blocks sending).
	blockingUploadedMentionsCount: number;
	// True when at least one mentioned upload has failed indexing.
	hasFailedUploadedMentions: boolean;
}
const ComposerAction: FC<ComposerActionProps> = ({ isBlockedByOtherUser = false }) => {
// Check if any attachments are still being processed (running AND progress < 100)
// When progress is 100, processing is done but waiting for send()
const hasProcessingAttachments = useAssistantState(({ composer }) =>
composer.attachments?.some((att) => {
const status = att.status;
if (status?.type !== "running") return false;
const progress = (status as { type: "running"; progress?: number }).progress;
return progress === undefined || progress < 100;
})
);
const ComposerAction: FC<ComposerActionProps> = ({
isBlockedByOtherUser = false,
onUploadClick,
isUploadingDocs,
blockingUploadedMentionsCount,
hasFailedUploadedMentions,
}) => {
const mentionedDocuments = useAtomValue(mentionedDocumentsAtom);
// Check if composer text is empty
const isComposerEmpty = useAssistantState(({ composer }) => {
// Check if composer text is empty (chips are represented in mentionedDocuments atom)
const isComposerTextEmpty = useAssistantState(({ composer }) => {
const text = composer.text?.trim() || "";
return text.length === 0;
});
const isComposerEmpty = isComposerTextEmpty && mentionedDocuments.length === 0;
// Check if a model is configured
const { data: userConfigs } = useAtomValue(newLLMConfigsAtom);
@ -530,25 +727,47 @@ const ComposerAction: FC<ComposerActionProps> = ({ isBlockedByOtherUser = false
}, [preferences, globalConfigs, userConfigs]);
const isSendDisabled =
hasProcessingAttachments || isComposerEmpty || !hasModelConfigured || isBlockedByOtherUser;
isComposerEmpty ||
!hasModelConfigured ||
isBlockedByOtherUser ||
isUploadingDocs ||
blockingUploadedMentionsCount > 0;
return (
<div className="aui-composer-action-wrapper relative mx-2 mb-2 flex items-center justify-between">
<div className="flex items-center gap-1">
<ComposerAddAttachment />
<TooltipIconButton
tooltip={isUploadingDocs ? "Uploading documents..." : "Upload and mention files"}
side="bottom"
variant="ghost"
size="icon"
className="size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
aria-label="Upload files"
onClick={onUploadClick}
disabled={isUploadingDocs}
>
{isUploadingDocs ? (
<Spinner size="sm" className="text-muted-foreground" />
) : (
<Paperclip className="size-4" />
)}
</TooltipIconButton>
<ConnectorIndicator />
</div>
{/* Show processing indicator when attachments are being processed */}
{hasProcessingAttachments && (
{blockingUploadedMentionsCount > 0 && (
<div className="flex items-center gap-1.5 text-muted-foreground text-xs">
<Loader2 className="size-3 animate-spin" />
<span>Processing...</span>
{hasFailedUploadedMentions ? <FileWarning className="size-3" /> : <Spinner size="xs" />}
<span>
{hasFailedUploadedMentions
? "Remove or retry failed uploads"
: "Waiting for uploaded files to finish indexing"}
</span>
</div>
)}
{/* Show warning when no model is configured */}
{!hasModelConfigured && !hasProcessingAttachments && (
{!hasModelConfigured && blockingUploadedMentionsCount === 0 && (
<div className="flex items-center gap-1.5 text-amber-600 dark:text-amber-400 text-xs">
<AlertCircle className="size-3" />
<span>Select a model</span>
@ -561,13 +780,17 @@ const ComposerAction: FC<ComposerActionProps> = ({ isBlockedByOtherUser = false
tooltip={
isBlockedByOtherUser
? "Wait for AI to finish responding"
: !hasModelConfigured
? "Please select a model from the header to start chatting"
: hasProcessingAttachments
? "Wait for attachments to process"
: isComposerEmpty
? "Enter a message to send"
: "Send message"
: hasFailedUploadedMentions
? "Remove or retry failed uploads before sending"
: blockingUploadedMentionsCount > 0
? "Waiting for uploaded files to finish indexing"
: isUploadingDocs
? "Uploading documents..."
: !hasModelConfigured
? "Please select a model from the header to start chatting"
: isComposerEmpty
? "Enter a message to send"
: "Send message"
}
side="bottom"
type="submit"

View file

@ -3,7 +3,6 @@ import { useAtomValue } from "jotai";
import { FileText, PencilIcon } from "lucide-react";
import { type FC, useState } from "react";
import { messageDocumentsMapAtom } from "@/atoms/chat/mentioned-documents.atom";
import { UserMessageAttachments } from "@/components/assistant-ui/attachment";
import { TooltipIconButton } from "@/components/assistant-ui/tooltip-icon-button";
interface AuthorMetadata {
@ -48,9 +47,6 @@ export const UserMessage: FC = () => {
const mentionedDocs = messageId ? messageDocumentsMap[messageId] : undefined;
const metadata = useAssistantState(({ message }) => message?.metadata);
const author = metadata?.custom?.author as AuthorMetadata | undefined;
const hasAttachments = useAssistantState(
({ message }) => message?.attachments && message.attachments.length > 0
);
return (
<MessagePrimitive.Root
@ -59,11 +55,9 @@ export const UserMessage: FC = () => {
>
<div className="aui-user-message-content-wrapper col-start-2 min-w-0 flex items-end gap-2">
<div className="flex-1 min-w-0">
{/* Display attachments and mentioned documents */}
{(hasAttachments || (mentionedDocs && mentionedDocs.length > 0)) && (
{/* Display mentioned documents */}
{mentionedDocs && mentionedDocs.length > 0 && (
<div className="flex flex-wrap items-end gap-2 mb-2 justify-end">
{/* Attachments (images show as thumbnails, documents as chips) */}
<UserMessageAttachments />
{/* Mentioned documents as chips */}
{mentionedDocs?.map((doc) => (
<span

View file

@ -43,7 +43,7 @@ export function FooterNew() {
},
{
title: "LinkedIn",
href: "https://www.linkedin.com/in/rohan-verma-sde/",
href: "https://www.linkedin.com/company/surfsense/",
icon: IconBrandLinkedin,
},
{

View file

@ -34,8 +34,8 @@ const GoogleLogo = ({ className }: { className?: string }) => (
export function HeroSection() {
const containerRef = useRef<HTMLDivElement>(null);
const parentRef = useRef<HTMLDivElement>(null);
const heroVariant = useFeatureFlagVariantKey("notebooklm_flag");
const isNotebookLMVariant = heroVariant === "notebooklm";
const heroVariant = useFeatureFlagVariantKey("notebooklm_superpowers_flag");
const isNotebookLMVariant = heroVariant === "superpowers";
return (
<div
@ -89,25 +89,24 @@ export function HeroSection() {
{isNotebookLMVariant ? (
<div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]">
<div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white">
<span className="">NotebookLM for Teams</span>
<span className="">NotebookLM with Superpowers</span>
</div>
</div>
) : (
<>
The AI Workspace{" "}
<div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]">
<div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white">
<span className="">Built for Teams</span>
</div>
<div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]">
<div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white">
<span className="">NotebookLM for Teams</span>
</div>
</>
</div>
)}
</Balancer>
</h2>
{/* TODO: write the actual hero description */}
<p className="relative z-50 mx-auto mt-4 max-w-lg px-4 text-center text-base/6 text-gray-600 dark:text-gray-200">
Connect any LLM to your internal knowledge sources and chat with it in real time alongside
your team.
Connect any AI to your documents and knowledge sources.
</p>
<p className="relative z-50 mx-auto mt-0 max-w-lg px-4 text-center text-base/6 text-gray-600 dark:text-gray-200">
Then chat with it in real-time, even alongside your team.
</p>
<div className="mb-10 mt-8 flex w-full flex-col items-center justify-center gap-4 px-8 sm:flex-row md:mb-20">
<GetStartedButton />

View file

@ -1,5 +1,7 @@
"use client";
import React, { useEffect, useState } from "react";
import type React from "react";
import Image from "next/image";
interface Integration {
name: string;
@ -8,181 +10,210 @@ interface Integration {
const INTEGRATIONS: Integration[] = [
// Search
{ name: "Tavily", icon: "https://www.tavily.com/images/logo.svg" },
{
name: "LinkUp",
icon: "https://framerusercontent.com/images/7zeIm6t3f1HaSltkw8upEvsD80.png?scale-down-to=512",
},
{ name: "Elasticsearch", icon: "https://cdn.simpleicons.org/elastic/00A9E5" },
{ name: "Tavily", icon: "/connectors/tavily.svg" },
{ name: "Elasticsearch", icon: "/connectors/elasticsearch.svg" },
{ name: "Baidu Search", icon: "/connectors/baidu-search.svg" },
{ name: "SearXNG", icon: "/connectors/searxng.svg" },
// Communication
{
name: "Slack",
icon: "https://upload.wikimedia.org/wikipedia/commons/d/d5/Slack_icon_2019.svg",
},
{ name: "Discord", icon: "https://cdn.simpleicons.org/discord/5865F2" },
{ name: "Gmail", icon: "https://cdn.simpleicons.org/gmail/EA4335" },
{ name: "Slack", icon: "/connectors/slack.svg" },
{ name: "Discord", icon: "/connectors/discord.svg" },
{ name: "Gmail", icon: "/connectors/google-gmail.svg" },
{ name: "Microsoft Teams", icon: "/connectors/microsoft-teams.svg" },
// Project Management
{ name: "Linear", icon: "https://cdn.simpleicons.org/linear/5E6AD2" },
{ name: "Jira", icon: "https://cdn.simpleicons.org/jira/0052CC" },
{ name: "ClickUp", icon: "https://cdn.simpleicons.org/clickup/7B68EE" },
{ name: "Airtable", icon: "https://cdn.simpleicons.org/airtable/18BFFF" },
{ name: "Linear", icon: "/connectors/linear.svg" },
{ name: "Jira", icon: "/connectors/jira.svg" },
{ name: "ClickUp", icon: "/connectors/clickup.svg" },
{ name: "Airtable", icon: "/connectors/airtable.svg" },
// Documentation & Knowledge
{ name: "Confluence", icon: "https://cdn.simpleicons.org/confluence/172B4D" },
{ name: "Notion", icon: "https://cdn.simpleicons.org/notion/000000/ffffff" },
{ name: "Web Pages", icon: "https://cdn.jsdelivr.net/npm/lucide-static@0.294.0/icons/globe.svg" },
{ name: "Confluence", icon: "/connectors/confluence.svg" },
{ name: "Notion", icon: "/connectors/notion.svg" },
{ name: "BookStack", icon: "/connectors/bookstack.svg" },
{ name: "Obsidian", icon: "/connectors/obsidian.svg" },
// Cloud Storage
{ name: "Google Drive", icon: "https://cdn.simpleicons.org/googledrive/4285F4" },
{ name: "Dropbox", icon: "https://cdn.simpleicons.org/dropbox/0061FF" },
{
name: "Amazon S3",
icon: "https://upload.wikimedia.org/wikipedia/commons/b/bc/Amazon-S3-Logo.svg",
},
{ name: "Google Drive", icon: "/connectors/google-drive.svg" },
// Development
{ name: "GitHub", icon: "https://cdn.simpleicons.org/github/181717/ffffff" },
{ name: "GitHub", icon: "/connectors/github.svg" },
// Productivity
{ name: "Google Calendar", icon: "https://cdn.simpleicons.org/googlecalendar/4285F4" },
{ name: "Luma", icon: "https://images.lumacdn.com/social-images/default-social-202407.png" },
{ name: "Google Calendar", icon: "/connectors/google-calendar.svg" },
{ name: "Luma", icon: "/connectors/luma.svg" },
// Media
{ name: "YouTube", icon: "https://cdn.simpleicons.org/youtube/FF0000" },
{ name: "YouTube", icon: "/connectors/youtube.svg" },
// Search
{ name: "Linkup", icon: "/connectors/linkup.svg" },
// Meetings
{ name: "Circleback", icon: "/connectors/circleback.svg" },
// AI
{ name: "MCP", icon: "/connectors/modelcontextprotocol.svg" },
];
function SemiCircleOrbit({ radius, centerX, centerY, count, iconSize, startIndex }: any) {
// 5 vertical columns — 23 icons spread across categories
const COLUMNS: number[][] = [
[2, 5, 10, 0, 21, 11],
[1, 7, 20, 17],
[13, 6, 23, 4, 16],
[12, 8, 15, 18],
[3, 9, 14, 22, 19],
];
// Different scroll speeds per column for organic feel (seconds)
const SCROLL_DURATIONS = [26, 32, 22, 30, 28];
function IntegrationCard({ integration }: { integration: Integration }) {
return (
<>
{/* Semi-circle glow background */}
<div className="absolute inset-0 flex justify-center items-start overflow-visible">
<div
className="
w-[800px] h-[800px] rounded-full
bg-[radial-gradient(circle_at_center,rgba(0,0,0,0.15),transparent_70%)]
dark:bg-[radial-gradient(circle_at_center,rgba(255,255,255,0.15),transparent_70%)]
blur-3xl
pointer-events-none
"
style={{
zIndex: 0,
transform: "translateY(-20%)",
}}
/>
<div
className="w-[60px] h-[60px] sm:w-[80px] sm:h-[80px] md:w-[120px] md:h-[120px] lg:w-[140px] lg:h-[140px] rounded-[16px] sm:rounded-[20px] md:rounded-[24px] flex items-center justify-center shrink-0 select-none"
style={{
background: "linear-gradient(145deg, var(--card-from), var(--card-to))",
boxShadow: "inset 0 1px 0 0 var(--card-highlight), 0 4px 24px var(--card-shadow)",
}}
>
<Image
src={integration.icon}
alt={integration.name}
className="w-6 h-6 sm:w-7 sm:h-7 md:w-10 md:h-10 lg:w-12 lg:h-12 object-contain select-none pointer-events-none"
loading="lazy"
draggable={false}
width={48}
height={48}
/>
</div>
);
}
function ScrollingColumn({
cards,
scrollUp,
duration,
colIndex,
isEdge,
isEdgeAdjacent,
}: {
cards: number[];
scrollUp: boolean;
duration: number;
colIndex: number;
isEdge: boolean;
isEdgeAdjacent: boolean;
}) {
// Edge columns get a heavy vertical mask; edge-adjacent columns get a lighter one to smooth the transition
const columnMask = isEdge
? {
maskImage:
"linear-gradient(to bottom, transparent 0%, transparent 20%, black 40%, black 60%, transparent 80%, transparent 100%)",
WebkitMaskImage:
"linear-gradient(to bottom, transparent 0%, transparent 20%, black 40%, black 60%, transparent 80%, transparent 100%)",
}
: isEdgeAdjacent
? {
maskImage:
"linear-gradient(to bottom, transparent 0%, transparent 10%, black 30%, black 70%, transparent 90%, transparent 100%)",
WebkitMaskImage:
"linear-gradient(to bottom, transparent 0%, transparent 10%, black 30%, black 70%, transparent 90%, transparent 100%)",
}
: {};
const cardSet = cards.map((integrationIndex, i) => (
<IntegrationCard
key={`${INTEGRATIONS[integrationIndex].name}-c${colIndex}-${i}`}
integration={INTEGRATIONS[integrationIndex]}
/>
));
return (
<div
className="flex-shrink-0 overflow-hidden"
style={{ ...columnMask, contain: "layout style paint" }}
>
{/* Outer div has NO gap — each inner copy uses pb matching the gap so both halves are identical in height → seamless -50% loop */}
<div
className="flex flex-col"
style={{
animation: `${scrollUp ? "integrations-scroll-up" : "integrations-scroll-down"} ${duration}s linear infinite`,
willChange: "transform",
transform: "translateZ(0)",
}}
>
<div className="flex flex-col gap-2 sm:gap-3 md:gap-5 lg:gap-6 pb-2 sm:pb-3 md:pb-5 lg:pb-6">
{cardSet}
</div>
<div className="flex flex-col gap-2 sm:gap-3 md:gap-5 lg:gap-6 pb-2 sm:pb-3 md:pb-5 lg:pb-6">
{cardSet}
</div>
</div>
{/* Orbit icons */}
{Array.from({ length: count }).map((_, index) => {
const actualIndex = startIndex + index;
// Skip if we've run out of integrations
if (actualIndex >= INTEGRATIONS.length) return null;
const angle = (index / (count - 1)) * 180;
const x = radius * Math.cos((angle * Math.PI) / 180);
const y = radius * Math.sin((angle * Math.PI) / 180);
const integration = INTEGRATIONS[actualIndex];
// Tooltip positioning — above or below based on angle
const tooltipAbove = angle > 90;
return (
<div
key={index}
className="absolute flex flex-col items-center group"
style={{
left: `${centerX + x - iconSize / 2}px`,
top: `${centerY - y - iconSize / 2}px`,
zIndex: 5,
}}
>
<img
src={integration.icon}
alt={integration.name}
width={iconSize}
height={iconSize}
className="object-contain cursor-pointer transition-transform hover:scale-110"
style={{ minWidth: iconSize, minHeight: iconSize }} // fix accidental shrink
/>
{/* Tooltip */}
<div
className={`absolute ${
tooltipAbove ? "bottom-[calc(100%+8px)]" : "top-[calc(100%+8px)]"
} hidden group-hover:block w-auto min-w-max rounded-lg bg-black px-3 py-1.5 text-xs text-white shadow-lg text-center whitespace-nowrap`}
>
{integration.name}
<div
className={`absolute left-1/2 -translate-x-1/2 w-3 h-3 rotate-45 bg-black ${
tooltipAbove ? "top-full" : "bottom-full"
}`}
></div>
</div>
</div>
);
})}
</>
</div>
);
}
export default function ExternalIntegrations() {
const [size, setSize] = useState({ width: 0, height: 0 });
useEffect(() => {
const updateSize = () => setSize({ width: window.innerWidth, height: window.innerHeight });
updateSize();
window.addEventListener("resize", updateSize);
return () => window.removeEventListener("resize", updateSize);
}, []);
const baseWidth = Math.min(size.width * 0.8, 700);
const centerX = baseWidth / 2;
const centerY = baseWidth * 0.5;
const iconSize =
size.width < 480
? Math.max(24, baseWidth * 0.05)
: size.width < 768
? Math.max(28, baseWidth * 0.06)
: Math.max(32, baseWidth * 0.07);
return (
<section className="py-12 relative min-h-screen w-full overflow-visible">
<div className="relative flex flex-col items-center text-center z-10">
<h1 className="my-6 text-4xl font-bold lg:text-7xl">Integrations</h1>
<p className="mb-12 max-w-2xl text-gray-600 dark:text-gray-400 lg:text-xl">
Integrate with your team's most important tools
</p>
<section
className={[
"relative py-20 md:py-28 overflow-hidden",
// No explicit background — inherits the page gradient for seamless blending
// CSS custom properties — light mode (card styling)
"[--card-from:rgba(255,255,255,0.9)]",
"[--card-to:rgba(245,245,248,0.92)]",
"[--card-highlight:rgba(255,255,255,0.5)]",
"[--card-lowlight:transparent]",
"[--card-shadow:transparent]",
"[--card-border:transparent]",
// CSS custom properties — dark mode (card styling)
"dark:[--card-from:rgb(28,28,32)]",
"dark:[--card-to:rgb(28,28,32)]",
"dark:[--card-highlight:rgba(255,255,255,0.03)]",
"dark:[--card-lowlight:rgba(0,0,0,0.1)]",
"dark:[--card-shadow:rgba(0,0,0,0.15)]",
"dark:[--card-border:rgba(255,255,255,0.03)]",
].join(" ")}
>
{/* Heading */}
<div className="text-center mb-12 md:mb-16 relative z-20 px-4">
<h3 className="text-3xl sm:text-4xl md:text-5xl lg:text-6xl font-bold text-gray-900 dark:text-white leading-[1.1] tracking-tight">
Integrate with your
<br />
team&apos;s most important tools
</h3>
</div>
<div
className="relative overflow-visible"
style={{ width: baseWidth, height: baseWidth * 0.7, paddingBottom: "100px" }}
>
<SemiCircleOrbit
radius={baseWidth * 0.22}
centerX={centerX}
centerY={centerY}
count={5}
iconSize={iconSize}
startIndex={0}
/>
<SemiCircleOrbit
radius={baseWidth * 0.36}
centerX={centerX}
centerY={centerY}
count={6}
iconSize={iconSize}
startIndex={5}
/>
<SemiCircleOrbit
radius={baseWidth * 0.5}
centerX={centerX}
centerY={centerY}
count={8}
iconSize={iconSize}
startIndex={11}
/>
{/* Scrolling columns container — masked at edges so the page background shows through seamlessly */}
<div
className="relative"
style={
{
maskImage:
"linear-gradient(to bottom, transparent 0%, black 25%, black 70%, transparent 100%), " +
"linear-gradient(to right, transparent 0%, black 12%, black 88%, transparent 100%)",
WebkitMaskImage:
"linear-gradient(to bottom, transparent 0%, black 25%, black 75%, transparent 100%), " +
"linear-gradient(to right, transparent 0%, black 12%, black 88%, transparent 100%)",
maskComposite: "intersect",
WebkitMaskComposite: "source-in",
} as React.CSSProperties
}
>
{/* 5 scrolling columns */}
<div className="flex justify-center gap-2 sm:gap-3 md:gap-5 lg:gap-6 h-[340px] sm:h-[420px] md:h-[560px] lg:h-[640px] overflow-hidden">
{COLUMNS.map((column, colIndex) => (
<ScrollingColumn
key={`col-${SCROLL_DURATIONS[colIndex]}-${colIndex}`}
cards={column}
scrollUp={colIndex % 2 === 0}
duration={SCROLL_DURATIONS[colIndex]}
colIndex={colIndex}
isEdge={colIndex === 0 || colIndex === COLUMNS.length - 1}
isEdgeAdjacent={colIndex === 1 || colIndex === COLUMNS.length - 2}
/>
))}
</div>
</div>
</section>

View file

@ -1,4 +1,4 @@
import { FileJson, Loader2 } from "lucide-react";
import { FileJson } from "lucide-react";
import React from "react";
import { defaultStyles, JsonView } from "react-json-view-lite";
import { Button } from "@/components/ui/button";
@ -9,6 +9,7 @@ import {
DialogTitle,
DialogTrigger,
} from "@/components/ui/dialog";
import { Spinner } from "@/components/ui/spinner";
import "react-json-view-lite/dist/index.css";
interface JsonMetadataViewerProps {
@ -58,7 +59,7 @@ export function JsonMetadataViewer({
<div className="mt-2 sm:mt-4 p-2 sm:p-4 bg-muted/30 rounded-md text-xs sm:text-sm">
{loading ? (
<div className="flex items-center justify-center py-12">
<Loader2 className="h-8 w-8 animate-spin text-muted-foreground" />
<Spinner size="lg" className="text-muted-foreground" />
</div>
) : (
<JsonView data={jsonData} style={defaultStyles} />

View file

@ -6,6 +6,7 @@ interface SidebarContextValue {
isCollapsed: boolean;
setIsCollapsed: (collapsed: boolean) => void;
toggleCollapsed: () => void;
sidebarWidth: number;
}
const SidebarContext = createContext<SidebarContextValue | null>(null);

View file

@ -0,0 +1,101 @@
"use client";
import { useCallback, useEffect, useRef, useState } from "react";
// Cookie key used to persist the user's chosen sidebar width across sessions.
const SIDEBAR_WIDTH_COOKIE_NAME = "sidebar_width";
const SIDEBAR_WIDTH_COOKIE_MAX_AGE = 60 * 60 * 24 * 365; // 1 year
// Clamp bounds (px) — applied both when reading the cookie and while dragging.
export const SIDEBAR_MIN_WIDTH = 240;
export const SIDEBAR_MAX_WIDTH = 480;
// Return shape of the useSidebarResize hook.
interface UseSidebarResizeReturn {
  // Current sidebar width in pixels, clamped to [SIDEBAR_MIN_WIDTH, SIDEBAR_MAX_WIDTH].
  sidebarWidth: number;
  // Attach to the resize handle's onMouseDown to begin a drag.
  handleMouseDown: (e: React.MouseEvent) => void;
  // True while a drag is in progress.
  isDragging: boolean;
}
/**
 * Hook that makes the sidebar horizontally resizable via a drag handle.
 *
 * Width is clamped to [SIDEBAR_MIN_WIDTH, SIDEBAR_MAX_WIDTH], restored from a
 * cookie on mount, and persisted back to the cookie when a drag ends.
 *
 * @param defaultWidth Initial width (px) used before the cookie is read.
 * @returns `sidebarWidth`, a `handleMouseDown` to attach to the resize
 *          handle, and `isDragging` for styling during a drag.
 */
export function useSidebarResize(defaultWidth = SIDEBAR_MIN_WIDTH): UseSidebarResizeReturn {
  const [sidebarWidth, setSidebarWidth] = useState(defaultWidth);
  const [isDragging, setIsDragging] = useState(false);
  const startXRef = useRef(0);
  const startWidthRef = useRef(defaultWidth);
  // Mirrors the most recent width so the mouseup handler can persist it
  // without running side effects inside a React state updater — updaters must
  // be pure (StrictMode may invoke them twice, which would double-write the
  // cookie with the previous approach).
  const latestWidthRef = useRef(defaultWidth);

  // Initialize from cookie on mount (client-only)
  useEffect(() => {
    try {
      // Build the pattern from the constant so the key is defined in one place.
      const match = document.cookie.match(
        new RegExp(`(?:^|; )${SIDEBAR_WIDTH_COOKIE_NAME}=([^;]+)`)
      );
      if (match) {
        const parsed = Number(match[1]);
        // Reject NaN and out-of-range values (e.g. a stale or tampered cookie).
        if (!Number.isNaN(parsed) && parsed >= SIDEBAR_MIN_WIDTH && parsed <= SIDEBAR_MAX_WIDTH) {
          setSidebarWidth(parsed);
          latestWidthRef.current = parsed;
        }
      }
    } catch {
      // Ignore cookie read errors
    }
  }, []);

  // Persist width to cookie
  const persistWidth = useCallback((width: number) => {
    try {
      document.cookie = `${SIDEBAR_WIDTH_COOKIE_NAME}=${width}; path=/; max-age=${SIDEBAR_WIDTH_COOKIE_MAX_AGE}`;
    } catch {
      // Ignore cookie write errors
    }
  }, []);

  const handleMouseDown = useCallback(
    (e: React.MouseEvent) => {
      e.preventDefault();
      startXRef.current = e.clientX;
      startWidthRef.current = sidebarWidth;
      latestWidthRef.current = sidebarWidth;
      setIsDragging(true);
      // Global cursor/selection overrides for the duration of the drag.
      document.body.style.cursor = "col-resize";
      document.body.style.userSelect = "none";
    },
    [sidebarWidth]
  );

  // Document-level listeners are installed only while dragging so the drag
  // keeps tracking even when the pointer leaves the handle.
  useEffect(() => {
    if (!isDragging) return;
    const handleMouseMove = (e: MouseEvent) => {
      const delta = e.clientX - startXRef.current;
      const newWidth = Math.min(
        SIDEBAR_MAX_WIDTH,
        Math.max(SIDEBAR_MIN_WIDTH, startWidthRef.current + delta)
      );
      latestWidthRef.current = newWidth;
      setSidebarWidth(newWidth);
    };
    const handleMouseUp = () => {
      setIsDragging(false);
      document.body.style.cursor = "";
      document.body.style.userSelect = "";
      // Persist the final width, read from the ref (state updaters stay pure).
      persistWidth(latestWidthRef.current);
    };
    document.addEventListener("mousemove", handleMouseMove);
    document.addEventListener("mouseup", handleMouseUp);
    return () => {
      document.removeEventListener("mousemove", handleMouseMove);
      document.removeEventListener("mouseup", handleMouseUp);
      // Restore body styles if the component unmounts mid-drag.
      document.body.style.cursor = "";
      document.body.style.userSelect = "";
    };
  }, [isDragging, persistWidth]);

  return {
    sidebarWidth,
    handleMouseDown,
    isDragging,
  };
}

View file

@ -25,16 +25,14 @@ import { Input } from "@/components/ui/input";
import { isPageLimitExceededMetadata } from "@/contracts/types/inbox.types";
import { useInbox } from "@/hooks/use-inbox";
import { searchSpacesApiService } from "@/lib/apis/search-spaces-api.service";
import { deleteThread, fetchThreads, updateThread } from "@/lib/chat/thread-persistence";
import { logout } from "@/lib/auth-utils";
import { deleteThread, fetchThreads, updateThread } from "@/lib/chat/thread-persistence";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import type { ChatItem, NavItem, SearchSpace } from "../types/layout.types";
import { CreateSearchSpaceDialog } from "../ui/dialogs";
import { LayoutShell } from "../ui/shell";
import { AllPrivateChatsSidebar } from "../ui/sidebar/AllPrivateChatsSidebar";
import { AllSharedChatsSidebar } from "../ui/sidebar/AllSharedChatsSidebar";
interface LayoutDataProviderProps {
searchSpaceId: string;
@ -390,7 +388,13 @@ export function LayoutDataProvider({
(item: NavItem) => {
// Handle inbox specially - toggle sidebar instead of navigating
if (item.url === "#inbox") {
setIsInboxSidebarOpen((prev) => !prev);
setIsInboxSidebarOpen((prev) => {
if (!prev) {
setIsAllSharedChatsSidebarOpen(false);
setIsAllPrivateChatsSidebarOpen(false);
}
return !prev;
});
return;
}
router.push(item.url);
@ -490,10 +494,14 @@ export function LayoutDataProvider({
const handleViewAllSharedChats = useCallback(() => {
setIsAllSharedChatsSidebarOpen(true);
setIsAllPrivateChatsSidebarOpen(false);
setIsInboxSidebarOpen(false);
}, []);
const handleViewAllPrivateChats = useCallback(() => {
setIsAllPrivateChatsSidebarOpen(true);
setIsAllSharedChatsSidebarOpen(false);
setIsInboxSidebarOpen(false);
}, []);
// Delete handlers
@ -614,6 +622,16 @@ export function LayoutDataProvider({
isDocked: isInboxDocked,
onDockedChange: setIsInboxDocked,
}}
allSharedChatsPanel={{
open: isAllSharedChatsSidebarOpen,
onOpenChange: setIsAllSharedChatsSidebarOpen,
searchSpaceId,
}}
allPrivateChatsPanel={{
open: isAllPrivateChatsSidebarOpen,
onOpenChange: setIsAllPrivateChatsSidebarOpen,
searchSpaceId,
}}
>
{children}
</LayoutShell>
@ -796,20 +814,6 @@ export function LayoutDataProvider({
</DialogContent>
</Dialog>
{/* All Shared Chats Sidebar */}
<AllSharedChatsSidebar
open={isAllSharedChatsSidebarOpen}
onOpenChange={setIsAllSharedChatsSidebarOpen}
searchSpaceId={searchSpaceId}
/>
{/* All Private Chats Sidebar */}
<AllPrivateChatsSidebar
open={isAllPrivateChatsSidebarOpen}
onOpenChange={setIsAllPrivateChatsSidebarOpen}
searchSpaceId={searchSpaceId}
/>
{/* Create Search Space Dialog */}
<CreateSearchSpaceDialog
open={isCreateSearchSpaceDialogOpen}

View file

@ -1,8 +1,8 @@
"use client";
import { Settings, Trash2, Users } from "lucide-react";
import { useCallback, useRef, useState } from "react";
import { useTranslations } from "next-intl";
import { useCallback, useRef, useState } from "react";
import {
ContextMenu,
ContextMenuContent,

View file

@ -6,10 +6,18 @@ import type { InboxItem } from "@/hooks/use-inbox";
import { useIsMobile } from "@/hooks/use-mobile";
import { cn } from "@/lib/utils";
import { SidebarProvider, useSidebarState } from "../../hooks";
import { useSidebarResize } from "../../hooks/useSidebarResize";
import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types";
import { Header } from "../header";
import { IconRail } from "../icon-rail";
import { InboxSidebar, MobileSidebar, MobileSidebarTrigger, Sidebar } from "../sidebar";
import {
AllPrivateChatsSidebar,
AllSharedChatsSidebar,
InboxSidebar,
MobileSidebar,
MobileSidebarTrigger,
Sidebar,
} from "../sidebar";
// Tab-specific data source props
interface TabDataSource {
@ -75,6 +83,17 @@ interface LayoutShellProps {
// Inbox props
inbox?: InboxProps;
isLoadingChats?: boolean;
// All chats panel props
allSharedChatsPanel?: {
open: boolean;
onOpenChange: (open: boolean) => void;
searchSpaceId: string;
};
allPrivateChatsPanel?: {
open: boolean;
onOpenChange: (open: boolean) => void;
searchSpaceId: string;
};
}
export function LayoutShell({
@ -112,15 +131,22 @@ export function LayoutShell({
className,
inbox,
isLoadingChats = false,
allSharedChatsPanel,
allPrivateChatsPanel,
}: LayoutShellProps) {
const isMobile = useIsMobile();
const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
const { isCollapsed, setIsCollapsed, toggleCollapsed } = useSidebarState(defaultCollapsed);
const {
sidebarWidth,
handleMouseDown: onResizeMouseDown,
isDragging: isResizing,
} = useSidebarResize();
// Memoize context value to prevent unnecessary re-renders
const sidebarContextValue = useMemo(
() => ({ isCollapsed, setIsCollapsed, toggleCollapsed }),
[isCollapsed, setIsCollapsed, toggleCollapsed]
() => ({ isCollapsed, setIsCollapsed, toggleCollapsed, sidebarWidth }),
[isCollapsed, setIsCollapsed, toggleCollapsed, sidebarWidth]
);
// Mobile layout
@ -236,6 +262,9 @@ export function LayoutShell({
setTheme={setTheme}
className="hidden md:flex border-r shrink-0"
isLoadingChats={isLoadingChats}
sidebarWidth={sidebarWidth}
onResizeMouseDown={onResizeMouseDown}
isResizing={isResizing}
/>
{/* Docked Inbox Sidebar - renders as flex sibling between sidebar and content */}
@ -275,6 +304,24 @@ export function LayoutShell({
onDockedChange={inbox.onDockedChange}
/>
)}
{/* All Shared Chats - slide-out panel */}
{allSharedChatsPanel && (
<AllSharedChatsSidebar
open={allSharedChatsPanel.open}
onOpenChange={allSharedChatsPanel.onOpenChange}
searchSpaceId={allSharedChatsPanel.searchSpaceId}
/>
)}
{/* All Private Chats - slide-out panel */}
{allPrivateChatsPanel && (
<AllPrivateChatsSidebar
open={allPrivateChatsPanel.open}
onOpenChange={allPrivateChatsPanel.onOpenChange}
searchSpaceId={allPrivateChatsPanel.searchSpaceId}
/>
)}
</div>
</div>
</TooltipProvider>

View file

@ -12,11 +12,9 @@ import {
User,
X,
} from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import { useCallback, useEffect, useMemo, useState } from "react";
import { createPortal } from "react-dom";
import { toast } from "sonner";
import { Button } from "@/components/ui/button";
import {
@ -40,6 +38,7 @@ import {
updateThread,
} from "@/lib/chat/thread-persistence";
import { cn } from "@/lib/utils";
import { SidebarSlideOutPanel } from "./SidebarSlideOutPanel";
interface AllPrivateChatsSidebarProps {
open: boolean;
@ -69,16 +68,11 @@ export function AllPrivateChatsSidebar({
const [archivingThreadId, setArchivingThreadId] = useState<number | null>(null);
const [searchQuery, setSearchQuery] = useState("");
const [showArchived, setShowArchived] = useState(false);
const [mounted, setMounted] = useState(false);
const [openDropdownId, setOpenDropdownId] = useState<number | null>(null);
const debouncedSearchQuery = useDebouncedValue(searchQuery, 300);
const isSearchMode = !!debouncedSearchQuery.trim();
useEffect(() => {
setMounted(true);
}, []);
useEffect(() => {
const handleEscape = (e: KeyboardEvent) => {
if (e.key === "Escape" && open) {
@ -89,17 +83,6 @@ export function AllPrivateChatsSidebar({
return () => document.removeEventListener("keydown", handleEscape);
}, [open, onOpenChange]);
useEffect(() => {
if (open) {
document.body.style.overflow = "hidden";
} else {
document.body.style.overflow = "";
}
return () => {
document.body.style.overflow = "";
};
}, [open]);
const {
data: threadsData,
error: threadsError,
@ -214,248 +197,221 @@ export function AllPrivateChatsSidebar({
const activeCount = activeChats.length;
const archivedCount = archivedChats.length;
if (!mounted) return null;
return (
<SidebarSlideOutPanel
open={open}
onOpenChange={onOpenChange}
ariaLabel={t("chats") || "Private Chats"}
>
<div className="shrink-0 p-4 pb-2 space-y-3">
<div className="flex items-center gap-2">
<User className="h-5 w-5 text-primary" />
<h2 className="text-lg font-semibold">{t("chats") || "Private Chats"}</h2>
</div>
return createPortal(
<AnimatePresence>
{open && (
<>
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
transition={{ duration: 0.2 }}
className="fixed inset-0 z-70 bg-black/50"
onClick={() => onOpenChange(false)}
aria-hidden="true"
<div className="relative">
<Search className="absolute left-3 top-1/2 -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
type="text"
placeholder={t("search_chats") || "Search chats..."}
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="pl-9 pr-8 h-9"
/>
{searchQuery && (
<Button
variant="ghost"
size="icon"
className="absolute right-1 top-1/2 -translate-y-1/2 h-6 w-6"
onClick={handleClearSearch}
>
<X className="h-3.5 w-3.5" />
<span className="sr-only">{t("clear_search") || "Clear search"}</span>
</Button>
)}
</div>
</div>
<motion.div
initial={{ x: "-100%" }}
animate={{ x: 0 }}
exit={{ x: "-100%" }}
transition={{ type: "tween", duration: 0.3, ease: "easeOut" }}
className="fixed inset-y-0 left-0 z-70 w-80 bg-background shadow-xl flex flex-col pointer-events-auto isolate"
role="dialog"
aria-modal="true"
aria-label={t("chats") || "Private Chats"}
>
<div className="shrink-0 p-4 pb-2 space-y-3">
<div className="flex items-center gap-2">
<User className="h-5 w-5 text-primary" />
<h2 className="text-lg font-semibold">{t("chats") || "Private Chats"}</h2>
</div>
<div className="relative">
<Search className="absolute left-3 top-1/2 -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
type="text"
placeholder={t("search_chats") || "Search chats..."}
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="pl-9 pr-8 h-9"
/>
{searchQuery && (
<Button
variant="ghost"
size="icon"
className="absolute right-1 top-1/2 -translate-y-1/2 h-6 w-6"
onClick={handleClearSearch}
>
<X className="h-3.5 w-3.5" />
<span className="sr-only">{t("clear_search") || "Clear search"}</span>
</Button>
)}
</div>
</div>
{!isSearchMode && (
<Tabs
value={showArchived ? "archived" : "active"}
onValueChange={(value) => setShowArchived(value === "archived")}
className="shrink-0 mx-4"
>
<TabsList className="w-full h-auto p-0 bg-transparent rounded-none border-b">
<TabsTrigger
value="active"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<MessageCircleMore className="h-4 w-4" />
<span>Active</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{activeCount}
</span>
</span>
</TabsTrigger>
<TabsTrigger
value="archived"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<ArchiveIcon className="h-4 w-4" />
<span>Archived</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{archivedCount}
</span>
</span>
</TabsTrigger>
</TabsList>
</Tabs>
)}
<div className="flex-1 overflow-y-auto overflow-x-hidden p-2">
{isLoading ? (
<div className="space-y-1">
{[75, 90, 55, 80, 65, 85].map((titleWidth, i) => (
<div
key={`skeleton-${i}`}
className="flex items-center gap-2 rounded-md px-2 py-1.5"
>
<Skeleton className="h-4 w-4 shrink-0 rounded" />
<Skeleton className="h-4 rounded" style={{ width: `${titleWidth}%` }} />
</div>
))}
</div>
) : error ? (
<div className="text-center py-8 text-sm text-destructive">
{t("error_loading_chats") || "Error loading chats"}
</div>
) : threads.length > 0 ? (
<div className="space-y-1">
{threads.map((thread) => {
const isDeleting = deletingThreadId === thread.id;
const isArchiving = archivingThreadId === thread.id;
const isBusy = isDeleting || isArchiving;
const isActive = currentChatId === thread.id;
return (
<div
key={thread.id}
className={cn(
"group flex items-center gap-2 rounded-md px-2 py-1.5 text-sm",
"hover:bg-accent hover:text-accent-foreground",
"transition-colors cursor-pointer",
isActive && "bg-accent text-accent-foreground",
isBusy && "opacity-50 pointer-events-none"
)}
>
{isMobile ? (
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
) : (
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
</TooltipTrigger>
<TooltipContent side="bottom" align="start">
<p>
{t("updated") || "Updated"}:{" "}
{format(new Date(thread.updatedAt), "MMM d, yyyy 'at' h:mm a")}
</p>
</TooltipContent>
</Tooltip>
)}
<DropdownMenu
open={openDropdownId === thread.id}
onOpenChange={(isOpen) => setOpenDropdownId(isOpen ? thread.id : null)}
>
<DropdownMenuTrigger asChild>
<Button
variant="ghost"
size="icon"
className={cn(
"h-6 w-6 shrink-0",
"md:opacity-0 md:group-hover:opacity-100 md:focus:opacity-100",
"transition-opacity"
)}
disabled={isBusy}
>
{isDeleting ? (
<Spinner size="xs" />
) : (
<MoreHorizontal className="h-3.5 w-3.5 text-muted-foreground" />
)}
<span className="sr-only">{t("more_options") || "More options"}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-40 z-80">
<DropdownMenuItem
onClick={() => handleToggleArchive(thread.id, thread.archived)}
disabled={isArchiving}
>
{thread.archived ? (
<>
<RotateCcwIcon className="mr-2 h-4 w-4" />
<span>{t("unarchive") || "Restore"}</span>
</>
) : (
<>
<ArchiveIcon className="mr-2 h-4 w-4" />
<span>{t("archive") || "Archive"}</span>
</>
)}
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={() => handleDeleteThread(thread.id)}
className="text-destructive focus:text-destructive"
>
<Trash2 className="mr-2 h-4 w-4" />
<span>{t("delete") || "Delete"}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
);
})}
</div>
) : isSearchMode ? (
<div className="text-center py-8">
<Search className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{t("no_chats_found") || "No chats found"}
</p>
<p className="text-xs text-muted-foreground/70 mt-1">
{t("try_different_search") || "Try a different search term"}
</p>
</div>
) : (
<div className="text-center py-8">
<User className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{showArchived
? t("no_archived_chats") || "No archived chats"
: t("no_chats") || "No private chats"}
</p>
{!showArchived && (
<p className="text-xs text-muted-foreground/70 mt-1">
{t("start_new_chat_hint") || "Start a new chat from the chat page"}
</p>
)}
</div>
)}
</div>
</motion.div>
</>
{!isSearchMode && (
<Tabs
value={showArchived ? "archived" : "active"}
onValueChange={(value) => setShowArchived(value === "archived")}
className="shrink-0 mx-4"
>
<TabsList className="w-full h-auto p-0 bg-transparent rounded-none border-b">
<TabsTrigger
value="active"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<MessageCircleMore className="h-4 w-4" />
<span>Active</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{activeCount}
</span>
</span>
</TabsTrigger>
<TabsTrigger
value="archived"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<ArchiveIcon className="h-4 w-4" />
<span>Archived</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{archivedCount}
</span>
</span>
</TabsTrigger>
</TabsList>
</Tabs>
)}
</AnimatePresence>,
document.body
<div className="flex-1 overflow-y-auto overflow-x-hidden p-2">
{isLoading ? (
<div className="space-y-1">
{[75, 90, 55, 80, 65, 85].map((titleWidth, i) => (
<div key={`skeleton-${i}`} className="flex items-center gap-2 rounded-md px-2 py-1.5">
<Skeleton className="h-4 w-4 shrink-0 rounded" />
<Skeleton className="h-4 rounded" style={{ width: `${titleWidth}%` }} />
</div>
))}
</div>
) : error ? (
<div className="text-center py-8 text-sm text-destructive">
{t("error_loading_chats") || "Error loading chats"}
</div>
) : threads.length > 0 ? (
<div className="space-y-1">
{threads.map((thread) => {
const isDeleting = deletingThreadId === thread.id;
const isArchiving = archivingThreadId === thread.id;
const isBusy = isDeleting || isArchiving;
const isActive = currentChatId === thread.id;
return (
<div
key={thread.id}
className={cn(
"group flex items-center gap-2 rounded-md px-2 py-1.5 text-sm",
"hover:bg-accent hover:text-accent-foreground",
"transition-colors cursor-pointer",
isActive && "bg-accent text-accent-foreground",
isBusy && "opacity-50 pointer-events-none"
)}
>
{isMobile ? (
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
) : (
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
</TooltipTrigger>
<TooltipContent side="bottom" align="start">
<p>
{t("updated") || "Updated"}:{" "}
{format(new Date(thread.updatedAt), "MMM d, yyyy 'at' h:mm a")}
</p>
</TooltipContent>
</Tooltip>
)}
<DropdownMenu
open={openDropdownId === thread.id}
onOpenChange={(isOpen) => setOpenDropdownId(isOpen ? thread.id : null)}
>
<DropdownMenuTrigger asChild>
<Button
variant="ghost"
size="icon"
className={cn(
"h-6 w-6 shrink-0",
"md:opacity-0 md:group-hover:opacity-100 md:focus:opacity-100",
"transition-opacity"
)}
disabled={isBusy}
>
{isDeleting ? (
<Spinner size="xs" />
) : (
<MoreHorizontal className="h-3.5 w-3.5 text-muted-foreground" />
)}
<span className="sr-only">{t("more_options") || "More options"}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-40 z-80">
<DropdownMenuItem
onClick={() => handleToggleArchive(thread.id, thread.archived)}
disabled={isArchiving}
>
{thread.archived ? (
<>
<RotateCcwIcon className="mr-2 h-4 w-4" />
<span>{t("unarchive") || "Restore"}</span>
</>
) : (
<>
<ArchiveIcon className="mr-2 h-4 w-4" />
<span>{t("archive") || "Archive"}</span>
</>
)}
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={() => handleDeleteThread(thread.id)}
className="text-destructive focus:text-destructive"
>
<Trash2 className="mr-2 h-4 w-4" />
<span>{t("delete") || "Delete"}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
);
})}
</div>
) : isSearchMode ? (
<div className="text-center py-8">
<Search className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{t("no_chats_found") || "No chats found"}
</p>
<p className="text-xs text-muted-foreground/70 mt-1">
{t("try_different_search") || "Try a different search term"}
</p>
</div>
) : (
<div className="text-center py-8">
<User className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{showArchived
? t("no_archived_chats") || "No archived chats"
: t("no_chats") || "No private chats"}
</p>
{!showArchived && (
<p className="text-xs text-muted-foreground/70 mt-1">
{t("start_new_chat_hint") || "Start a new chat from the chat page"}
</p>
)}
</div>
)}
</div>
</SidebarSlideOutPanel>
);
}

View file

@ -12,11 +12,9 @@ import {
Users,
X,
} from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import { useCallback, useEffect, useMemo, useState } from "react";
import { createPortal } from "react-dom";
import { toast } from "sonner";
import { Button } from "@/components/ui/button";
import {
@ -40,6 +38,7 @@ import {
updateThread,
} from "@/lib/chat/thread-persistence";
import { cn } from "@/lib/utils";
import { SidebarSlideOutPanel } from "./SidebarSlideOutPanel";
interface AllSharedChatsSidebarProps {
open: boolean;
@ -69,16 +68,11 @@ export function AllSharedChatsSidebar({
const [archivingThreadId, setArchivingThreadId] = useState<number | null>(null);
const [searchQuery, setSearchQuery] = useState("");
const [showArchived, setShowArchived] = useState(false);
const [mounted, setMounted] = useState(false);
const [openDropdownId, setOpenDropdownId] = useState<number | null>(null);
const debouncedSearchQuery = useDebouncedValue(searchQuery, 300);
const isSearchMode = !!debouncedSearchQuery.trim();
useEffect(() => {
setMounted(true);
}, []);
useEffect(() => {
const handleEscape = (e: KeyboardEvent) => {
if (e.key === "Escape" && open) {
@ -89,17 +83,6 @@ export function AllSharedChatsSidebar({
return () => document.removeEventListener("keydown", handleEscape);
}, [open, onOpenChange]);
useEffect(() => {
if (open) {
document.body.style.overflow = "hidden";
} else {
document.body.style.overflow = "";
}
return () => {
document.body.style.overflow = "";
};
}, [open]);
const {
data: threadsData,
error: threadsError,
@ -214,248 +197,221 @@ export function AllSharedChatsSidebar({
const activeCount = activeChats.length;
const archivedCount = archivedChats.length;
if (!mounted) return null;
return (
<SidebarSlideOutPanel
open={open}
onOpenChange={onOpenChange}
ariaLabel={t("shared_chats") || "Shared Chats"}
>
<div className="shrink-0 p-4 pb-2 space-y-3">
<div className="flex items-center gap-2">
<Users className="h-5 w-5 text-primary" />
<h2 className="text-lg font-semibold">{t("shared_chats") || "Shared Chats"}</h2>
</div>
return createPortal(
<AnimatePresence>
{open && (
<>
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
transition={{ duration: 0.2 }}
className="fixed inset-0 z-70 bg-black/50"
onClick={() => onOpenChange(false)}
aria-hidden="true"
<div className="relative">
<Search className="absolute left-3 top-1/2 -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
type="text"
placeholder={t("search_chats") || "Search chats..."}
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="pl-9 pr-8 h-9"
/>
{searchQuery && (
<Button
variant="ghost"
size="icon"
className="absolute right-1 top-1/2 -translate-y-1/2 h-6 w-6"
onClick={handleClearSearch}
>
<X className="h-3.5 w-3.5" />
<span className="sr-only">{t("clear_search") || "Clear search"}</span>
</Button>
)}
</div>
</div>
<motion.div
initial={{ x: "-100%" }}
animate={{ x: 0 }}
exit={{ x: "-100%" }}
transition={{ type: "tween", duration: 0.3, ease: "easeOut" }}
className="fixed inset-y-0 left-0 z-70 w-80 bg-background shadow-xl flex flex-col pointer-events-auto isolate"
role="dialog"
aria-modal="true"
aria-label={t("shared_chats") || "Shared Chats"}
>
<div className="shrink-0 p-4 pb-2 space-y-3">
<div className="flex items-center gap-2">
<Users className="h-5 w-5 text-primary" />
<h2 className="text-lg font-semibold">{t("shared_chats") || "Shared Chats"}</h2>
</div>
<div className="relative">
<Search className="absolute left-3 top-1/2 -translate-y-1/2 h-4 w-4 text-muted-foreground" />
<Input
type="text"
placeholder={t("search_chats") || "Search chats..."}
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
className="pl-9 pr-8 h-9"
/>
{searchQuery && (
<Button
variant="ghost"
size="icon"
className="absolute right-1 top-1/2 -translate-y-1/2 h-6 w-6"
onClick={handleClearSearch}
>
<X className="h-3.5 w-3.5" />
<span className="sr-only">{t("clear_search") || "Clear search"}</span>
</Button>
)}
</div>
</div>
{!isSearchMode && (
<Tabs
value={showArchived ? "archived" : "active"}
onValueChange={(value) => setShowArchived(value === "archived")}
className="shrink-0 mx-4"
>
<TabsList className="w-full h-auto p-0 bg-transparent rounded-none border-b">
<TabsTrigger
value="active"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<MessageCircleMore className="h-4 w-4" />
<span>Active</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{activeCount}
</span>
</span>
</TabsTrigger>
<TabsTrigger
value="archived"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<ArchiveIcon className="h-4 w-4" />
<span>Archived</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{archivedCount}
</span>
</span>
</TabsTrigger>
</TabsList>
</Tabs>
)}
<div className="flex-1 overflow-y-auto overflow-x-hidden p-2">
{isLoading ? (
<div className="space-y-1">
{[75, 90, 55, 80, 65, 85].map((titleWidth, i) => (
<div
key={`skeleton-${i}`}
className="flex items-center gap-2 rounded-md px-2 py-1.5"
>
<Skeleton className="h-4 w-4 shrink-0 rounded" />
<Skeleton className="h-4 rounded" style={{ width: `${titleWidth}%` }} />
</div>
))}
</div>
) : error ? (
<div className="text-center py-8 text-sm text-destructive">
{t("error_loading_chats") || "Error loading chats"}
</div>
) : threads.length > 0 ? (
<div className="space-y-1">
{threads.map((thread) => {
const isDeleting = deletingThreadId === thread.id;
const isArchiving = archivingThreadId === thread.id;
const isBusy = isDeleting || isArchiving;
const isActive = currentChatId === thread.id;
return (
<div
key={thread.id}
className={cn(
"group flex items-center gap-2 rounded-md px-2 py-1.5 text-sm",
"hover:bg-accent hover:text-accent-foreground",
"transition-colors cursor-pointer",
isActive && "bg-accent text-accent-foreground",
isBusy && "opacity-50 pointer-events-none"
)}
>
{isMobile ? (
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
) : (
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
</TooltipTrigger>
<TooltipContent side="bottom" align="start">
<p>
{t("updated") || "Updated"}:{" "}
{format(new Date(thread.updatedAt), "MMM d, yyyy 'at' h:mm a")}
</p>
</TooltipContent>
</Tooltip>
)}
<DropdownMenu
open={openDropdownId === thread.id}
onOpenChange={(isOpen) => setOpenDropdownId(isOpen ? thread.id : null)}
>
<DropdownMenuTrigger asChild>
<Button
variant="ghost"
size="icon"
className={cn(
"h-6 w-6 shrink-0",
"md:opacity-0 md:group-hover:opacity-100 md:focus:opacity-100",
"transition-opacity"
)}
disabled={isBusy}
>
{isDeleting ? (
<Spinner size="xs" />
) : (
<MoreHorizontal className="h-3.5 w-3.5 text-muted-foreground" />
)}
<span className="sr-only">{t("more_options") || "More options"}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-40 z-80">
<DropdownMenuItem
onClick={() => handleToggleArchive(thread.id, thread.archived)}
disabled={isArchiving}
>
{thread.archived ? (
<>
<RotateCcwIcon className="mr-2 h-4 w-4" />
<span>{t("unarchive") || "Restore"}</span>
</>
) : (
<>
<ArchiveIcon className="mr-2 h-4 w-4" />
<span>{t("archive") || "Archive"}</span>
</>
)}
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={() => handleDeleteThread(thread.id)}
className="text-destructive focus:text-destructive"
>
<Trash2 className="mr-2 h-4 w-4" />
<span>{t("delete") || "Delete"}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
);
})}
</div>
) : isSearchMode ? (
<div className="text-center py-8">
<Search className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{t("no_chats_found") || "No chats found"}
</p>
<p className="text-xs text-muted-foreground/70 mt-1">
{t("try_different_search") || "Try a different search term"}
</p>
</div>
) : (
<div className="text-center py-8">
<Users className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{showArchived
? t("no_archived_chats") || "No archived chats"
: t("no_shared_chats") || "No shared chats"}
</p>
{!showArchived && (
<p className="text-xs text-muted-foreground/70 mt-1">
Share a chat to collaborate with your team
</p>
)}
</div>
)}
</div>
</motion.div>
</>
{!isSearchMode && (
<Tabs
value={showArchived ? "archived" : "active"}
onValueChange={(value) => setShowArchived(value === "archived")}
className="shrink-0 mx-4"
>
<TabsList className="w-full h-auto p-0 bg-transparent rounded-none border-b">
<TabsTrigger
value="active"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<MessageCircleMore className="h-4 w-4" />
<span>Active</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{activeCount}
</span>
</span>
</TabsTrigger>
<TabsTrigger
value="archived"
className="flex-1 rounded-none border-b-2 border-transparent px-1 py-2 text-xs font-medium data-[state=active]:border-primary data-[state=active]:bg-transparent data-[state=active]:shadow-none"
>
<span className="w-full inline-flex items-center justify-center gap-1.5 px-3 py-1.5 rounded-lg hover:bg-muted transition-colors">
<ArchiveIcon className="h-4 w-4" />
<span>Archived</span>
<span className="inline-flex items-center justify-center min-w-5 h-5 px-1.5 rounded-full bg-primary/20 text-muted-foreground text-xs font-medium">
{archivedCount}
</span>
</span>
</TabsTrigger>
</TabsList>
</Tabs>
)}
</AnimatePresence>,
document.body
<div className="flex-1 overflow-y-auto overflow-x-hidden p-2">
{isLoading ? (
<div className="space-y-1">
{[75, 90, 55, 80, 65, 85].map((titleWidth, i) => (
<div key={`skeleton-${i}`} className="flex items-center gap-2 rounded-md px-2 py-1.5">
<Skeleton className="h-4 w-4 shrink-0 rounded" />
<Skeleton className="h-4 rounded" style={{ width: `${titleWidth}%` }} />
</div>
))}
</div>
) : error ? (
<div className="text-center py-8 text-sm text-destructive">
{t("error_loading_chats") || "Error loading chats"}
</div>
) : threads.length > 0 ? (
<div className="space-y-1">
{threads.map((thread) => {
const isDeleting = deletingThreadId === thread.id;
const isArchiving = archivingThreadId === thread.id;
const isBusy = isDeleting || isArchiving;
const isActive = currentChatId === thread.id;
return (
<div
key={thread.id}
className={cn(
"group flex items-center gap-2 rounded-md px-2 py-1.5 text-sm",
"hover:bg-accent hover:text-accent-foreground",
"transition-colors cursor-pointer",
isActive && "bg-accent text-accent-foreground",
isBusy && "opacity-50 pointer-events-none"
)}
>
{isMobile ? (
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
) : (
<Tooltip>
<TooltipTrigger asChild>
<button
type="button"
onClick={() => handleThreadClick(thread.id)}
disabled={isBusy}
className="flex items-center gap-2 flex-1 min-w-0 text-left overflow-hidden"
>
<MessageCircleMore className="h-4 w-4 shrink-0 text-muted-foreground" />
<span className="truncate">{thread.title || "New Chat"}</span>
</button>
</TooltipTrigger>
<TooltipContent side="bottom" align="start">
<p>
{t("updated") || "Updated"}:{" "}
{format(new Date(thread.updatedAt), "MMM d, yyyy 'at' h:mm a")}
</p>
</TooltipContent>
</Tooltip>
)}
<DropdownMenu
open={openDropdownId === thread.id}
onOpenChange={(isOpen) => setOpenDropdownId(isOpen ? thread.id : null)}
>
<DropdownMenuTrigger asChild>
<Button
variant="ghost"
size="icon"
className={cn(
"h-6 w-6 shrink-0",
"md:opacity-0 md:group-hover:opacity-100 md:focus:opacity-100",
"transition-opacity"
)}
disabled={isBusy}
>
{isDeleting ? (
<Spinner size="xs" />
) : (
<MoreHorizontal className="h-3.5 w-3.5 text-muted-foreground" />
)}
<span className="sr-only">{t("more_options") || "More options"}</span>
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent align="end" className="w-40 z-80">
<DropdownMenuItem
onClick={() => handleToggleArchive(thread.id, thread.archived)}
disabled={isArchiving}
>
{thread.archived ? (
<>
<RotateCcwIcon className="mr-2 h-4 w-4" />
<span>{t("unarchive") || "Restore"}</span>
</>
) : (
<>
<ArchiveIcon className="mr-2 h-4 w-4" />
<span>{t("archive") || "Archive"}</span>
</>
)}
</DropdownMenuItem>
<DropdownMenuSeparator />
<DropdownMenuItem
onClick={() => handleDeleteThread(thread.id)}
className="text-destructive focus:text-destructive"
>
<Trash2 className="mr-2 h-4 w-4" />
<span>{t("delete") || "Delete"}</span>
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
</div>
);
})}
</div>
) : isSearchMode ? (
<div className="text-center py-8">
<Search className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{t("no_chats_found") || "No chats found"}
</p>
<p className="text-xs text-muted-foreground/70 mt-1">
{t("try_different_search") || "Try a different search term"}
</p>
</div>
) : (
<div className="text-center py-8">
<Users className="h-12 w-12 mx-auto text-muted-foreground mb-3" />
<p className="text-sm text-muted-foreground">
{showArchived
? t("no_archived_chats") || "No archived chats"
: t("no_shared_chats") || "No shared chats"}
</p>
{!showArchived && (
<p className="text-xs text-muted-foreground/70 mt-1">
Share a chat to collaborate with your team
</p>
)}
</div>
)}
</div>
</SidebarSlideOutPanel>
);
}

View file

@ -19,7 +19,6 @@ import {
Search,
X,
} from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
@ -53,17 +52,13 @@ import {
isNewMentionMetadata,
isPageLimitExceededMetadata,
} from "@/contracts/types/inbox.types";
import type { InboxItem } from "@/hooks/use-inbox";
import { useDebouncedValue } from "@/hooks/use-debounced-value";
import type { InboxItem } from "@/hooks/use-inbox";
import { useMediaQuery } from "@/hooks/use-media-query";
import { notificationsApiService } from "@/lib/apis/notifications-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { cn } from "@/lib/utils";
import { useSidebarContextSafe } from "../../hooks";
// Sidebar width constants
const SIDEBAR_COLLAPSED_WIDTH = 60;
const SIDEBAR_EXPANDED_WIDTH = 240;
import { SidebarSlideOutPanel } from "./SidebarSlideOutPanel";
/**
* Get initials from name or email for avatar fallback
@ -561,13 +556,6 @@ export function InboxSidebar({
};
};
// Get sidebar collapsed state from context (provided by LayoutShell)
const sidebarContext = useSidebarContextSafe();
const isCollapsed = sidebarContext?.isCollapsed ?? false;
// Calculate the left position for the inbox panel (relative to sidebar)
const sidebarWidth = isCollapsed ? SIDEBAR_COLLAPSED_WIDTH : SIDEBAR_EXPANDED_WIDTH;
if (!mounted) return null;
// Shared content component for both docked and floating modes
@ -1126,49 +1114,8 @@ export function InboxSidebar({
// FLOATING MODE: Render with animation and click-away layer
return (
<AnimatePresence>
{open && (
<>
{/* Click-away layer - only covers the content area, not the sidebar */}
<motion.div
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
exit={{ opacity: 0 }}
transition={{ duration: 0.15 }}
style={{
left: isMobile ? 0 : sidebarWidth,
}}
className="absolute inset-y-0 right-0"
onClick={() => onOpenChange(false)}
aria-hidden="true"
/>
{/* Clip container - positioned at sidebar edge with overflow hidden */}
<div
style={{
left: isMobile ? 0 : sidebarWidth,
width: isMobile ? "100%" : 360,
}}
className={cn("absolute z-10 overflow-hidden pointer-events-none", "inset-y-0")}
>
<motion.div
initial={{ x: "-100%" }}
animate={{ x: 0 }}
exit={{ x: "-100%" }}
transition={{ type: "tween", duration: 0.2, ease: [0.4, 0, 0.2, 1] }}
className={cn(
"h-full w-full bg-background flex flex-col pointer-events-auto",
"sm:border-r sm:shadow-xl"
)}
role="dialog"
aria-modal="true"
aria-label={t("inbox") || "Inbox"}
>
{inboxContent}
</motion.div>
</div>
</>
)}
</AnimatePresence>
<SidebarSlideOutPanel open={open} onOpenChange={onOpenChange} ariaLabel={t("inbox") || "Inbox"}>
{inboxContent}
</SidebarSlideOutPanel>
);
}

View file

@ -6,6 +6,7 @@ import { Button } from "@/components/ui/button";
import { Skeleton } from "@/components/ui/skeleton";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import { SIDEBAR_MIN_WIDTH } from "../../hooks/useSidebarResize";
import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types";
import { ChatListItem } from "./ChatListItem";
import { NavSection } from "./NavSection";
@ -51,6 +52,9 @@ interface SidebarProps {
className?: string;
isLoadingChats?: boolean;
disableTooltips?: boolean;
sidebarWidth?: number;
onResizeMouseDown?: (e: React.MouseEvent) => void;
isResizing?: boolean;
}
export function Sidebar({
@ -80,17 +84,29 @@ export function Sidebar({
className,
isLoadingChats = false,
disableTooltips = false,
sidebarWidth = SIDEBAR_MIN_WIDTH,
onResizeMouseDown,
isResizing = false,
}: SidebarProps) {
const t = useTranslations("sidebar");
return (
<div
className={cn(
"flex h-full flex-col bg-sidebar text-sidebar-foreground transition-all duration-200 overflow-hidden",
isCollapsed ? "w-[60px]" : "w-[240px]",
"relative flex h-full flex-col bg-sidebar text-sidebar-foreground overflow-hidden",
isCollapsed ? "w-[60px] transition-all duration-200" : "",
!isCollapsed && !isResizing ? "transition-all duration-200" : "",
className
)}
style={!isCollapsed ? { width: sidebarWidth } : undefined}
>
{/* Resize handle on right border */}
{!isCollapsed && onResizeMouseDown && (
<div
onMouseDown={onResizeMouseDown}
className="absolute right-0 top-0 h-full w-1 cursor-col-resize hover:bg-border active:bg-border z-10"
/>
)}
{/* Header - search space name or collapse button when collapsed */}
{isCollapsed ? (
<div className="flex h-14 shrink-0 items-center justify-center border-b">

View file

@ -0,0 +1,82 @@
"use client";
import { AnimatePresence, motion } from "motion/react";
import { useMediaQuery } from "@/hooks/use-media-query";
import { cn } from "@/lib/utils";
import { useSidebarContextSafe } from "../../hooks";
// Width (px) of the sidebar when collapsed to icon-only mode — must stay in
// sync with the collapsed width used by the Sidebar component itself.
const SIDEBAR_COLLAPSED_WIDTH = 60;

interface SidebarSlideOutPanelProps {
	/** Whether the panel is currently visible. */
	open: boolean;
	/** Invoked with `false` when the click-away layer is clicked. */
	onOpenChange: (open: boolean) => void;
	/** Accessible label applied to the dialog region. */
	ariaLabel: string;
	/** Panel width in px on `sm`+ viewports; mobile always uses full width. */
	width?: number;
	/** Panel contents. */
	children: React.ReactNode;
}
/**
* Reusable slide-out panel that appears from the right edge of the sidebar.
* Used by InboxSidebar (floating mode), AllSharedChatsSidebar, and AllPrivateChatsSidebar.
*
* Must be rendered inside a positioned container (the LayoutShell's relative flex container)
* and within the SidebarProvider context.
*/
export function SidebarSlideOutPanel({
	open,
	onOpenChange,
	ariaLabel,
	width = 360,
	children,
}: SidebarSlideOutPanelProps) {
	// Below the `sm` breakpoint the panel takes over the full container width.
	const isNarrowViewport = !useMediaQuery("(min-width: 640px)");
	const ctx = useSidebarContextSafe();

	// Anchor the panel at the sidebar's right edge: a collapsed sidebar has a
	// fixed width, an expanded one reports its (resizable) width via context,
	// falling back to the classic 240px default when no context is present.
	let anchorLeft = SIDEBAR_COLLAPSED_WIDTH;
	if (!(ctx?.isCollapsed ?? false)) {
		anchorLeft = ctx?.sidebarWidth ?? 240;
	}

	return (
		<AnimatePresence>
			{open && (
				<>
					{/* Click-away layer — covers the full container, sidebar included */}
					<motion.div
						aria-hidden="true"
						initial={{ opacity: 0 }}
						animate={{ opacity: 1 }}
						exit={{ opacity: 0 }}
						transition={{ duration: 0.15 }}
						className="absolute inset-0 z-[5]"
						onClick={() => onOpenChange(false)}
					/>
					{/* Clip container pinned at the sidebar edge; overflow is hidden so
					    the dialog appears to slide out from behind the sidebar */}
					<div
						style={{
							left: isNarrowViewport ? 0 : anchorLeft,
							width: isNarrowViewport ? "100%" : width,
						}}
						className={cn("absolute z-10 overflow-hidden pointer-events-none", "inset-y-0")}
					>
						<motion.div
							role="dialog"
							aria-modal="true"
							aria-label={ariaLabel}
							initial={{ x: "-100%" }}
							animate={{ x: 0 }}
							exit={{ x: "-100%" }}
							transition={{ type: "tween", duration: 0.2, ease: [0.4, 0, 0.2, 1] }}
							className={cn(
								"h-full w-full bg-background flex flex-col pointer-events-auto",
								"sm:border-r sm:shadow-xl"
							)}
						>
							{children}
						</motion.div>
					</div>
				</>
			)}
		</AnimatePresence>
	);
}

View file

@ -1,16 +1,6 @@
"use client";
import {
Check,
ChevronUp,
Languages,
Laptop,
Loader2,
LogOut,
Moon,
Settings,
Sun,
} from "lucide-react";
import { Check, ChevronUp, Languages, Laptop, LogOut, Moon, Settings, Sun } from "lucide-react";
import { useTranslations } from "next-intl";
import { useState } from "react";
import {
@ -25,6 +15,7 @@ import {
DropdownMenuSubTrigger,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Spinner } from "@/components/ui/spinner";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
import { useLocaleContext } from "@/contexts/LocaleContext";
import { cn } from "@/lib/utils";
@ -266,7 +257,7 @@ export function SidebarUserProfile({
<DropdownMenuItem onClick={handleLogout} disabled={isLoggingOut}>
{isLoggingOut ? (
<Loader2 className="mr-2 h-4 w-4 animate-spin" />
<Spinner size="sm" className="mr-2" />
) : (
<LogOut className="mr-2 h-4 w-4" />
)}
@ -388,7 +379,7 @@ export function SidebarUserProfile({
<DropdownMenuItem onClick={handleLogout} disabled={isLoggingOut}>
{isLoggingOut ? (
<Loader2 className="mr-2 h-4 w-4 animate-spin" />
<Spinner size="sm" className="mr-2" />
) : (
<LogOut className="mr-2 h-4 w-4" />
)}

View file

@ -1,10 +1,11 @@
"use client";
import { Copy, Loader2 } from "lucide-react";
import { Copy } from "lucide-react";
import { useRouter, useSearchParams } from "next/navigation";
import { useCallback, useEffect, useRef, useState } from "react";
import { toast } from "sonner";
import { Button } from "@/components/ui/button";
import { Spinner } from "@/components/ui/spinner";
import { publicChatApiService } from "@/lib/apis/public-chat-api.service";
import { getBearerToken } from "@/lib/auth-utils";
@ -61,9 +62,14 @@ export function PublicChatFooter({ shareToken }: PublicChatFooterProps) {
};
return (
<div className="mx-auto flex max-w-(--thread-max-width) items-center justify-center px-4 py-4">
<Button size="lg" onClick={handleCopyAndContinue} disabled={isCloning} className="gap-2">
{isCloning ? <Loader2 className="size-4 animate-spin" /> : <Copy className="size-4" />}
<div className="fixed bottom-6 left-1/2 z-50 -translate-x-1/2">
<Button
size="lg"
onClick={handleCopyAndContinue}
disabled={isCloning}
className="gap-2 rounded-full px-6 shadow-lg transition-all duration-200 hover:scale-[1.02] hover:shadow-xl hover:brightness-110 hover:bg-primary"
>
{isCloning ? <Spinner size="sm" /> : <Copy className="size-4" />}
Copy and continue this chat
</Button>
</div>

View file

@ -1,12 +1,12 @@
"use client";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { Loader2 } from "lucide-react";
import { Navbar } from "@/components/homepage/navbar";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast";
import { LinkPreviewToolUI } from "@/components/tool-ui/link-preview";
import { ScrapeWebpageToolUI } from "@/components/tool-ui/scrape-webpage";
import { Spinner } from "@/components/ui/spinner";
import { usePublicChat } from "@/hooks/use-public-chat";
import { usePublicChatRuntime } from "@/hooks/use-public-chat-runtime";
import { PublicChatFooter } from "./public-chat-footer";
@ -26,7 +26,7 @@ export function PublicChatView({ shareToken }: PublicChatViewProps) {
<main className="min-h-screen bg-linear-to-b from-gray-50 to-gray-100 text-gray-900 dark:from-black dark:to-gray-900 dark:text-white overflow-x-hidden">
<Navbar />
<div className="flex h-screen items-center justify-center">
<Loader2 className="size-8 animate-spin text-muted-foreground" />
<Spinner size="lg" className="text-muted-foreground" />
</div>
</main>
);

View file

@ -71,8 +71,8 @@ import { Skeleton } from "@/components/ui/skeleton";
import { Spinner } from "@/components/ui/spinner";
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip";
import {
IMAGE_GEN_PROVIDERS,
getImageGenModelsByProvider,
IMAGE_GEN_PROVIDERS,
} from "@/contracts/enums/image-gen-providers";
import type { ImageGenerationConfig } from "@/contracts/types/new-llm-config.types";
import { cn } from "@/lib/utils";

View file

@ -1,7 +1,7 @@
"use client";
import * as CheckboxPrimitive from "@radix-ui/react-checkbox";
import { CheckIcon } from "lucide-react";
import { CheckIcon, MinusIcon } from "lucide-react";
import type * as React from "react";
import { cn } from "@/lib/utils";
@ -11,16 +11,17 @@ function Checkbox({ className, ...props }: React.ComponentProps<typeof CheckboxP
<CheckboxPrimitive.Root
data-slot="checkbox"
className={cn(
"peer border-input data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground data-[state=checked]:border-primary focus-visible:border-ring focus-visible:ring-ring/50 aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive size-4 shrink-0 rounded-[4px] border shadow-xs transition-shadow outline-none focus-visible:ring-[3px] disabled:cursor-not-allowed disabled:opacity-50",
"peer border-input data-[state=checked]:bg-primary data-[state=checked]:text-primary-foreground data-[state=checked]:border-primary data-[state=indeterminate]:bg-transparent data-[state=indeterminate]:text-foreground data-[state=indeterminate]:border-foreground focus-visible:border-ring focus-visible:ring-ring/50 aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive size-4 shrink-0 rounded-[4px] border shadow-xs transition-shadow outline-none focus-visible:ring-[3px] disabled:cursor-not-allowed disabled:opacity-50",
className
)}
{...props}
>
<CheckboxPrimitive.Indicator
data-slot="checkbox-indicator"
className="flex items-center justify-center text-current transition-none"
className="group flex items-center justify-center text-current transition-none"
>
<CheckIcon className="size-3.5" />
<CheckIcon className="size-3.5 hidden group-data-[state=checked]:block" />
<MinusIcon className="size-3.5 hidden group-data-[state=indeterminate]:block" />
</CheckboxPrimitive.Indicator>
</CheckboxPrimitive.Root>
);

View file

@ -1,4 +1,4 @@
import { IconLinkPlus, IconUsersGroup } from "@tabler/icons-react";
import { IconUsersGroup } from "@tabler/icons-react";
import {
BookOpen,
File,
@ -15,11 +15,16 @@ import { EnumConnectorName } from "./connector";
export const getConnectorIcon = (connectorType: EnumConnectorName | string, className?: string) => {
const iconProps = { className: className || "h-4 w-4" };
const imgProps = { className: className || "h-5 w-5", width: 20, height: 20 };
const imgProps = {
className: `${className || "h-5 w-5"} select-none pointer-events-none`,
width: 20,
height: 20,
draggable: false as const,
};
switch (connectorType) {
case EnumConnectorName.LINKUP_API:
return <IconLinkPlus {...iconProps} />;
return <Image src="/connectors/linkup.svg" alt="Linkup" {...imgProps} />;
case EnumConnectorName.LINEAR_CONNECTOR:
return <Image src="/connectors/linear.svg" alt="Linear" {...imgProps} />;
case EnumConnectorName.GITHUB_CONNECTOR:
@ -63,7 +68,7 @@ export const getConnectorIcon = (connectorType: EnumConnectorName | string, clas
case EnumConnectorName.YOUTUBE_CONNECTOR:
return <Image src="/connectors/youtube.svg" alt="YouTube" {...imgProps} />;
case EnumConnectorName.CIRCLEBACK_CONNECTOR:
return <IconUsersGroup {...iconProps} />;
return <Image src="/connectors/circleback.svg" alt="Circleback" {...imgProps} />;
case EnumConnectorName.MCP_CONNECTOR:
return <Image src="/connectors/modelcontextprotocol.svg" alt="MCP" {...imgProps} />;
case EnumConnectorName.OBSIDIAN_CONNECTOR:

View file

@ -1477,6 +1477,78 @@ export const LLM_MODELS: LLMModel[] = [
provider: "DATABRICKS",
contextWindow: "128K",
},
// GitHub Models
{
value: "openai/gpt-5",
label: "GitHub GPT-5",
provider: "GITHUB_MODELS",
},
{
value: "openai/gpt-4.1",
label: "GitHub GPT-4.1",
provider: "GITHUB_MODELS",
contextWindow: "1048K",
},
{
value: "openai/gpt-4o",
label: "GitHub GPT-4o",
provider: "GITHUB_MODELS",
contextWindow: "128K",
},
{
value: "deepseek/DeepSeek-V3-0324",
label: "GitHub DeepSeek V3",
provider: "GITHUB_MODELS",
contextWindow: "64K",
},
{
value: "xai/grok-3",
label: "GitHub Grok 3",
provider: "GITHUB_MODELS",
contextWindow: "131K",
},
{
value: "openai/gpt-5-mini",
label: "GitHub GPT-5 Mini",
provider: "GITHUB_MODELS",
},
{
value: "openai/gpt-4.1-mini",
label: "GitHub GPT-4.1 Mini",
provider: "GITHUB_MODELS",
contextWindow: "1048K",
},
{
value: "meta/Llama-4-Scout-17B-16E-Instruct",
label: "GitHub Llama 4 Scout",
provider: "GITHUB_MODELS",
contextWindow: "512K",
},
{
value: "openai/gpt-4.1-nano",
label: "GitHub GPT-4.1 Nano",
provider: "GITHUB_MODELS",
contextWindow: "1048K",
},
{
value: "openai/gpt-4o-mini",
label: "GitHub GPT-4o Mini",
provider: "GITHUB_MODELS",
contextWindow: "128K",
},
{
value: "openai/o4-mini",
label: "GitHub O4 Mini",
provider: "GITHUB_MODELS",
contextWindow: "200K",
},
{
value: "deepseek/DeepSeek-R1",
label: "GitHub DeepSeek R1",
provider: "GITHUB_MODELS",
contextWindow: "64K",
},
];
// Helper function to get models by provider

View file

@ -174,6 +174,13 @@ export const LLM_PROVIDERS: LLMProvider[] = [
example: "databricks/databricks-meta-llama-3-3-70b-instruct",
description: "Databricks Model Serving",
},
{
value: "GITHUB_MODELS",
label: "GitHub Models",
example: "openai/gpt-5, meta/llama-3.1-405b-instruct",
description: "AI models from GitHub Marketplace",
apiBase: "https://models.github.ai/inference",
},
{
value: "CUSTOM",
label: "Custom Provider",

View file

@ -138,6 +138,37 @@ export const uploadDocumentRequest = z.object({
export const uploadDocumentResponse = z.object({
message: z.literal("Files uploaded for processing"),
document_ids: z.array(z.number()),
duplicate_document_ids: z.array(z.number()).optional(),
total_files: z.number().optional(),
pending_files: z.number().optional(),
skipped_duplicates: z.number().optional(),
});
/**
* Batch document status
*/
export const getDocumentsStatusRequest = z.object({
queryParams: z.object({
search_space_id: z.number(),
document_ids: z.array(z.number()).min(1),
}),
});
export const documentStatus = z.object({
state: z.enum(["ready", "pending", "processing", "failed"]),
reason: z.string().nullable().optional(),
});
export const documentStatusItem = z.object({
id: z.number(),
title: z.string(),
document_type: documentTypeEnum,
status: documentStatus,
});
export const getDocumentsStatusResponse = z.object({
items: z.array(documentStatusItem),
});
/**
@ -261,6 +292,10 @@ export type CreateDocumentRequest = z.infer<typeof createDocumentRequest>;
export type CreateDocumentResponse = z.infer<typeof createDocumentResponse>;
export type UploadDocumentRequest = z.infer<typeof uploadDocumentRequest>;
export type UploadDocumentResponse = z.infer<typeof uploadDocumentResponse>;
export type GetDocumentsStatusRequest = z.infer<typeof getDocumentsStatusRequest>;
export type GetDocumentsStatusResponse = z.infer<typeof getDocumentsStatusResponse>;
export type DocumentStatus = z.infer<typeof documentStatus>;
export type DocumentStatusItem = z.infer<typeof documentStatusItem>;
export type SearchDocumentsRequest = z.infer<typeof searchDocumentsRequest>;
export type SearchDocumentsResponse = z.infer<typeof searchDocumentsResponse>;
export type SearchDocumentTitlesRequest = z.infer<typeof searchDocumentTitlesRequest>;

View file

@ -3,7 +3,7 @@ import { z } from "zod";
/**
* Incentive task type enum - matches backend IncentiveTaskType
*/
export const incentiveTaskTypeEnum = z.enum(["GITHUB_STAR"]);
export const incentiveTaskTypeEnum = z.enum(["GITHUB_STAR", "REDDIT_FOLLOW", "DISCORD_JOIN"]);
/**
* Single incentive task info schema

View file

@ -33,6 +33,7 @@ export const liteLLMProviderEnum = z.enum([
"DATABRICKS",
"COMETAPI",
"HUGGINGFACE",
"GITHUB_MODELS",
"CUSTOM",
]);

View file

@ -8,6 +8,7 @@ import {
type GetDocumentByChunkRequest,
type GetDocumentRequest,
type GetDocumentsRequest,
type GetDocumentsStatusRequest,
type GetDocumentTypeCountsRequest,
type GetSurfsenseDocsRequest,
getDocumentByChunkRequest,
@ -16,6 +17,8 @@ import {
getDocumentResponse,
getDocumentsRequest,
getDocumentsResponse,
getDocumentsStatusRequest,
getDocumentsStatusResponse,
getDocumentTypeCountsRequest,
getDocumentTypeCountsResponse,
getSurfsenseDocsByChunkResponse,
@ -130,6 +133,30 @@ class DocumentsApiService {
});
};
/**
* Batch document status for async processing tracking
*/
getDocumentsStatus = async (request: GetDocumentsStatusRequest) => {
const parsedRequest = getDocumentsStatusRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
const { search_space_id, document_ids } = parsedRequest.data.queryParams;
const params = new URLSearchParams({
search_space_id: String(search_space_id),
document_ids: document_ids.join(","),
});
return baseApiService.get(
`/api/v1/documents/status?${params.toString()}`,
getDocumentsStatusResponse
);
};
/**
* Search documents by title
*/

View file

@ -2,12 +2,12 @@ import {
type CreateImageGenConfigRequest,
createImageGenConfigRequest,
createImageGenConfigResponse,
deleteImageGenConfigResponse,
getGlobalImageGenConfigsResponse,
getImageGenConfigsResponse,
type UpdateImageGenConfigRequest,
updateImageGenConfigRequest,
updateImageGenConfigResponse,
deleteImageGenConfigResponse,
getImageGenConfigsResponse,
getGlobalImageGenConfigsResponse,
} from "@/contracts/types/new-llm-config.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";

View file

@ -20,8 +20,8 @@ const AUTH_ERROR_MESSAGES: AuthErrorMapping = {
description: "Your account may be suspended or restricted",
},
"404": {
title: "Account not found",
description: "No account exists with this email address",
title: "Not found",
description: "The requested resource was not found",
},
"409": {
title: "Account conflict",
@ -31,6 +31,10 @@ const AUTH_ERROR_MESSAGES: AuthErrorMapping = {
title: "Too many attempts",
description: "Please wait before trying again",
},
RATE_LIMIT_EXCEEDED: {
title: "Too many attempts",
description: "You've made too many requests. Please wait a minute and try again.",
},
"500": {
title: "Server error",
description: "Something went wrong on our end. Please try again",
@ -42,8 +46,8 @@ const AUTH_ERROR_MESSAGES: AuthErrorMapping = {
// FastAPI specific errors
LOGIN_BAD_CREDENTIALS: {
title: "Invalid credentials",
description: "The email or password you entered is incorrect",
title: "Login failed",
description: "Invalid email or password. If you don't have an account, please sign up.",
},
LOGIN_USER_NOT_VERIFIED: {
title: "Account not verified",

View file

@ -10,28 +10,53 @@ const REFRESH_TOKEN_KEY = "surfsense_refresh_token";
let isRefreshing = false;
let refreshPromise: Promise<string | null> | null = null;
/** Path prefixes for routes that do not require auth (no current-user fetch, no redirect on 401) */
const PUBLIC_ROUTE_PREFIXES = [
"/login",
"/register",
"/auth",
"/docs",
"/public",
"/invite",
"/contact",
"/pricing",
"/privacy",
"/terms",
"/changelog",
];
/**
* Saves the current path and redirects to login page
* Call this when a 401 response is received
* Returns true if the pathname is a public route where we should not run auth checks
* or redirect to login on 401.
*/
export function isPublicRoute(pathname: string): boolean {
if (pathname === "/" || pathname === "") return true;
return PUBLIC_ROUTE_PREFIXES.some((prefix) => pathname.startsWith(prefix));
}
/**
* Clears tokens and optionally redirects to login.
* Call this when a 401 response is received.
* Only redirects when the current route is protected; on public routes we just clear tokens.
*/
export function handleUnauthorized(): void {
if (typeof window === "undefined") return;
// Save the current path (including search params and hash) for redirect after login
const currentPath = window.location.pathname + window.location.search + window.location.hash;
const pathname = window.location.pathname;
// Don't save auth-related paths
const excludedPaths = ["/auth", "/auth/callback", "/"];
if (!excludedPaths.includes(window.location.pathname)) {
localStorage.setItem(REDIRECT_PATH_KEY, currentPath);
}
// Clear both tokens
// Always clear tokens
localStorage.removeItem(BEARER_TOKEN_KEY);
localStorage.removeItem(REFRESH_TOKEN_KEY);
// Redirect to home page (which has login options)
window.location.href = "/login";
// Only redirect on protected routes; stay on public pages (e.g. /docs)
if (!isPublicRoute(pathname)) {
const currentPath = pathname + window.location.search + window.location.hash;
const excludedPaths = ["/auth", "/auth/callback", "/"];
if (!excludedPaths.includes(pathname)) {
localStorage.setItem(REDIRECT_PATH_KEY, currentPath);
}
window.location.href = "/login";
}
}
/**
@ -179,7 +204,6 @@ export function getAuthHeaders(additionalHeaders?: Record<string, string>): Reco
/**
* Attempts to refresh the access token using the stored refresh token.
* Returns the new access token if successful, null otherwise.
* Exported for use by API services.
*/
export async function refreshAccessToken(): Promise<string | null> {
// If already refreshing, wait for that request to complete

View file

@ -1,324 +0,0 @@
/**
* Attachment adapter for assistant-ui
*
* This adapter handles file uploads by:
* 1. Uploading the file to the backend /attachments/process endpoint
* 2. The backend extracts markdown content using the configured ETL service
* 3. The extracted content is stored in the attachment and sent with messages
*/
import type { AttachmentAdapter, CompleteAttachment, PendingAttachment } from "@assistant-ui/react";
import { getBearerToken } from "@/lib/auth-utils";
/**
* Supported file types for the attachment adapter
*
* - Text/Markdown: .md, .markdown, .txt
* - Audio (if STT configured): .mp3, .mp4, .mpeg, .mpga, .m4a, .wav, .webm
* - Documents (depends on ETL service): .pdf, .docx, .doc, .pptx, .xlsx, .html
* - Images: .jpg, .jpeg, .png, .gif, .webp
*/
const ACCEPTED_FILE_TYPES = [
// Text/Markdown (always supported)
".md",
".markdown",
".txt",
// Audio files
".mp3",
".mp4",
".mpeg",
".mpga",
".m4a",
".wav",
".webm",
// Document files (depends on ETL service)
".pdf",
".docx",
".doc",
".pptx",
".xlsx",
".html",
// Image files
".jpg",
".jpeg",
".png",
".gif",
".webp",
].join(",");
/**
* Response from the attachment processing endpoint
*/
interface ProcessAttachmentResponse {
id: string;
name: string;
type: "document" | "image" | "file";
content: string;
contentLength: number;
}
/**
* Extended CompleteAttachment with our custom extractedContent field
* We store the extracted text in a custom field so we can access it in onNew
* For images, we also store the data URL so it can be displayed after persistence
*/
export interface ChatAttachment extends CompleteAttachment {
extractedContent: string;
imageDataUrl?: string; // Base64 data URL for images (persists across page reloads)
}
/**
* Process a file through the backend ETL service
*/
async function processAttachment(file: File): Promise<ProcessAttachmentResponse> {
const token = getBearerToken();
if (!token) {
throw new Error("Not authenticated");
}
const backendUrl = process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL || "http://localhost:8000";
const formData = new FormData();
formData.append("file", file);
const response = await fetch(`${backendUrl}/api/v1/attachments/process`, {
method: "POST",
headers: {
Authorization: `Bearer ${token}`,
},
body: formData,
});
if (!response.ok) {
const errorText = await response.text();
console.error("[processAttachment] Error response:", errorText);
let errorDetail = "Unknown error";
try {
const errorJson = JSON.parse(errorText);
// FastAPI validation errors return detail as array
if (Array.isArray(errorJson.detail)) {
errorDetail = errorJson.detail
.map((err: { msg?: string; loc?: string[] }) => {
const field = err.loc?.join(".") || "unknown";
return `${field}: ${err.msg || "validation error"}`;
})
.join("; ");
} else if (typeof errorJson.detail === "string") {
errorDetail = errorJson.detail;
} else {
errorDetail = JSON.stringify(errorJson);
}
} catch {
errorDetail = errorText || `HTTP ${response.status}`;
}
throw new Error(errorDetail);
}
return response.json();
}
// Store processed results for the send() method
const processedAttachments = new Map<string, ProcessAttachmentResponse>();
// Store image data URLs for attachments (so they persist after File objects are lost)
const imageDataUrls = new Map<string, string>();
/**
* Convert a File to a data URL (base64) for images
*/
async function fileToDataUrl(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = () => resolve(reader.result as string);
reader.onerror = reject;
reader.readAsDataURL(file);
});
}
/**
* Create the attachment adapter for assistant-ui
*
* This adapter:
* 1. Accepts file upload
* 2. Processes the file through the backend ETL service
* 3. Returns the attachment with extracted markdown content
*
* The content is stored in the attachment and will be sent with the message.
*/
export function createAttachmentAdapter(): AttachmentAdapter {
return {
accept: ACCEPTED_FILE_TYPES,
/**
* Async generator that yields pending states while processing
* and returns a pending attachment when done.
*
* IMPORTANT: The generator should return status: { type: "running", progress: 100 }
* NOT status: { type: "complete" }. The "complete" status is set by send().
* Returning "complete" from the generator will prevent send() from being called!
*
* This pattern allows the UI to show a loading indicator
* while the file is being processed by the backend.
* The send() method is called to finalize the attachment.
*/
async *add(input: File | { file: File }): AsyncGenerator<PendingAttachment, void> {
// Handle both direct File and { file: File } patterns
const file = input instanceof File ? input : input.file;
if (!file) {
console.error("[AttachmentAdapter] No file found in input:", input);
throw new Error("No file provided");
}
// Generate a unique ID for this attachment
const id = crypto.randomUUID();
// Determine attachment type from file
const attachmentType = file.type.startsWith("image/") ? "image" : "document";
// Yield initial pending state with "running" status (0% progress)
// This triggers the loading indicator in the UI
yield {
id,
type: attachmentType,
name: file.name,
file,
status: { type: "running", reason: "uploading", progress: 0 },
} as PendingAttachment;
try {
// For images, convert to data URL so we can display them after persistence
if (attachmentType === "image") {
const dataUrl = await fileToDataUrl(file);
imageDataUrls.set(id, dataUrl);
}
// Process the file through the backend ETL service
const result = await processAttachment(file);
// Verify we have the required fields
if (!result.content) {
console.error("[AttachmentAdapter] WARNING: No content received from backend!");
}
// Store the processed result for send()
processedAttachments.set(id, result);
// Create the final pending attachment
// IMPORTANT: Use "running" status with progress: 100 to indicate processing is done
// but attachment is still pending. The "complete" status will be set by send().
// Yield the final state to ensure it gets processed by the UI
yield {
id,
type: result.type,
name: result.name,
file,
status: { type: "running", reason: "uploading", progress: 100 },
} as PendingAttachment;
} catch (error) {
console.error("[AttachmentAdapter] Failed to process attachment:", error);
throw error;
}
},
/**
* Called when user sends the message.
* Converts the pending attachment to a complete attachment.
*/
async send(pendingAttachment: PendingAttachment): Promise<ChatAttachment> {
const result = processedAttachments.get(pendingAttachment.id);
const imageDataUrl = imageDataUrls.get(pendingAttachment.id);
if (result) {
// Clean up stored result
processedAttachments.delete(pendingAttachment.id);
if (imageDataUrl) {
imageDataUrls.delete(pendingAttachment.id);
}
return {
id: result.id,
type: result.type,
name: result.name,
contentType: "text/markdown",
status: { type: "complete" },
content: [
{
type: "text",
text: result.content,
},
],
extractedContent: result.content,
imageDataUrl, // Store data URL for images so they can be displayed after persistence
};
}
// Fallback if no processed result found
console.warn(
"[AttachmentAdapter] send() - No processed result found for attachment:",
pendingAttachment.id
);
return {
id: pendingAttachment.id,
type: pendingAttachment.type,
name: pendingAttachment.name,
contentType: "text/plain",
status: { type: "complete" },
content: [],
extractedContent: "",
imageDataUrl, // Still include data URL if available
};
},
async remove() {
// No server-side cleanup needed since we don't persist attachments
},
};
}
/**
* Extract attachment content for chat request
*
* This function extracts the content from attachments to be sent with the chat request.
* Only attachments that have been fully processed (have content) will be included.
*/
export function extractAttachmentContent(
attachments: Array<unknown>
): Array<{ id: string; name: string; type: string; content: string }> {
return attachments
.filter((att): att is ChatAttachment => {
if (!att || typeof att !== "object") return false;
const a = att as Record<string, unknown>;
// Check for our custom extractedContent field first
if (typeof a.extractedContent === "string" && a.extractedContent.length > 0) {
return true;
}
// Fallback: check if content array has text content
if (Array.isArray(a.content)) {
const textContent = (a.content as Array<{ type: string; text?: string }>).find(
(c) => c.type === "text" && typeof c.text === "string" && c.text.length > 0
);
return Boolean(textContent);
}
return false;
})
.map((att) => {
// Get content from extractedContent or from content array
let content = "";
if (typeof att.extractedContent === "string") {
content = att.extractedContent;
} else if (Array.isArray(att.content)) {
const textContent = (att.content as Array<{ type: string; text?: string }>).find(
(c) => c.type === "text"
);
content = textContent?.text || "";
}
return {
id: att.id,
name: att.name,
type: att.type,
content,
};
});
}

View file

@ -1,46 +1,9 @@
import type { ThreadMessageLike } from "@assistant-ui/react";
import { z } from "zod";
import type { MessageRecord } from "./thread-persistence";
/**
* Zod schema for persisted attachment info
*/
const PersistedAttachmentSchema = z.object({
id: z.string(),
name: z.string(),
type: z.string(),
contentType: z.string().optional(),
imageDataUrl: z.string().optional(),
extractedContent: z.string().optional(),
});
const AttachmentsPartSchema = z.object({
type: z.literal("attachments"),
items: z.array(PersistedAttachmentSchema),
});
type PersistedAttachment = z.infer<typeof PersistedAttachmentSchema>;
/**
* Extract persisted attachments from message content (type-safe with Zod)
*/
function extractPersistedAttachments(content: unknown): PersistedAttachment[] {
if (!Array.isArray(content)) return [];
for (const part of content) {
const result = AttachmentsPartSchema.safeParse(part);
if (result.success) {
return result.data.items;
}
}
return [];
}
/**
* Convert backend message to assistant-ui ThreadMessageLike format
* Filters out 'thinking-steps' part as it's handled separately via messageThinkingSteps
* Restores attachments for user messages from persisted data
*/
export function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
let content: ThreadMessageLike["content"];
@ -52,7 +15,7 @@ export function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
const filteredContent = msg.content.filter((part: unknown) => {
if (typeof part !== "object" || part === null || !("type" in part)) return true;
const partType = (part as { type: string }).type;
// Filter out thinking-steps, mentioned-documents, and attachments
// Filter out metadata parts not directly renderable by assistant-ui
return (
partType !== "thinking-steps" &&
partType !== "mentioned-documents" &&
@ -67,25 +30,6 @@ export function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
content = [{ type: "text", text: String(msg.content) }];
}
// Restore attachments for user messages
let attachments: ThreadMessageLike["attachments"];
if (msg.role === "user") {
const persistedAttachments = extractPersistedAttachments(msg.content);
if (persistedAttachments.length > 0) {
attachments = persistedAttachments.map((att) => ({
id: att.id,
name: att.name,
type: att.type as "document" | "image" | "file",
contentType: att.contentType || "application/octet-stream",
status: { type: "complete" as const },
content: [],
// Custom fields for our ChatAttachment interface
imageDataUrl: att.imageDataUrl,
extractedContent: att.extractedContent,
}));
}
}
// Build metadata.custom for author display in shared chats
const metadata = msg.author_id
? {
@ -103,7 +47,6 @@ export function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
role: msg.role,
content,
createdAt: new Date(msg.created_at),
attachments,
metadata,
};
}

View file

@ -1,15 +1,18 @@
// Helper function to get connector type display name
export const getConnectorTypeDisplay = (type: string): string => {
const typeMap: Record<string, string> = {
SERPER_API: "Serper API",
TAVILY_API: "Tavily API",
SEARXNG_API: "SearxNG",
LINKUP_API: "Linkup",
BAIDU_SEARCH_API: "Baidu Search",
SLACK_CONNECTOR: "Slack",
TEAMS_CONNECTOR: "Microsoft Teams",
NOTION_CONNECTOR: "Notion",
GITHUB_CONNECTOR: "GitHub",
LINEAR_CONNECTOR: "Linear",
JIRA_CONNECTOR: "Jira",
DISCORD_CONNECTOR: "Discord",
LINKUP_API: "Linkup",
CONFLUENCE_CONNECTOR: "Confluence",
BOOKSTACK_CONNECTOR: "BookStack",
CLICKUP_CONNECTOR: "ClickUp",
@ -23,8 +26,10 @@ export const getConnectorTypeDisplay = (type: string): string => {
LUMA_CONNECTOR: "Luma",
ELASTICSEARCH_CONNECTOR: "Elasticsearch",
WEBCRAWLER_CONNECTOR: "Web Pages",
YOUTUBE_CONNECTOR: "YouTube",
CIRCLEBACK_CONNECTOR: "Circleback",
OBSIDIAN_CONNECTOR: "Obsidian",
MCP_CONNECTOR: "MCP Server",
};
return typeMap[type] || type;
};

View file

@ -1,6 +1,6 @@
{
"name": "surfsense_web",
"version": "0.0.12",
"version": "0.0.13",
"private": true,
"description": "SurfSense Frontend",
"scripts": {

View file

@ -0,0 +1,19 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="340.000000pt" height="340.000000pt" viewBox="0 0 340.000000 340.000000"
preserveAspectRatio="xMidYMid meet">
<g transform="translate(0.000000,340.000000) scale(0.100000,-0.100000)" stroke="none">
<path fill="#F24D1D" d="M1385 2574 c-268 -38 -456 -142 -603 -335 -218 -286 -226 -755 -19
-1054 110 -159 268 -272 467 -336 67 -21 94 -24 240 -24 185 0 243 11 367 70
98 46 157 87 229 161 55 56 143 198 131 210 -8 8 -288 144 -296 144 -4 0 -16
-19 -28 -43 -34 -69 -117 -153 -186 -187 -214 -105 -484 -39 -620 152 -121
170 -141 455 -46 655 58 123 192 233 320 262 78 18 213 13 282 -10 101 -34
202 -120 248 -211 12 -24 23 -44 24 -46 2 -2 295 136 304 144 2 2 -12 33 -30
69 -82 165 -266 301 -477 356 -71 18 -247 31 -307 23z"/>
<path fill="#4162FE" d="M2575 1135 c-114 -40 -147 -185 -61 -271 65 -65 167 -65 232 0 83 83
59 213 -49 265 -50 24 -68 25 -122 6z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 1 KiB

View file

@ -1,12 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" fill="none">
<rect width="24" height="24" rx="6" fill="url(#composio-gradient)"/>
<path d="M12 6L17 9V15L12 18L7 15V9L12 6Z" stroke="white" stroke-width="1.5" stroke-linejoin="round"/>
<path d="M12 6V12M12 12L17 9M12 12L7 9M12 12V18" stroke="white" stroke-width="1.5" stroke-linecap="round"/>
<circle cx="12" cy="12" r="2" fill="white"/>
<defs>
<linearGradient id="composio-gradient" x1="0" y1="0" x2="24" y2="24" gradientUnits="userSpaceOnUse">
<stop stop-color="#8B5CF6"/>
<stop offset="1" stop-color="#A855F7"/>
</linearGradient>
</defs>
</svg>

Before

Width:  |  Height:  |  Size: 640 B

View file

@ -0,0 +1,50 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
width="512.000000pt" height="130.000000pt" viewBox="0 0 512.000000 130.000000"
preserveAspectRatio="xMidYMid meet">
<g transform="translate(0.000000,130.000000) scale(0.100000,-0.100000)"
fill="#000000" stroke="none">
<path d="M470 1287 c-73 -21 -118 -47 -176 -99 -168 -154 -270 -444 -290 -825
-9 -174 -12 -172 168 -104 215 79 557 174 779 214 l77 14 6 -36 c3 -20 6 -66
6 -102 0 -95 8 -128 34 -135 50 -12 368 132 447 204 56 51 68 81 45 117 -22
33 -72 45 -190 45 -89 0 -98 2 -91 17 34 89 46 269 23 347 -16 53 -81 118
-134 134 -52 15 -161 16 -221 1 -45 -12 -45 -12 -71 22 -114 150 -278 225
-412 186z m201 -74 c51 -23 105 -67 149 -124 l31 -38 -73 -37 c-121 -61 -245
-153 -371 -274 -107 -103 -297 -324 -327 -380 -6 -12 -14 -20 -16 -17 -9 8 17
220 37 316 68 312 196 513 362 567 55 18 151 12 208 -13z m509 -202 c21 -11
45 -35 60 -62 21 -38 25 -56 24 -129 0 -55 -9 -114 -23 -166 l-23 -81 -45 -7
c-93 -12 -88 -16 -107 82 -21 103 -65 242 -101 313 -15 28 -24 53 -22 55 9 9
124 21 162 17 22 -3 56 -13 75 -22z m-260 -98 c18 -42 38 -97 46 -122 17 -55
54 -215 54 -234 0 -9 -32 -19 -97 -31 -160 -30 -467 -110 -643 -169 -91 -30
-166 -54 -167 -53 -1 1 37 53 85 116 105 140 318 354 429 433 85 60 224 137
248 137 7 0 27 -34 45 -77z m584 -405 c17 -9 16 -12 -10 -39 -29 -30 -174
-109 -280 -152 l-62 -25 54 112 53 111 58 6 c69 6 161 0 187 -13z m-314 -3 c0
-10 -81 -167 -84 -164 -2 2 -6 39 -10 81 l-7 77 28 4 c45 7 73 7 73 2z"/>
<path d="M1880 645 l0 -415 45 0 45 0 0 415 0 415 -45 0 -45 0 0 -415z"/>
<path d="M3180 645 l0 -415 45 0 45 0 0 125 c0 82 4 125 11 125 6 0 72 -56
147 -125 l136 -125 59 0 c32 0 57 4 55 9 -1 5 -72 71 -155 146 -84 76 -153
141 -153 145 0 5 68 68 150 140 83 72 150 133 150 136 0 2 -28 4 -62 4 l-63 0
-125 -115 c-69 -63 -131 -114 -137 -114 -10 -1 -13 53 -13 239 l0 240 -45 0
-45 0 0 -415z"/>
<path d="M2184 1002 c-23 -15 -35 -51 -24 -72 31 -58 110 -39 110 26 0 35 -57
66 -86 46z"/>
<path d="M4774 820 c-43 -10 -102 -47 -121 -76 -23 -36 -33 -29 -33 21 l0 45
-45 0 -45 0 0 -405 0 -405 45 0 45 0 0 160 c0 88 3 160 6 160 4 0 26 -18 49
-39 53 -48 108 -65 197 -59 152 11 248 129 248 303 0 97 -23 158 -83 220 -72
73 -163 99 -263 75z m171 -114 c52 -40 75 -95 75 -184 0 -87 -15 -127 -63
-169 -74 -65 -176 -69 -260 -11 -59 42 -92 165 -68 256 14 49 63 109 106 129
19 8 57 12 101 11 60 -3 76 -8 109 -32z"/>
<path d="M2654 802 c-22 -11 -55 -37 -72 -58 l-32 -39 0 53 0 52 -50 0 -50 0
2 -287 3 -288 45 0 45 0 5 190 c5 199 11 226 57 267 29 26 88 48 130 48 49 -1
118 -32 135 -62 9 -16 14 -85 18 -233 l5 -210 45 0 45 0 3 193 c1 109 -2 210
-8 231 -14 54 -61 115 -106 136 -57 28 -168 31 -220 7z"/>
<path d="M2170 520 l0 -290 45 0 45 0 0 290 0 290 -45 0 -45 0 0 -290z"/>
<path d="M3800 619 c0 -212 8 -260 52 -313 46 -56 100 -79 184 -79 81 0 136
24 176 77 l23 31 5 -50 5 -50 45 0 45 0 3 288 2 287 -50 0 -50 0 0 -177 c0
-193 -9 -232 -61 -280 -52 -47 -158 -59 -216 -23 -58 35 -63 57 -63 280 l0
200 -50 0 -50 0 0 -191z"/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 3 KiB