Merge pull request #752 from MODSetter/dev

chore: various QOL Updates
This commit is contained in:
Rohan Verma 2026-01-28 22:25:18 -08:00 committed by GitHub
commit 5d5f9d3bfb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
103 changed files with 5009 additions and 1527 deletions

View file

@ -0,0 +1,114 @@
"""Add public chat sharing and cloning features to new_chat_threads
Revision ID: 81
Revises: 80
Create Date: 2026-01-23
Adds columns for:
1. Public sharing via tokenized URLs (public_share_token, public_share_enabled)
2. Clone tracking for audit (cloned_from_thread_id, cloned_at)
3. History bootstrap flag for cloned chats (needs_history_bootstrap)
4. Clone pending flag for two-phase clone (clone_pending)
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "81"
down_revision: str | None = "80"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Add public sharing and cloning columns to new_chat_threads.

    Every statement is idempotent (IF NOT EXISTS), so re-applying the
    migration is safe. Order matters: columns are added before the
    partial indexes that reference them.
    """
    # Each DDL string is executed verbatim, in sequence.
    ddl_statements = (
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS public_share_token VARCHAR(64);
        """,
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS public_share_enabled BOOLEAN NOT NULL DEFAULT FALSE;
        """,
        """
        CREATE UNIQUE INDEX IF NOT EXISTS ix_new_chat_threads_public_share_token
        ON new_chat_threads(public_share_token)
        WHERE public_share_token IS NOT NULL;
        """,
        """
        CREATE INDEX IF NOT EXISTS ix_new_chat_threads_public_share_enabled
        ON new_chat_threads(public_share_enabled)
        WHERE public_share_enabled = TRUE;
        """,
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS cloned_from_thread_id INTEGER
        REFERENCES new_chat_threads(id) ON DELETE SET NULL;
        """,
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS cloned_at TIMESTAMP WITH TIME ZONE;
        """,
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS needs_history_bootstrap BOOLEAN NOT NULL DEFAULT FALSE;
        """,
        """
        ALTER TABLE new_chat_threads
        ADD COLUMN IF NOT EXISTS clone_pending BOOLEAN NOT NULL DEFAULT FALSE;
        """,
        """
        CREATE INDEX IF NOT EXISTS ix_new_chat_threads_cloned_from_thread_id
        ON new_chat_threads(cloned_from_thread_id)
        WHERE cloned_from_thread_id IS NOT NULL;
        """,
    )
    for ddl in ddl_statements:
        op.execute(ddl)
def downgrade() -> None:
    """Remove public sharing and cloning columns from new_chat_threads.

    Reverses upgrade() in roughly the opposite order: clone index first,
    then clone columns, then the public-share indexes and columns. All
    statements use IF EXISTS, so the downgrade is idempotent.
    """
    cleanup_statements = (
        "DROP INDEX IF EXISTS ix_new_chat_threads_cloned_from_thread_id",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS clone_pending",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS needs_history_bootstrap",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS cloned_at",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS cloned_from_thread_id",
        "DROP INDEX IF EXISTS ix_new_chat_threads_public_share_enabled",
        "DROP INDEX IF EXISTS ix_new_chat_threads_public_share_token",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS public_share_enabled",
        "ALTER TABLE new_chat_threads DROP COLUMN IF EXISTS public_share_token",
    )
    for statement in cleanup_statements:
        op.execute(statement)

View file

@ -0,0 +1,62 @@
"""Add status and thread_id to podcasts
Revision ID: 82
Revises: 81
Create Date: 2026-01-27
Adds status enum and thread_id FK to podcasts.
"""
from collections.abc import Sequence
from alembic import op
revision: str = "82"
down_revision: str | None = "81"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Add status enum and thread_id FK to podcasts.

    Creates the podcast_status enum type, adds a status column (default
    'ready' so pre-existing rows remain usable) and a nullable thread_id
    FK to new_chat_threads, plus supporting indexes.
    """
    # BUG FIX: PostgreSQL's CREATE TYPE has no IF NOT EXISTS clause, so the
    # bare statement failed with DuplicateObject on re-run, while every other
    # statement in this migration is idempotent. Swallow duplicate_object in
    # a DO block to make type creation idempotent as well.
    op.execute(
        """
        DO $$
        BEGIN
            CREATE TYPE podcast_status AS ENUM ('pending', 'generating', 'ready', 'failed');
        EXCEPTION
            WHEN duplicate_object THEN NULL;
        END
        $$;
        """
    )
    op.execute(
        """
        ALTER TABLE podcasts
        ADD COLUMN IF NOT EXISTS status podcast_status NOT NULL DEFAULT 'ready';
        """
    )
    op.execute(
        """
        ALTER TABLE podcasts
        ADD COLUMN IF NOT EXISTS thread_id INTEGER
        REFERENCES new_chat_threads(id) ON DELETE SET NULL;
        """
    )
    op.execute(
        """
        CREATE INDEX IF NOT EXISTS ix_podcasts_thread_id
        ON podcasts(thread_id);
        """
    )
    op.execute(
        """
        CREATE INDEX IF NOT EXISTS ix_podcasts_status
        ON podcasts(status);
        """
    )
def downgrade() -> None:
    """Drop podcast status/thread_id indexes and columns, then the enum type."""
    # Indexes first, then columns, then the type they depend on.
    for statement in (
        "DROP INDEX IF EXISTS ix_podcasts_status",
        "DROP INDEX IF EXISTS ix_podcasts_thread_id",
        "ALTER TABLE podcasts DROP COLUMN IF EXISTS thread_id",
        "ALTER TABLE podcasts DROP COLUMN IF EXISTS status",
        "DROP TYPE IF EXISTS podcast_status",
    ):
        op.execute(statement)

View file

@ -0,0 +1,33 @@
"""Add REDDIT_FOLLOW to incentive task type enum
Revision ID: 83
Revises: 82
Changes:
1. Add REDDIT_FOLLOW value to incentivetasktype enum
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "83"
down_revision: str | None = "82"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Add REDDIT_FOLLOW to incentivetasktype enum."""
    # IF NOT EXISTS makes the statement safe to re-run on databases that
    # already have the value.
    statement = "ALTER TYPE incentivetasktype ADD VALUE IF NOT EXISTS 'REDDIT_FOLLOW'"
    op.execute(statement)
def downgrade() -> None:
    """No-op: intentionally leaves REDDIT_FOLLOW in the enum.

    PostgreSQL cannot remove a value from an enum type directly; doing so
    would mean rebuilding the type and every column that uses it, which is
    complex and risky. Leaving the extra value in place is harmless.
    """

View file

@ -120,6 +120,7 @@ async def create_surfsense_deep_agent(
connector_service: ConnectorService, connector_service: ConnectorService,
checkpointer: Checkpointer, checkpointer: Checkpointer,
user_id: str | None = None, user_id: str | None = None,
thread_id: int | None = None,
agent_config: AgentConfig | None = None, agent_config: AgentConfig | None = None,
enabled_tools: list[str] | None = None, enabled_tools: list[str] | None = None,
disabled_tools: list[str] | None = None, disabled_tools: list[str] | None = None,
@ -232,6 +233,7 @@ async def create_surfsense_deep_agent(
"connector_service": connector_service, "connector_service": connector_service,
"firecrawl_api_key": firecrawl_api_key, "firecrawl_api_key": firecrawl_api_key,
"user_id": user_id, # Required for memory tools "user_id": user_id, # Required for memory tools
"thread_id": thread_id, # For podcast tool
# Dynamic connector/document type discovery for knowledge base tool # Dynamic connector/document type discovery for knowledge base tool
"available_connectors": available_connectors, "available_connectors": available_connectors,
"available_document_types": available_document_types, "available_document_types": available_document_types,

View file

@ -18,6 +18,8 @@ import redis
from langchain_core.tools import tool from langchain_core.tools import tool
from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.asyncio import AsyncSession
from app.db import Podcast, PodcastStatus
# Redis connection for tracking active podcast tasks # Redis connection for tracking active podcast tasks
# Uses the same Redis instance as Celery # Uses the same Redis instance as Celery
REDIS_URL = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0") REDIS_URL = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
@ -32,50 +34,46 @@ def get_redis_client() -> redis.Redis:
return _redis_client return _redis_client
def get_active_podcast_key(search_space_id: int) -> str: def _redis_key(search_space_id: int) -> str:
"""Generate Redis key for tracking active podcast task.""" return f"podcast:generating:{search_space_id}"
return f"podcast:active:{search_space_id}"
def get_active_podcast_task(search_space_id: int) -> str | None: def get_generating_podcast_id(search_space_id: int) -> int | None:
"""Check if there's an active podcast task for this search space.""" """Get the podcast ID currently being generated for this search space."""
try: try:
client = get_redis_client() client = get_redis_client()
return client.get(get_active_podcast_key(search_space_id)) value = client.get(_redis_key(search_space_id))
return int(value) if value else None
except Exception: except Exception:
# If Redis is unavailable, allow the request (fail open)
return None return None
def set_active_podcast_task(search_space_id: int, task_id: str) -> None: def set_generating_podcast(search_space_id: int, podcast_id: int) -> None:
"""Mark a podcast task as active for this search space.""" """Mark a podcast as currently generating for this search space."""
try: try:
client = get_redis_client() client = get_redis_client()
# Set with 30-minute expiry as safety net (podcast should complete before this) client.setex(_redis_key(search_space_id), 1800, str(podcast_id))
client.setex(get_active_podcast_key(search_space_id), 1800, task_id)
except Exception as e: except Exception as e:
print(f"[generate_podcast] Warning: Could not set active task in Redis: {e}") print(
f"[generate_podcast] Warning: Could not set generating podcast in Redis: {e}"
)
def clear_active_podcast_task(search_space_id: int) -> None:
"""Clear the active podcast task for this search space."""
try:
client = get_redis_client()
client.delete(get_active_podcast_key(search_space_id))
except Exception as e:
print(f"[generate_podcast] Warning: Could not clear active task in Redis: {e}")
def create_generate_podcast_tool( def create_generate_podcast_tool(
search_space_id: int, search_space_id: int,
db_session: AsyncSession, db_session: AsyncSession,
thread_id: int | None = None,
): ):
""" """
Factory function to create the generate_podcast tool with injected dependencies. Factory function to create the generate_podcast tool with injected dependencies.
Pre-creates podcast record with pending status so podcast_id is available
immediately for frontend polling.
Args: Args:
search_space_id: The user's search space ID search_space_id: The user's search space ID
db_session: Database session (not used - Celery creates its own) db_session: Database session for creating the podcast record
thread_id: The chat thread ID for associating the podcast
Returns: Returns:
A configured tool function for generating podcasts A configured tool function for generating podcasts
@ -98,76 +96,71 @@ def create_generate_podcast_tool(
- "Make a podcast about..." - "Make a podcast about..."
- "Turn this into a podcast" - "Turn this into a podcast"
The tool will start generating a podcast in the background.
The podcast will be available once generation completes.
IMPORTANT: Only one podcast can be generated at a time. If a podcast
is already being generated, this tool will return a message asking
the user to wait.
Args: Args:
source_content: The text content to convert into a podcast. source_content: The text content to convert into a podcast.
This can be a summary, research findings, or any text
the user wants transformed into an audio podcast.
podcast_title: Title for the podcast (default: "SurfSense Podcast") podcast_title: Title for the podcast (default: "SurfSense Podcast")
user_prompt: Optional instructions for podcast style, tone, or format. user_prompt: Optional instructions for podcast style, tone, or format.
For example: "Make it casual and fun" or "Focus on the key insights"
Returns: Returns:
A dictionary containing: A dictionary containing:
- status: "processing" (task submitted), "already_generating", or "error" - status: PodcastStatus value (pending, generating, or failed)
- task_id: The Celery task ID for polling status (if processing) - podcast_id: The podcast ID for polling (when status is pending or generating)
- title: The podcast title - title: The podcast title
- message: Status message for the user - message: Status message (or "error" field if status is failed)
""" """
try: try:
# Check if a podcast is already being generated for this search space generating_podcast_id = get_generating_podcast_id(search_space_id)
active_task_id = get_active_podcast_task(search_space_id) if generating_podcast_id:
if active_task_id:
print( print(
f"[generate_podcast] Blocked duplicate request. Active task: {active_task_id}" f"[generate_podcast] Blocked duplicate request. Generating podcast: {generating_podcast_id}"
) )
return { return {
"status": "already_generating", "status": PodcastStatus.GENERATING.value,
"task_id": active_task_id, "podcast_id": generating_podcast_id,
"title": podcast_title, "title": podcast_title,
"message": "A podcast is already being generated. Please wait for it to complete before requesting another one.", "message": "A podcast is already being generated. Please wait for it to complete.",
} }
# Import Celery task here to avoid circular imports podcast = Podcast(
title=podcast_title,
status=PodcastStatus.PENDING,
search_space_id=search_space_id,
thread_id=thread_id,
)
db_session.add(podcast)
await db_session.commit()
await db_session.refresh(podcast)
from app.tasks.celery_tasks.podcast_tasks import ( from app.tasks.celery_tasks.podcast_tasks import (
generate_content_podcast_task, generate_content_podcast_task,
) )
# Submit Celery task for background processing
task = generate_content_podcast_task.delay( task = generate_content_podcast_task.delay(
podcast_id=podcast.id,
source_content=source_content, source_content=source_content,
search_space_id=search_space_id, search_space_id=search_space_id,
podcast_title=podcast_title,
user_prompt=user_prompt, user_prompt=user_prompt,
) )
# Mark this task as active set_generating_podcast(search_space_id, podcast.id)
set_active_podcast_task(search_space_id, task.id)
print(f"[generate_podcast] Submitted Celery task: {task.id}") print(f"[generate_podcast] Created podcast {podcast.id}, task: {task.id}")
# Return immediately with task_id for polling
return { return {
"status": "processing", "status": PodcastStatus.PENDING.value,
"task_id": task.id, "podcast_id": podcast.id,
"title": podcast_title, "title": podcast_title,
"message": "Podcast generation started. This may take a few minutes.", "message": "Podcast generation started. This may take a few minutes.",
} }
except Exception as e: except Exception as e:
error_message = str(e) error_message = str(e)
print(f"[generate_podcast] Error submitting task: {error_message}") print(f"[generate_podcast] Error: {error_message}")
return { return {
"status": "error", "status": PodcastStatus.FAILED.value,
"error": error_message, "error": error_message,
"title": podcast_title, "title": podcast_title,
"task_id": None, "podcast_id": None,
} }
return generate_podcast return generate_podcast

View file

@ -107,8 +107,9 @@ BUILTIN_TOOLS: list[ToolDefinition] = [
factory=lambda deps: create_generate_podcast_tool( factory=lambda deps: create_generate_podcast_tool(
search_space_id=deps["search_space_id"], search_space_id=deps["search_space_id"],
db_session=deps["db_session"], db_session=deps["db_session"],
thread_id=deps["thread_id"],
), ),
requires=["search_space_id", "db_session"], requires=["search_space_id", "db_session", "thread_id"],
), ),
# Link preview tool - fetches Open Graph metadata for URLs # Link preview tool - fetches Open Graph metadata for URLs
ToolDefinition( ToolDefinition(

View file

@ -4,6 +4,8 @@ Composio Google Drive Connector Module.
Provides Google Drive specific methods for data retrieval and indexing via Composio. Provides Google Drive specific methods for data retrieval and indexing via Composio.
""" """
import hashlib
import json
import logging import logging
import os import os
import tempfile import tempfile
@ -464,6 +466,55 @@ async def check_document_by_unique_identifier(
return existing_doc_result.scalars().first() return existing_doc_result.scalars().first()
async def check_document_by_content_hash(
    session: AsyncSession, content_hash: str
) -> Document | None:
    """Return the first document whose content_hash matches, or None.

    Used to stop identical content from being indexed twice, no matter
    which connector originally indexed it.
    """
    from sqlalchemy.future import select

    query = select(Document).where(Document.content_hash == content_hash)
    result = await session.execute(query)
    return result.scalars().first()
async def check_document_by_google_drive_file_id(
    session: AsyncSession, file_id: str, search_space_id: int
) -> Document | None:
    """Find a document already indexed from this Google Drive file, if any.

    Matches either metadata key format:
    - 'google_drive_file_id' (normal Google Drive connector)
    - 'file_id' (Composio Google Drive connector)

    Lets callers detect duplicates BEFORE downloading/ETL, saving
    expensive API calls.
    """
    from sqlalchemy import String, cast, or_
    from sqlalchemy.future import select

    # Casting a JSON value to String keeps the surrounding quotes
    # ("value" rather than value), so compare against the quoted id.
    quoted_file_id = f'"{file_id}"'
    metadata = Document.document_metadata
    query = select(Document).where(
        Document.search_space_id == search_space_id,
        or_(
            # Normal Google Drive connector format
            cast(metadata["google_drive_file_id"], String) == quoted_file_id,
            # Composio Google Drive connector format
            cast(metadata["file_id"], String) == quoted_file_id,
        ),
    )
    result = await session.execute(query)
    return result.scalars().first()
async def update_connector_last_indexed( async def update_connector_last_indexed(
session: AsyncSession, session: AsyncSession,
connector, connector,
@ -477,6 +528,33 @@ async def update_connector_last_indexed(
logger.info(f"Updated last_indexed_at to {connector.last_indexed_at}") logger.info(f"Updated last_indexed_at to {connector.last_indexed_at}")
def generate_indexing_settings_hash(
    selected_folders: list[dict],
    selected_files: list[dict],
    indexing_options: dict,
) -> str:
    """Fingerprint the indexing configuration so changes can be detected.

    A different hash from the one stored at last index time means the
    folder/file selection or key options changed, which requires a full
    re-scan instead of delta sync.

    Args:
        selected_folders: List of {id, name} for folders to index
        selected_files: List of {id, name} for individual files to index
        indexing_options: Dict with max_files_per_folder, include_subfolders, etc.

    Returns:
        MD5 hash string of the settings
    """
    # Only id lists (sorted for order-independence) and the two options
    # that affect scan scope feed the fingerprint.
    fingerprint = {
        "folders": sorted(folder.get("id", "") for folder in selected_folders),
        "files": sorted(entry.get("id", "") for entry in selected_files),
        "include_subfolders": indexing_options.get("include_subfolders", True),
        "max_files_per_folder": indexing_options.get("max_files_per_folder", 100),
    }
    serialized = json.dumps(fingerprint, sort_keys=True)
    return hashlib.md5(serialized.encode()).hexdigest()
async def index_composio_google_drive( async def index_composio_google_drive(
session: AsyncSession, session: AsyncSession,
connector, connector,
@ -487,12 +565,16 @@ async def index_composio_google_drive(
log_entry, log_entry,
update_last_indexed: bool = True, update_last_indexed: bool = True,
max_items: int = 1000, max_items: int = 1000,
) -> tuple[int, str]: ) -> tuple[int, int, str | None]:
"""Index Google Drive files via Composio with delta sync support. """Index Google Drive files via Composio with delta sync support.
Returns:
Tuple of (documents_indexed, documents_skipped, error_message or None)
Delta Sync Flow: Delta Sync Flow:
1. First sync: Full scan + get initial page token 1. First sync: Full scan + get initial page token
2. Subsequent syncs: Use LIST_CHANGES to process only changed files 2. Subsequent syncs: Use LIST_CHANGES to process only changed files
(unless settings changed or incremental_sync is disabled)
Supports folder/file selection via connector config: Supports folder/file selection via connector config:
- selected_folders: List of {id, name} for folders to index - selected_folders: List of {id, name} for folders to index
@ -508,12 +590,42 @@ async def index_composio_google_drive(
selected_files = connector_config.get("selected_files", []) selected_files = connector_config.get("selected_files", [])
indexing_options = connector_config.get("indexing_options", {}) indexing_options = connector_config.get("indexing_options", {})
# Check for stored page token for delta sync
stored_page_token = connector_config.get("drive_page_token")
use_delta_sync = stored_page_token and connector.last_indexed_at
max_files_per_folder = indexing_options.get("max_files_per_folder", 100) max_files_per_folder = indexing_options.get("max_files_per_folder", 100)
include_subfolders = indexing_options.get("include_subfolders", True) include_subfolders = indexing_options.get("include_subfolders", True)
incremental_sync = indexing_options.get("incremental_sync", True)
# Generate current settings hash to detect configuration changes
current_settings_hash = generate_indexing_settings_hash(
selected_folders, selected_files, indexing_options
)
last_settings_hash = connector_config.get("last_indexed_settings_hash")
# Detect if settings changed since last index
settings_changed = (
last_settings_hash is not None
and current_settings_hash != last_settings_hash
)
if settings_changed:
logger.info(
f"Indexing settings changed for connector {connector_id}. "
f"Will perform full re-scan to apply new configuration."
)
# Check for stored page token for delta sync
stored_page_token = connector_config.get("drive_page_token")
# Determine whether to use delta sync:
# - Must have a stored page token
# - Must have been indexed before (last_indexed_at exists)
# - User must have incremental_sync enabled
# - Settings must not have changed (folder/subfolder config)
use_delta_sync = (
incremental_sync
and stored_page_token
and connector.last_indexed_at
and not settings_changed
)
# Route to delta sync or full scan # Route to delta sync or full scan
if use_delta_sync: if use_delta_sync:
@ -588,6 +700,14 @@ async def index_composio_google_drive(
elif token_error: elif token_error:
logger.warning(f"Failed to get new page token: {token_error}") logger.warning(f"Failed to get new page token: {token_error}")
# Save current settings hash for future change detection
# This allows detecting when folder/subfolder settings change
if not connector.config:
connector.config = {}
connector.config["last_indexed_settings_hash"] = current_settings_hash
flag_modified(connector, "config")
logger.info(f"Saved indexing settings hash for connector {connector_id}")
# CRITICAL: Always update timestamp so Electric SQL syncs and UI shows indexed status # CRITICAL: Always update timestamp so Electric SQL syncs and UI shows indexed status
await update_connector_last_indexed(session, connector, update_last_indexed) await update_connector_last_indexed(session, connector, update_last_indexed)
@ -628,11 +748,11 @@ async def index_composio_google_drive(
}, },
) )
return documents_indexed, error_message return documents_indexed, documents_skipped, error_message
except Exception as e: except Exception as e:
logger.error(f"Failed to index Google Drive via Composio: {e!s}", exc_info=True) logger.error(f"Failed to index Google Drive via Composio: {e!s}", exc_info=True)
return 0, f"Failed to index Google Drive via Composio: {e!s}" return 0, 0, f"Failed to index Google Drive via Composio: {e!s}"
async def _index_composio_drive_delta_sync( async def _index_composio_drive_delta_sync(
@ -953,13 +1073,28 @@ async def _process_single_drive_file(
""" """
processing_errors = [] processing_errors = []
# ========== EARLY DUPLICATE CHECK BY FILE ID ==========
# Check if this Google Drive file was already indexed by ANY connector
# This happens BEFORE download/ETL to save expensive API calls
existing_by_file_id = await check_document_by_google_drive_file_id(
session, file_id, search_space_id
)
if existing_by_file_id:
logger.info(
f"Skipping file {file_name} (file_id={file_id}): already indexed "
f"by {existing_by_file_id.document_type.value} as '{existing_by_file_id.title}' "
f"(saved download & ETL cost)"
)
return 0, 1, processing_errors # Skip - NO download, NO ETL!
# ======================================================
# Generate unique identifier hash # Generate unique identifier hash
document_type = DocumentType(TOOLKIT_TO_DOCUMENT_TYPE["googledrive"]) document_type = DocumentType(TOOLKIT_TO_DOCUMENT_TYPE["googledrive"])
unique_identifier_hash = generate_unique_identifier_hash( unique_identifier_hash = generate_unique_identifier_hash(
document_type, f"drive_{file_id}", search_space_id document_type, f"drive_{file_id}", search_space_id
) )
# Check if document exists # Check if document exists by unique identifier (same connector, same file)
existing_document = await check_document_by_unique_identifier( existing_document = await check_document_by_unique_identifier(
session, unique_identifier_hash session, unique_identifier_hash
) )
@ -1000,7 +1135,7 @@ async def _process_single_drive_file(
if existing_document: if existing_document:
if existing_document.content_hash == content_hash: if existing_document.content_hash == content_hash:
return 0, 1, processing_errors # Skipped return 0, 1, processing_errors # Skipped - unchanged
# Update existing document # Update existing document
user_llm = await get_user_long_context_llm(session, user_id, search_space_id) user_llm = await get_user_long_context_llm(session, user_id, search_space_id)
@ -1039,7 +1174,19 @@ async def _process_single_drive_file(
existing_document.chunks = chunks existing_document.chunks = chunks
existing_document.updated_at = get_current_timestamp() existing_document.updated_at = get_current_timestamp()
return 1, 0, processing_errors # Indexed return 1, 0, processing_errors # Indexed - updated
# Check if content_hash already exists (from any connector)
# This prevents duplicate content and avoids IntegrityError on unique constraint
existing_by_content_hash = await check_document_by_content_hash(
session, content_hash
)
if existing_by_content_hash:
logger.info(
f"Skipping file {file_name} (file_id={file_id}): identical content "
f"already indexed as '{existing_by_content_hash.title}'"
)
return 0, 1, processing_errors # Skipped - duplicate content
# Create new document # Create new document
user_llm = await get_user_long_context_llm(session, user_id, search_space_id) user_llm = await get_user_long_context_llm(session, user_id, search_space_id)
@ -1085,7 +1232,7 @@ async def _process_single_drive_file(
) )
session.add(document) session.add(document)
return 1, 0, processing_errors # Indexed return 1, 0, processing_errors # Indexed - new
async def _fetch_folder_files_recursively( async def _fetch_folder_files_recursively(

View file

@ -1,6 +1,10 @@
import asyncio
import logging import logging
from collections.abc import Awaitable, Callable
from typing import Any, TypeVar
from notion_client import AsyncClient from notion_client import AsyncClient
from notion_client.errors import APIResponseError
from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select from sqlalchemy.future import select
@ -12,6 +16,43 @@ from app.utils.oauth_security import TokenEncryption
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Type variable for generic return type
T = TypeVar("T")
# ============================================================================
# Retry Configuration (per Notion API docs)
# https://developers.notion.com/reference/request-limits
# https://developers.notion.com/reference/status-codes
# ============================================================================
MAX_RETRIES = 5
BASE_RETRY_DELAY = 1.0 # seconds
MAX_RETRY_DELAY = 60.0 # seconds (Notion's max request timeout)
# Type alias for retry callback function
# Signature: async callback(retry_reason, attempt, max_attempts, wait_seconds) -> None
# retry_reason: 'rate_limit', 'server_error', 'timeout'
# This callback can be used to update notifications during retries
RetryCallbackType = Callable[[str, int, int, float], Awaitable[None]]
# HTTP status codes that should trigger a retry
# 429: rate_limited - Use Retry-After header
# 500: internal_server_error - Unexpected error
# 502: bad_gateway - Failed upstream connection
# 503: service_unavailable - Notion unavailable or timeout
# 504: gateway_timeout - Notion timed out
RETRYABLE_STATUS_CODES = frozenset({429, 500, 502, 503, 504})
# Known unsupported block types that Notion API doesn't expose
# These will be skipped gracefully instead of failing the entire sync
UNSUPPORTED_BLOCK_TYPE_ERRORS = [
"transcription is not supported",
"ai_block is not supported",
"is not supported via the API",
]
# Known unsupported block types to check before API calls
UNSUPPORTED_BLOCK_TYPES = ["transcription", "ai_block"]
class NotionHistoryConnector: class NotionHistoryConnector:
def __init__( def __init__(
@ -32,6 +73,28 @@ class NotionHistoryConnector:
self._connector_id = connector_id self._connector_id = connector_id
self._credentials = credentials self._credentials = credentials
self._notion_client: AsyncClient | None = None self._notion_client: AsyncClient | None = None
# Track pages with skipped unsupported content (for user notifications)
self._pages_with_skipped_content: list[str] = []
# Optional callback to notify about retry progress (for user notifications)
self._on_retry_callback: RetryCallbackType | None = None
# Track if using legacy integration token (for upgrade notification)
self._using_legacy_token: bool = False
def set_retry_callback(self, callback: RetryCallbackType | None) -> None:
"""
Set a callback function to be called when API calls are retried.
This allows the indexer to receive notifications about rate limits
and other transient errors, which can be used to update user-facing
notifications.
Args:
callback: Async function with signature:
callback(retry_reason, attempt, max_attempts, wait_seconds) -> None
retry_reason: 'rate_limit', 'server_error', or 'timeout'
Set to None to disable callbacks.
"""
self._on_retry_callback = callback
async def _get_valid_token(self) -> str: async def _get_valid_token(self) -> str:
""" """
@ -58,6 +121,18 @@ class NotionHistoryConnector:
config_data = connector.config.copy() config_data = connector.config.copy()
# Check for legacy integration token format first
# (for connectors created before OAuth was implemented)
legacy_token = config_data.get("NOTION_INTEGRATION_TOKEN")
raw_access_token = config_data.get("access_token")
# Validate that we have some form of token
if not raw_access_token and not legacy_token:
raise ValueError(
"Notion integration not properly connected. "
"Please remove and re-add the Notion connector."
)
# Decrypt credentials if they are encrypted # Decrypt credentials if they are encrypted
token_encrypted = config_data.get("_token_encrypted", False) token_encrypted = config_data.get("_token_encrypted", False)
if token_encrypted and config.SECRET_KEY: if token_encrypted and config.SECRET_KEY:
@ -82,13 +157,40 @@ class NotionHistoryConnector:
f"Failed to decrypt Notion credentials for connector {self._connector_id}: {e!s}" f"Failed to decrypt Notion credentials for connector {self._connector_id}: {e!s}"
) )
raise ValueError( raise ValueError(
f"Failed to decrypt Notion credentials: {e!s}" "Notion credentials could not be decrypted. "
"Please remove and re-add the Notion connector."
) from e ) from e
# Handle legacy format: convert NOTION_INTEGRATION_TOKEN to access_token
if not config_data.get("access_token") and legacy_token:
config_data["access_token"] = legacy_token
self._using_legacy_token = True
logger.info(
f"Using legacy NOTION_INTEGRATION_TOKEN for connector {self._connector_id}"
)
# Final validation: ensure we have a valid access_token after all processing
final_token = config_data.get("access_token")
if not final_token or (
isinstance(final_token, str) and not final_token.strip()
):
raise ValueError(
"Notion access token is invalid or empty. "
"Please remove and re-add the Notion connector."
)
try: try:
self._credentials = NotionAuthCredentialsBase.from_dict(config_data) self._credentials = NotionAuthCredentialsBase.from_dict(config_data)
except KeyError as e:
raise ValueError(
f"Notion credentials are incomplete (missing {e}). "
"Please reconnect your Notion account."
) from e
except Exception as e: except Exception as e:
raise ValueError(f"Invalid Notion credentials: {e!s}") from e raise ValueError(
f"Notion credentials format error: {e!s}. "
"Please reconnect your Notion account."
) from e
# Check if token is expired and refreshable # Check if token is expired and refreshable
if self._credentials.is_expired and self._credentials.is_refreshable: if self._credentials.is_expired and self._credentials.is_refreshable:
@ -157,12 +259,161 @@ class NotionHistoryConnector:
self._notion_client = AsyncClient(auth=token) self._notion_client = AsyncClient(auth=token)
return self._notion_client return self._notion_client
async def _api_call_with_retry(
    self,
    api_func: Callable[..., Awaitable[T]],
    *args: Any,
    on_retry: RetryCallbackType | None = None,
    **kwargs: Any,
) -> T:
    """
    Run a Notion API coroutine, retrying transient failures with backoff.

    Retryable statuses (per Notion API documentation):
    - 429 rate_limited: honors the Retry-After header when present
    - 500 internal_server_error / 502 bad_gateway / 503 service_unavailable:
      exponential backoff
    - 504 gateway_timeout: exponential backoff

    Args:
        api_func: Async Notion API callable to invoke.
        *args: Positional arguments forwarded to ``api_func``.
        on_retry: Optional async callback notified before each retry wait.
            Signature: callback(retry_reason, attempt, max_attempts, wait_seconds)
            where retry_reason is one of 'rate_limit', 'server_error', 'timeout'.
        **kwargs: Keyword arguments forwarded to ``api_func``.

    Returns:
        Whatever ``api_func`` returns on the first successful call.

    Raises:
        APIResponseError: For non-retryable statuses, or once all retries
            are exhausted.
    """
    final_error: APIResponseError | None = None
    backoff = BASE_RETRY_DELAY

    for attempt_index in range(MAX_RETRIES):
        try:
            return await api_func(*args, **kwargs)
        except APIResponseError as exc:
            final_error = exc

            # Non-retryable errors (e.g. 400, 401, 403, 404) propagate
            # to the caller immediately.
            if exc.status not in RETRYABLE_STATUS_CODES:
                raise

            # Out of attempts: log and give up.
            if attempt_index == MAX_RETRIES - 1:
                logger.error(
                    f"Notion API call failed after {MAX_RETRIES} retries. "
                    f"Last error: {exc.status} {exc.code}"
                )
                raise

            if exc.status == 429:
                # Rate limited: prefer the server-provided Retry-After value.
                reason = "rate_limit"
                retry_after = exc.headers.get("Retry-After") if exc.headers else None
                delay = backoff
                if retry_after:
                    try:
                        delay = float(retry_after)
                    except (ValueError, TypeError):
                        delay = backoff
                logger.warning(
                    f"Notion API rate limited (429). "
                    f"Waiting {delay}s. Attempt {attempt_index + 1}/{MAX_RETRIES}"
                )
            elif exc.status == 504:
                # Gateway timeout.
                reason = "timeout"
                delay = min(backoff, MAX_RETRY_DELAY)
                logger.warning(
                    f"Notion API timeout ({exc.status}). "
                    f"Retrying in {delay}s. Attempt {attempt_index + 1}/{MAX_RETRIES}"
                )
            else:
                # Server error (500/502/503): exponential backoff.
                reason = "server_error"
                delay = min(backoff, MAX_RETRY_DELAY)
                logger.warning(
                    f"Notion API error {exc.status} ({exc.code}). "
                    f"Retrying in {delay}s. Attempt {attempt_index + 1}/{MAX_RETRIES}"
                )

            # Surface retry progress to the caller before we start waiting,
            # so any user-facing notification shows up during the pause.
            if on_retry:
                try:
                    await on_retry(
                        reason,
                        attempt_index + 1,  # 1-based for display
                        MAX_RETRIES,
                        delay,
                    )
                except Exception as callback_error:
                    # A broken callback must never abort the retry loop.
                    logger.warning(f"Retry callback failed: {callback_error}")

            await asyncio.sleep(delay)
            # Grow the backoff for the next attempt, capped at the maximum.
            backoff = min(backoff * 2, MAX_RETRY_DELAY)

    # Defensive: the loop either returns or raises, but keep a fallback.
    if final_error:
        raise final_error
    raise RuntimeError("Unexpected state in retry logic")
async def close(self): async def close(self):
"""Close the async client connection.""" """Close the async client connection."""
if self._notion_client: if self._notion_client:
await self._notion_client.aclose() await self._notion_client.aclose()
self._notion_client = None self._notion_client = None
def get_pages_with_skipped_content(self) -> list[str]:
    """Return the titles of pages where unsupported content was skipped.

    Returns:
        Titles of every page recorded as having skipped content.
    """
    skipped_pages = self._pages_with_skipped_content
    return skipped_pages
def get_skipped_content_count(self) -> int:
    """Return how many pages had unsupported content skipped.

    Returns:
        Number of pages recorded with skipped content.
    """
    skipped_pages = self._pages_with_skipped_content
    return len(skipped_pages)
def is_using_legacy_token(self) -> bool:
    """Report whether this connector authenticated with the legacy token.

    Returns:
        True when the legacy NOTION_INTEGRATION_TOKEN was used,
        False for OAuth-style credentials.
    """
    legacy_flag = self._using_legacy_token
    return legacy_flag
def _record_skipped_content(self, page_title: str):
    """Remember that a page had unsupported content skipped.

    Duplicate titles are ignored so each page is recorded at most once.

    Args:
        page_title: Title of the page with skipped content.
    """
    # Guard clause: already recorded, nothing to do.
    if page_title in self._pages_with_skipped_content:
        return
    self._pages_with_skipped_content.append(page_title)
async def __aenter__(self): async def __aenter__(self):
"""Async context manager entry.""" """Async context manager entry."""
return self return self
@ -186,7 +437,7 @@ class NotionHistoryConnector:
# Build the filter for the search # Build the filter for the search
# Note: Notion API requires specific filter structure # Note: Notion API requires specific filter structure
search_params = {} search_params: dict[str, Any] = {}
# Filter for pages only (not databases) # Filter for pages only (not databases)
search_params["filter"] = {"value": "page", "property": "object"} search_params["filter"] = {"value": "page", "property": "object"}
@ -214,29 +465,53 @@ class NotionHistoryConnector:
cursor = None cursor = None
while has_more: while has_more:
if cursor: try:
search_params["start_cursor"] = cursor if cursor:
search_params["start_cursor"] = cursor
search_results = await notion.search(**search_params) # Use retry wrapper for search API call
search_results = await self._api_call_with_retry(
notion.search, on_retry=self._on_retry_callback, **search_params
)
pages.extend(search_results["results"]) pages.extend(search_results["results"])
has_more = search_results.get("has_more", False) has_more = search_results.get("has_more", False)
if has_more: if has_more:
cursor = search_results.get("next_cursor") cursor = search_results.get("next_cursor")
except APIResponseError as e:
error_message = str(e)
# Handle invalid cursor - stop pagination gracefully
if "start_cursor provided is invalid" in error_message:
logger.warning(
f"Invalid pagination cursor encountered. "
f"Continuing with {len(pages)} pages already fetched."
)
has_more = False
continue
# Re-raise other errors
raise
all_page_data = [] all_page_data = []
for page in pages: for page in pages:
page_id = page["id"] page_id = page["id"]
page_title = self.get_page_title(page)
# Get detailed page information # Get detailed page information (pass title for skip tracking)
page_content = await self.get_page_content(page_id) page_content, had_skipped_content = await self.get_page_content(
page_id, page_title
)
# Record if this page had skipped content
if had_skipped_content:
self._record_skipped_content(page_title)
all_page_data.append( all_page_data.append(
{ {
"page_id": page_id, "page_id": page_id,
"title": self.get_page_title(page), "title": page_title,
"content": page_content, "content": page_content,
} }
) )
@ -265,46 +540,91 @@ class NotionHistoryConnector:
# If no title found, return the page ID as fallback # If no title found, return the page ID as fallback
return f"Untitled page ({page['id']})" return f"Untitled page ({page['id']})"
async def get_page_content(self, page_id): async def get_page_content(
self, page_id: str, page_title: str | None = None
) -> tuple[list, bool]:
""" """
Fetches the content (blocks) of a specific page. Fetches the content (blocks) of a specific page.
Args: Args:
page_id (str): The ID of the page to fetch page_id (str): The ID of the page to fetch
page_title (str, optional): Title of the page (for logging)
Returns: Returns:
list: List of processed blocks from the page tuple: (List of processed blocks, bool indicating if content was skipped)
""" """
notion = await self._get_client() notion = await self._get_client()
blocks = [] blocks = []
has_more = True has_more = True
cursor = None cursor = None
skipped_blocks_count = 0
had_skipped_content = False
# Paginate through all blocks # Paginate through all blocks
while has_more: while has_more:
if cursor: try:
response = await notion.blocks.children.list( # Use retry wrapper for blocks.children.list API call
block_id=page_id, start_cursor=cursor if cursor:
) response = await self._api_call_with_retry(
else: notion.blocks.children.list,
response = await notion.blocks.children.list(block_id=page_id) on_retry=self._on_retry_callback,
block_id=page_id,
start_cursor=cursor,
)
else:
response = await self._api_call_with_retry(
notion.blocks.children.list,
on_retry=self._on_retry_callback,
block_id=page_id,
)
blocks.extend(response["results"]) blocks.extend(response["results"])
has_more = response["has_more"] has_more = response["has_more"]
if has_more: if has_more:
cursor = response["next_cursor"] cursor = response["next_cursor"]
except APIResponseError as e:
error_message = str(e)
# Check if this is an unsupported block type error
if any(err in error_message for err in UNSUPPORTED_BLOCK_TYPE_ERRORS):
logger.warning(
f"Skipping page blocks due to unsupported block type in page {page_id}: {error_message}"
)
skipped_blocks_count += 1
had_skipped_content = True
# If we haven't fetched any blocks yet, return empty
# If we have some blocks, continue with what we have
has_more = False
continue
elif "Could not find block" in error_message:
logger.warning(
f"Block not found in page {page_id}, continuing with available blocks: {error_message}"
)
has_more = False
continue
# Re-raise other API errors (after retry exhaustion)
raise
if skipped_blocks_count > 0:
logger.info(
f"Page {page_id}: Skipped {skipped_blocks_count} unsupported block sections, "
f"successfully processed {len(blocks)} blocks"
)
# Process nested blocks recursively # Process nested blocks recursively
processed_blocks = [] processed_blocks = []
for block in blocks: for block in blocks:
processed_block = await self.process_block(block) processed_block, block_had_skips = await self.process_block(block)
processed_blocks.append(processed_block) if processed_block: # Only add if block was processed successfully
processed_blocks.append(processed_block)
if block_had_skips:
had_skipped_content = True
return processed_blocks return processed_blocks, had_skipped_content
async def process_block(self, block): async def process_block(self, block) -> tuple[dict | None, bool]:
""" """
Processes a block and recursively fetches any child blocks. Processes a block and recursively fetches any child blocks.
@ -312,12 +632,28 @@ class NotionHistoryConnector:
block (dict): The block to process block (dict): The block to process
Returns: Returns:
dict: Processed block with content and children tuple: (Processed block dict or None, bool indicating if content was skipped)
""" """
notion = await self._get_client() notion = await self._get_client()
block_id = block["id"] block_id = block["id"]
block_type = block["type"] block_type = block["type"]
had_skipped_content = False
# Check if this is a known unsupported block type before processing
if block_type in UNSUPPORTED_BLOCK_TYPES:
logger.debug(
f"Skipping unsupported block type: {block_type} (block_id: {block_id})"
)
return (
{
"id": block_id,
"type": block_type,
"content": f"[{block_type} block - not supported by Notion API]",
"children": [],
},
True, # Content was skipped
)
# Extract block content based on its type # Extract block content based on its type
content = self.extract_block_content(block) content = self.extract_block_content(block)
@ -327,17 +663,48 @@ class NotionHistoryConnector:
child_blocks = [] child_blocks = []
if has_children: if has_children:
# Fetch and process child blocks try:
children_response = await notion.blocks.children.list(block_id=block_id) # Use retry wrapper for blocks.children.list API call
for child_block in children_response["results"]: children_response = await self._api_call_with_retry(
child_blocks.append(await self.process_block(child_block)) notion.blocks.children.list,
on_retry=self._on_retry_callback,
block_id=block_id,
)
for child_block in children_response["results"]:
processed_child, child_had_skips = await self.process_block(
child_block
)
if processed_child:
child_blocks.append(processed_child)
if child_had_skips:
had_skipped_content = True
except APIResponseError as e:
error_message = str(e)
# Check if this is an unsupported block type error
if any(err in error_message for err in UNSUPPORTED_BLOCK_TYPE_ERRORS):
logger.warning(
f"Skipping children of block {block_id} due to unsupported block type: {error_message}"
)
had_skipped_content = True
# Continue without children instead of failing
elif "Could not find block" in error_message:
logger.warning(
f"Block {block_id} children not accessible, skipping: {error_message}"
)
# Continue without children
else:
# Re-raise other API errors (after retry exhaustion)
raise
return { return (
"id": block_id, {
"type": block_type, "id": block_id,
"content": content, "type": block_type,
"children": child_blocks, "content": content,
} "children": child_blocks,
},
had_skipped_content,
)
def extract_block_content(self, block): def extract_block_content(self, block):
""" """

View file

@ -93,6 +93,13 @@ class SearchSourceConnectorType(str, Enum):
COMPOSIO_GOOGLE_CALENDAR_CONNECTOR = "COMPOSIO_GOOGLE_CALENDAR_CONNECTOR" COMPOSIO_GOOGLE_CALENDAR_CONNECTOR = "COMPOSIO_GOOGLE_CALENDAR_CONNECTOR"
class PodcastStatus(str, Enum):
    """Lifecycle states for a generated podcast.

    Stored as lowercase string values; ``str`` mixin keeps members
    directly comparable to plain strings.
    """

    PENDING = "pending"
    GENERATING = "generating"
    READY = "ready"
    FAILED = "failed"
class LiteLLMProvider(str, Enum): class LiteLLMProvider(str, Enum):
""" """
Enum for LLM providers supported by LiteLLM. Enum for LLM providers supported by LiteLLM.
@ -156,6 +163,7 @@ class IncentiveTaskType(str, Enum):
""" """
GITHUB_STAR = "GITHUB_STAR" GITHUB_STAR = "GITHUB_STAR"
REDDIT_FOLLOW = "REDDIT_FOLLOW"
# Future tasks can be added here: # Future tasks can be added here:
# GITHUB_ISSUE = "GITHUB_ISSUE" # GITHUB_ISSUE = "GITHUB_ISSUE"
# SOCIAL_SHARE = "SOCIAL_SHARE" # SOCIAL_SHARE = "SOCIAL_SHARE"
@ -171,6 +179,12 @@ INCENTIVE_TASKS_CONFIG = {
"pages_reward": 100, "pages_reward": 100,
"action_url": "https://github.com/MODSetter/SurfSense", "action_url": "https://github.com/MODSetter/SurfSense",
}, },
IncentiveTaskType.REDDIT_FOLLOW: {
"title": "Join our Subreddit",
"description": "Join the SurfSense community on Reddit",
"pages_reward": 100,
"action_url": "https://www.reddit.com/r/SurfSense/",
},
# Future tasks can be configured here: # Future tasks can be configured here:
# IncentiveTaskType.GITHUB_ISSUE: { # IncentiveTaskType.GITHUB_ISSUE: {
# "title": "Create an issue", # "title": "Create an issue",
@ -397,6 +411,47 @@ class NewChatThread(BaseModel, TimestampMixin):
index=True, index=True,
) )
# Public sharing - cryptographic token for public URL access.
# NULL until sharing is first enabled; uniqueness enforced at the DB level.
public_share_token = Column(
    String(64),
    nullable=True,
    unique=True,
    index=True,
)
# Whether public sharing is currently enabled for this thread.
# server_default keeps pre-existing rows FALSE when the column is added.
public_share_enabled = Column(
    Boolean,
    nullable=False,
    default=False,
    server_default="false",
)
# Clone tracking - for audit and history bootstrap.
# SET NULL on delete so removing the source thread keeps the clone intact.
cloned_from_thread_id = Column(
    Integer,
    ForeignKey("new_chat_threads.id", ondelete="SET NULL"),
    nullable=True,
    index=True,
)
# When this thread was cloned; NULL for threads that are not clones.
cloned_at = Column(
    TIMESTAMP(timezone=True),
    nullable=True,
)
# Flag to bootstrap LangGraph checkpointer with DB messages on first message
needs_history_bootstrap = Column(
    Boolean,
    nullable=False,
    default=False,
    server_default="false",
)
# Flag indicating content clone is pending (two-phase clone);
# cleared by the complete-clone endpoint once content has been copied.
clone_pending = Column(
    Boolean,
    nullable=False,
    default=False,
    server_default="false",
)
# Relationships # Relationships
search_space = relationship("SearchSpace", back_populates="new_chat_threads") search_space = relationship("SearchSpace", back_populates="new_chat_threads")
created_by = relationship("User", back_populates="new_chat_threads") created_by = relationship("User", back_populates="new_chat_threads")
@ -709,14 +764,34 @@ class Podcast(BaseModel, TimestampMixin):
__tablename__ = "podcasts" __tablename__ = "podcasts"
title = Column(String(500), nullable=False) title = Column(String(500), nullable=False)
podcast_transcript = Column(JSONB, nullable=True) # List of transcript entries podcast_transcript = Column(JSONB, nullable=True)
file_location = Column(Text, nullable=True) # Path to the audio file file_location = Column(Text, nullable=True)
# Generation lifecycle state (see PodcastStatus). Defaults to READY so
# legacy rows created before async generation remain playable.
status = Column(
    SQLAlchemyEnum(
        PodcastStatus,
        name="podcast_status",
        # create_type=False: presumably the DB enum type is created by a
        # migration rather than by the ORM — confirm against migrations.
        create_type=False,
        # Store the lowercase string values, not the member names.
        values_callable=lambda x: [e.value for e in x],
    ),
    nullable=False,
    default=PodcastStatus.READY,
    server_default="ready",
    index=True,
)
search_space_id = Column( search_space_id = Column(
Integer, ForeignKey("searchspaces.id", ondelete="CASCADE"), nullable=False Integer, ForeignKey("searchspaces.id", ondelete="CASCADE"), nullable=False
) )
search_space = relationship("SearchSpace", back_populates="podcasts") search_space = relationship("SearchSpace", back_populates="podcasts")
# Optional link back to the chat thread this podcast was generated from.
# SET NULL on thread deletion so the podcast record survives.
thread_id = Column(
    Integer,
    ForeignKey("new_chat_threads.id", ondelete="SET NULL"),
    nullable=True,
    index=True,
)
# NOTE(review): appears unidirectional (no back_populates on the thread
# side is visible here) — confirm against NewChatThread.
thread = relationship("NewChatThread")
class SearchSpace(BaseModel, TimestampMixin): class SearchSpace(BaseModel, TimestampMixin):
__tablename__ = "searchspaces" __tablename__ = "searchspaces"

View file

@ -31,6 +31,7 @@ from .notes_routes import router as notes_router
from .notifications_routes import router as notifications_router from .notifications_routes import router as notifications_router
from .notion_add_connector_route import router as notion_add_connector_router from .notion_add_connector_route import router as notion_add_connector_router
from .podcasts_routes import router as podcasts_router from .podcasts_routes import router as podcasts_router
from .public_chat_routes import router as public_chat_router
from .rbac_routes import router as rbac_router from .rbac_routes import router as rbac_router
from .search_source_connectors_routes import router as search_source_connectors_router from .search_source_connectors_routes import router as search_source_connectors_router
from .search_spaces_routes import router as search_spaces_router from .search_spaces_routes import router as search_spaces_router
@ -68,4 +69,5 @@ router.include_router(circleback_webhook_router) # Circleback meeting webhooks
router.include_router(surfsense_docs_router) # Surfsense documentation for citations router.include_router(surfsense_docs_router) # Surfsense documentation for citations
router.include_router(notifications_router) # Notifications with Electric SQL sync router.include_router(notifications_router) # Notifications with Electric SQL sync
router.include_router(composio_router) # Composio OAuth and toolkit management router.include_router(composio_router) # Composio OAuth and toolkit management
router.include_router(public_chat_router) # Public chat sharing and cloning
router.include_router(incentive_tasks_router) # Incentive tasks for earning free pages router.include_router(incentive_tasks_router) # Incentive tasks for earning free pages

View file

@ -37,6 +37,7 @@ from app.db import (
get_async_session, get_async_session,
) )
from app.schemas.new_chat import ( from app.schemas.new_chat import (
CompleteCloneResponse,
NewChatMessageAppend, NewChatMessageAppend,
NewChatMessageRead, NewChatMessageRead,
NewChatRequest, NewChatRequest,
@ -45,11 +46,14 @@ from app.schemas.new_chat import (
NewChatThreadUpdate, NewChatThreadUpdate,
NewChatThreadVisibilityUpdate, NewChatThreadVisibilityUpdate,
NewChatThreadWithMessages, NewChatThreadWithMessages,
PublicShareToggleRequest,
PublicShareToggleResponse,
RegenerateRequest, RegenerateRequest,
ThreadHistoryLoadResponse, ThreadHistoryLoadResponse,
ThreadListItem, ThreadListItem,
ThreadListResponse, ThreadListResponse,
) )
from app.services.public_chat_service import toggle_public_share
from app.tasks.chat.stream_new_chat import stream_new_chat from app.tasks.chat.stream_new_chat import stream_new_chat
from app.users import current_active_user from app.users import current_active_user
from app.utils.rbac import check_permission from app.utils.rbac import check_permission
@ -215,6 +219,7 @@ async def list_threads(
visibility=thread.visibility, visibility=thread.visibility,
created_by_id=thread.created_by_id, created_by_id=thread.created_by_id,
is_own_thread=is_own_thread, is_own_thread=is_own_thread,
public_share_enabled=thread.public_share_enabled,
created_at=thread.created_at, created_at=thread.created_at,
updated_at=thread.updated_at, updated_at=thread.updated_at,
) )
@ -316,6 +321,7 @@ async def search_threads(
thread.created_by_id == user.id thread.created_by_id == user.id
or (thread.created_by_id is None and is_search_space_owner) or (thread.created_by_id is None and is_search_space_owner)
), ),
public_share_enabled=thread.public_share_enabled,
created_at=thread.created_at, created_at=thread.created_at,
updated_at=thread.updated_at, updated_at=thread.updated_at,
) )
@ -664,6 +670,66 @@ async def delete_thread(
) from None ) from None
@router.post(
    "/threads/{thread_id}/complete-clone", response_model=CompleteCloneResponse
)
async def complete_clone(
    thread_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Complete the cloning process for a thread (phase two of two-phase clone).

    Copies messages and podcasts from the source thread.
    Sets clone_pending=False and needs_history_bootstrap=True when done.
    Requires authentication and ownership of the thread.

    Args:
        thread_id: ID of the cloned thread to finish populating.
        session: Async database session (injected).
        user: Authenticated user (injected); must be the thread creator.

    Raises:
        HTTPException: 404 if the thread does not exist, 403 if the caller
            is not the creator, 400 if the clone already completed or has
            no source thread, 500 on unexpected errors (after rollback).
    """
    # Local import — presumably to avoid a circular import at module load
    # time; confirm before hoisting to the top of the file.
    from app.services.public_chat_service import complete_clone_content

    try:
        result = await session.execute(
            select(NewChatThread).filter(NewChatThread.id == thread_id)
        )
        thread = result.scalars().first()

        if not thread:
            raise HTTPException(status_code=404, detail="Thread not found")

        # Only the thread's creator may finalize its clone.
        if thread.created_by_id != user.id:
            raise HTTPException(status_code=403, detail="Not authorized")

        # Reject re-running a clone that has already been completed.
        if not thread.clone_pending:
            raise HTTPException(status_code=400, detail="Clone already completed")

        if not thread.cloned_from_thread_id:
            raise HTTPException(
                status_code=400, detail="No source thread to clone from"
            )

        message_count = await complete_clone_content(
            session=session,
            target_thread=thread,
            source_thread_id=thread.cloned_from_thread_id,
            target_search_space_id=thread.search_space_id,
        )

        return CompleteCloneResponse(
            status="success",
            message_count=message_count,
        )

    except HTTPException:
        # Re-raise intentional HTTP errors untouched.
        raise
    except Exception as e:
        # Roll back any partial copy before reporting failure.
        await session.rollback()
        raise HTTPException(
            status_code=500,
            detail=f"An unexpected error occurred while completing clone: {e!s}",
        ) from None
@router.patch("/threads/{thread_id}/visibility", response_model=NewChatThreadRead) @router.patch("/threads/{thread_id}/visibility", response_model=NewChatThreadRead)
async def update_thread_visibility( async def update_thread_visibility(
thread_id: int, thread_id: int,
@ -729,6 +795,32 @@ async def update_thread_visibility(
) from None ) from None
@router.patch(
    "/threads/{thread_id}/public-share", response_model=PublicShareToggleResponse
)
async def update_thread_public_share(
    thread_id: int,
    request: Request,
    toggle_request: PublicShareToggleRequest,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Turn public sharing on or off for a chat thread.

    Only the creator of the thread can manage public sharing.
    When enabled, returns a public URL that anyone can use to view the chat.
    """
    # Strip any trailing slash so the generated share URL joins cleanly.
    api_root = str(request.base_url).rstrip("/")

    return await toggle_public_share(
        session=session,
        thread_id=thread_id,
        user=user,
        enabled=toggle_request.enabled,
        base_url=api_root,
    )
# ============================================================================= # =============================================================================
# Message Endpoints # Message Endpoints
# ============================================================================= # =============================================================================
@ -996,6 +1088,7 @@ async def handle_new_chat(
attachments=request.attachments, attachments=request.attachments,
mentioned_document_ids=request.mentioned_document_ids, mentioned_document_ids=request.mentioned_document_ids,
mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids, mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids,
needs_history_bootstrap=thread.needs_history_bootstrap,
), ),
media_type="text/event-stream", media_type="text/event-stream",
headers={ headers={
@ -1223,6 +1316,7 @@ async def regenerate_response(
mentioned_document_ids=request.mentioned_document_ids, mentioned_document_ids=request.mentioned_document_ids,
mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids, mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids,
checkpoint_id=target_checkpoint_id, checkpoint_id=target_checkpoint_id,
needs_history_bootstrap=thread.needs_history_bootstrap,
): ):
yield chunk yield chunk
# If we get here, streaming completed successfully # If we get here, streaming completed successfully

View file

@ -6,6 +6,7 @@ For older items (beyond the sync window), use the list endpoint.
""" """
from datetime import UTC, datetime, timedelta from datetime import UTC, datetime, timedelta
from typing import Literal
from fastapi import APIRouter, Depends, HTTPException, Query, status from fastapi import APIRouter, Depends, HTTPException, Query, status
from pydantic import BaseModel from pydantic import BaseModel
@ -20,6 +21,9 @@ router = APIRouter(prefix="/notifications", tags=["notifications"])
# Must match frontend SYNC_WINDOW_DAYS in use-inbox.ts # Must match frontend SYNC_WINDOW_DAYS in use-inbox.ts
SYNC_WINDOW_DAYS = 14 SYNC_WINDOW_DAYS = 14
# Valid notification types - must match frontend InboxItemTypeEnum
NotificationType = Literal["connector_indexing", "document_processing", "new_mention"]
class NotificationResponse(BaseModel): class NotificationResponse(BaseModel):
"""Response model for a single notification.""" """Response model for a single notification."""
@ -73,6 +77,9 @@ class UnreadCountResponse(BaseModel):
@router.get("/unread-count", response_model=UnreadCountResponse) @router.get("/unread-count", response_model=UnreadCountResponse)
async def get_unread_count( async def get_unread_count(
search_space_id: int | None = Query(None, description="Filter by search space ID"), search_space_id: int | None = Query(None, description="Filter by search space ID"),
type_filter: NotificationType | None = Query(
None, alias="type", description="Filter by notification type"
),
user: User = Depends(current_active_user), user: User = Depends(current_active_user),
session: AsyncSession = Depends(get_async_session), session: AsyncSession = Depends(get_async_session),
) -> UnreadCountResponse: ) -> UnreadCountResponse:
@ -103,6 +110,10 @@ async def get_unread_count(
| (Notification.search_space_id.is_(None)) | (Notification.search_space_id.is_(None))
) )
# Filter by notification type if provided
if type_filter:
base_filter.append(Notification.type == type_filter)
# Total unread count (all time) # Total unread count (all time)
total_query = select(func.count(Notification.id)).where(*base_filter) total_query = select(func.count(Notification.id)).where(*base_filter)
total_result = await session.execute(total_query) total_result = await session.execute(total_query)
@ -125,7 +136,7 @@ async def get_unread_count(
@router.get("", response_model=NotificationListResponse) @router.get("", response_model=NotificationListResponse)
async def list_notifications( async def list_notifications(
search_space_id: int | None = Query(None, description="Filter by search space ID"), search_space_id: int | None = Query(None, description="Filter by search space ID"),
type_filter: str | None = Query( type_filter: NotificationType | None = Query(
None, alias="type", description="Filter by notification type" None, alias="type", description="Filter by notification type"
), ),
before_date: str | None = Query( before_date: str | None = Query(

View file

@ -1,21 +1,19 @@
""" """
Podcast routes for task status polling and audio retrieval. Podcast routes for CRUD operations and audio streaming.
These routes support the podcast generation feature in new-chat. These routes support the podcast generation feature in new-chat.
Note: The old Chat-based podcast generation has been removed. Frontend polls GET /podcasts/{podcast_id} to check status field.
""" """
import os import os
from pathlib import Path from pathlib import Path
from celery.result import AsyncResult
from fastapi import APIRouter, Depends, HTTPException from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
from sqlalchemy import select from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.asyncio import AsyncSession
from app.celery_app import celery_app
from app.db import ( from app.db import (
Permission, Permission,
Podcast, Podcast,
@ -25,7 +23,7 @@ from app.db import (
get_async_session, get_async_session,
) )
from app.schemas import PodcastRead from app.schemas import PodcastRead
from app.users import current_active_user from app.users import current_active_user, current_optional_user
from app.utils.rbac import check_permission from app.utils.rbac import check_permission
router = APIRouter() router = APIRouter()
@ -84,12 +82,17 @@ async def read_podcasts(
async def read_podcast( async def read_podcast(
podcast_id: int, podcast_id: int,
session: AsyncSession = Depends(get_async_session), session: AsyncSession = Depends(get_async_session),
user: User = Depends(current_active_user), user: User | None = Depends(current_optional_user),
): ):
""" """
Get a specific podcast by ID. Get a specific podcast by ID.
Requires PODCASTS_READ permission for the search space.
Access is allowed if:
- User is authenticated with PODCASTS_READ permission, OR
- Podcast belongs to a publicly shared thread
""" """
from app.services.public_chat_service import is_podcast_publicly_accessible
try: try:
result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id)) result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
podcast = result.scalars().first() podcast = result.scalars().first()
@ -100,16 +103,20 @@ async def read_podcast(
detail="Podcast not found", detail="Podcast not found",
) )
# Check permission for the search space is_public = await is_podcast_publicly_accessible(session, podcast_id)
await check_permission(
session,
user,
podcast.search_space_id,
Permission.PODCASTS_READ.value,
"You don't have permission to read podcasts in this search space",
)
return podcast if not is_public:
if not user:
raise HTTPException(status_code=401, detail="Authentication required")
await check_permission(
session,
user,
podcast.search_space_id,
Permission.PODCASTS_READ.value,
"You don't have permission to read podcasts in this search space",
)
return PodcastRead.from_orm_with_entries(podcast)
except HTTPException as he: except HTTPException as he:
raise he raise he
except SQLAlchemyError: except SQLAlchemyError:
@ -161,46 +168,49 @@ async def delete_podcast(
async def stream_podcast( async def stream_podcast(
podcast_id: int, podcast_id: int,
session: AsyncSession = Depends(get_async_session), session: AsyncSession = Depends(get_async_session),
user: User = Depends(current_active_user), user: User | None = Depends(current_optional_user),
): ):
""" """
Stream a podcast audio file. Stream a podcast audio file.
Requires PODCASTS_READ permission for the search space.
Access is allowed if:
- User is authenticated with PODCASTS_READ permission, OR
- Podcast belongs to a publicly shared thread
Note: Both /stream and /audio endpoints are supported for compatibility. Note: Both /stream and /audio endpoints are supported for compatibility.
""" """
from app.services.public_chat_service import is_podcast_publicly_accessible
try: try:
result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id)) result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
podcast = result.scalars().first() podcast = result.scalars().first()
if not podcast: if not podcast:
raise HTTPException( raise HTTPException(status_code=404, detail="Podcast not found")
status_code=404,
detail="Podcast not found", is_public = await is_podcast_publicly_accessible(session, podcast_id)
if not is_public:
if not user:
raise HTTPException(status_code=401, detail="Authentication required")
await check_permission(
session,
user,
podcast.search_space_id,
Permission.PODCASTS_READ.value,
"You don't have permission to access podcasts in this search space",
) )
# Check permission for the search space
await check_permission(
session,
user,
podcast.search_space_id,
Permission.PODCASTS_READ.value,
"You don't have permission to access podcasts in this search space",
)
# Get the file path
file_path = podcast.file_location file_path = podcast.file_location
# Check if the file exists
if not file_path or not os.path.isfile(file_path): if not file_path or not os.path.isfile(file_path):
raise HTTPException(status_code=404, detail="Podcast audio file not found") raise HTTPException(status_code=404, detail="Podcast audio file not found")
# Define a generator function to stream the file
def iterfile(): def iterfile():
with open(file_path, mode="rb") as file_like: with open(file_path, mode="rb") as file_like:
yield from file_like yield from file_like
# Return a streaming response with appropriate headers
return StreamingResponse( return StreamingResponse(
iterfile(), iterfile(),
media_type="audio/mpeg", media_type="audio/mpeg",
@ -216,62 +226,3 @@ async def stream_podcast(
raise HTTPException( raise HTTPException(
status_code=500, detail=f"Error streaming podcast: {e!s}" status_code=500, detail=f"Error streaming podcast: {e!s}"
) from e ) from e
@router.get("/podcasts/task/{task_id}/status")
async def get_podcast_task_status(
    task_id: str,
    user: User = Depends(current_active_user),
):
    """
    Report the status of a podcast generation task.

    Polled by the new-chat frontend until the task finishes.

    Returns:
        - status: "processing" | "success" | "error"
        - podcast_id / title / transcript_entries: only when status == "success"
        - error: only when status == "error"
        - state: raw Celery state, only while status == "processing"
    """
    try:
        async_result = AsyncResult(task_id, app=celery_app)

        # Not finished yet: expose the raw Celery state so the UI keeps polling.
        if not async_result.ready():
            return {
                "status": "processing",
                "state": async_result.state,
            }

        # Finished, but the task itself failed at the Celery level.
        if not async_result.successful():
            return {
                "status": "error",
                "error": str(async_result.result) if async_result.result else "Task failed",
            }

        payload = async_result.result
        if not isinstance(payload, dict):
            return {
                "status": "error",
                "error": "Unexpected task result format",
            }

        if payload.get("status") == "success":
            return {
                "status": "success",
                "podcast_id": payload.get("podcast_id"),
                "title": payload.get("title"),
                "transcript_entries": payload.get("transcript_entries"),
            }

        return {
            "status": "error",
            "error": payload.get("error", "Unknown error"),
        }
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Error checking task status: {e!s}"
        ) from e

View file

@ -0,0 +1,84 @@
"""
Routes for public chat access (unauthenticated and mixed-auth endpoints).
"""
from datetime import UTC, datetime
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import ChatVisibility, NewChatThread, User, get_async_session
from app.schemas.new_chat import (
CloneInitResponse,
PublicChatResponse,
)
from app.services.public_chat_service import (
get_public_chat,
get_thread_by_share_token,
get_user_default_search_space,
)
from app.users import current_active_user
router = APIRouter(prefix="/public", tags=["public"])
@router.get("/{share_token}", response_model=PublicChatResponse)
async def read_public_chat(
    share_token: str,
    session: AsyncSession = Depends(get_async_session),
):
    """
    Fetch a publicly shared chat by its share token.

    Open endpoint — no authentication required. The service layer returns
    sanitized content (citations stripped) and raises 404 when the token is
    unknown or sharing has been disabled.
    """
    public_chat = await get_public_chat(session, share_token)
    return public_chat
@router.post("/{share_token}/clone", response_model=CloneInitResponse)
async def clone_public_chat_endpoint(
    share_token: str,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Initialize cloning a public chat into the caller's account.

    Phase one of the two-phase clone: creates an empty private thread marked
    clone_pending=True. The frontend should redirect to the new thread and
    call /complete-clone to copy the messages. Requires authentication.
    """
    # Resolve the source thread; 404 if the token is invalid or sharing is off.
    source = await get_thread_by_share_token(session, share_token)
    if source is None:
        raise HTTPException(
            status_code=404, detail="Chat not found or no longer public"
        )

    # Clones land in a search space the user owns.
    destination_space_id = await get_user_default_search_space(session, user.id)
    if destination_space_id is None:
        raise HTTPException(status_code=400, detail="No search space found for user")

    pending_clone = NewChatThread(
        title=source.title,
        archived=False,
        visibility=ChatVisibility.PRIVATE,
        search_space_id=destination_space_id,
        created_by_id=user.id,
        public_share_enabled=False,
        cloned_from_thread_id=source.id,
        cloned_at=datetime.now(UTC),
        clone_pending=True,
    )
    session.add(pending_clone)
    await session.commit()
    await session.refresh(pending_clone)

    return CloneInitResponse(
        thread_id=pending_clone.id,
        search_space_id=destination_space_id,
        share_token=share_token,
    )

View file

@ -123,7 +123,9 @@ async def list_all_permissions(
for perm in Permission: for perm in Permission:
# Extract category from permission value (e.g., "documents:read" -> "documents") # Extract category from permission value (e.g., "documents:read" -> "documents")
category = perm.value.split(":")[0] if ":" in perm.value else "general" category = perm.value.split(":")[0] if ":" in perm.value else "general"
description = PERMISSION_DESCRIPTIONS.get(perm.value, f"Permission for {perm.value}") description = PERMISSION_DESCRIPTIONS.get(
perm.value, f"Permission for {perm.value}"
)
permissions.append( permissions.append(
PermissionInfo( PermissionInfo(

View file

@ -187,6 +187,7 @@ async def create_search_source_connector(
user_id=str(user.id), user_id=str(user.id),
connector_type=db_connector.connector_type, connector_type=db_connector.connector_type,
frequency_minutes=db_connector.indexing_frequency_minutes, frequency_minutes=db_connector.indexing_frequency_minutes,
connector_config=db_connector.config,
) )
if not success: if not success:
logger.warning( logger.warning(
@ -646,6 +647,7 @@ async def index_connector_content(
# Handle different connector types # Handle different connector types
response_message = "" response_message = ""
indexing_started = True
# Use UTC for consistency with last_indexed_at storage # Use UTC for consistency with last_indexed_at storage
today_str = datetime.now(UTC).strftime("%Y-%m-%d") today_str = datetime.now(UTC).strftime("%Y-%m-%d")
@ -921,14 +923,31 @@ async def index_connector_content(
elif connector.connector_type == SearchSourceConnectorType.WEBCRAWLER_CONNECTOR: elif connector.connector_type == SearchSourceConnectorType.WEBCRAWLER_CONNECTOR:
from app.tasks.celery_tasks.connector_tasks import index_crawled_urls_task from app.tasks.celery_tasks.connector_tasks import index_crawled_urls_task
from app.utils.webcrawler_utils import parse_webcrawler_urls
logger.info( # Check if URLs are configured before triggering indexing
f"Triggering web pages indexing for connector {connector_id} into search space {search_space_id} from {indexing_from} to {indexing_to}" connector_config = connector.config or {}
) urls = parse_webcrawler_urls(connector_config.get("INITIAL_URLS"))
index_crawled_urls_task.delay(
connector_id, search_space_id, str(user.id), indexing_from, indexing_to if not urls:
) # URLs are optional - skip indexing gracefully
response_message = "Web page indexing started in the background." logger.info(
f"Webcrawler connector {connector_id} has no URLs configured, skipping indexing"
)
response_message = "No URLs configured for this connector. Add URLs in the connector settings to enable indexing."
indexing_started = False
else:
logger.info(
f"Triggering web pages indexing for connector {connector_id} into search space {search_space_id} from {indexing_from} to {indexing_to}"
)
index_crawled_urls_task.delay(
connector_id,
search_space_id,
str(user.id),
indexing_from,
indexing_to,
)
response_message = "Web page indexing started in the background."
elif connector.connector_type == SearchSourceConnectorType.OBSIDIAN_CONNECTOR: elif connector.connector_type == SearchSourceConnectorType.OBSIDIAN_CONNECTOR:
from app.config import config as app_config from app.config import config as app_config
@ -1025,6 +1044,7 @@ async def index_connector_content(
return { return {
"message": response_message, "message": response_message,
"indexing_started": indexing_started,
"connector_id": connector_id, "connector_id": connector_id,
"search_space_id": search_space_id, "search_space_id": search_space_id,
"indexing_from": indexing_from, "indexing_from": indexing_from,
@ -1129,6 +1149,7 @@ async def _run_indexing_with_notifications(
end_date: str, end_date: str,
indexing_function, indexing_function,
update_timestamp_func=None, update_timestamp_func=None,
supports_retry_callback: bool = False,
): ):
""" """
Generic helper to run indexing with real-time notifications. Generic helper to run indexing with real-time notifications.
@ -1142,10 +1163,14 @@ async def _run_indexing_with_notifications(
end_date: End date for indexing end_date: End date for indexing
indexing_function: Async function that performs the indexing indexing_function: Async function that performs the indexing
update_timestamp_func: Optional function to update connector timestamp update_timestamp_func: Optional function to update connector timestamp
supports_retry_callback: Whether the indexing function supports on_retry_callback
""" """
from uuid import UUID from uuid import UUID
notification = None notification = None
# Track indexed count for retry notifications
current_indexed_count = 0
try: try:
# Get connector info for notification # Get connector info for notification
connector_result = await session.execute( connector_result = await session.execute(
@ -1179,16 +1204,54 @@ async def _run_indexing_with_notifications(
stage="fetching", stage="fetching",
) )
# Create retry callback for connectors that support it
async def on_retry_callback(
retry_reason: str, attempt: int, max_attempts: int, wait_seconds: float
) -> None:
"""Callback to update notification during API retries (rate limits, etc.)"""
nonlocal notification
if notification:
try:
await session.refresh(notification)
await NotificationService.connector_indexing.notify_retry_progress(
session=session,
notification=notification,
indexed_count=current_indexed_count,
retry_reason=retry_reason,
attempt=attempt,
max_attempts=max_attempts,
wait_seconds=wait_seconds,
)
await session.commit()
except Exception as e:
# Don't let notification errors break the indexing
logger.warning(f"Failed to update retry notification: {e}")
# Build kwargs for indexing function
indexing_kwargs = {
"session": session,
"connector_id": connector_id,
"search_space_id": search_space_id,
"user_id": user_id,
"start_date": start_date,
"end_date": end_date,
"update_last_indexed": False,
}
# Add retry callback for connectors that support it
if supports_retry_callback:
indexing_kwargs["on_retry_callback"] = on_retry_callback
# Run the indexing function # Run the indexing function
documents_processed, error_or_warning = await indexing_function( # Some indexers return (indexed, error), others return (indexed, skipped, error)
session=session, result = await indexing_function(**indexing_kwargs)
connector_id=connector_id,
search_space_id=search_space_id, # Handle both 2-tuple and 3-tuple returns for backwards compatibility
user_id=user_id, if len(result) == 3:
start_date=start_date, documents_processed, documents_skipped, error_or_warning = result
end_date=end_date, else:
update_last_indexed=False, documents_processed, error_or_warning = result
) documents_skipped = None
# Update connector timestamp if function provided and indexing was successful # Update connector timestamp if function provided and indexing was successful
if documents_processed > 0 and update_timestamp_func: if documents_processed > 0 and update_timestamp_func:
@ -1216,6 +1279,7 @@ async def _run_indexing_with_notifications(
notification=notification, notification=notification,
indexed_count=documents_processed, indexed_count=documents_processed,
error_message=error_or_warning, # Show errors even if some documents were indexed error_message=error_or_warning, # Show errors even if some documents were indexed
skipped_count=documents_skipped,
) )
await ( await (
session.commit() session.commit()
@ -1242,6 +1306,7 @@ async def _run_indexing_with_notifications(
notification=notification, notification=notification,
indexed_count=documents_processed, indexed_count=documents_processed,
error_message=error_or_warning, # Show errors even if some documents were indexed error_message=error_or_warning, # Show errors even if some documents were indexed
skipped_count=documents_skipped,
) )
await ( await (
session.commit() session.commit()
@ -1260,8 +1325,15 @@ async def _run_indexing_with_notifications(
"no " in error_or_warning_lower "no " in error_or_warning_lower
and "found" in error_or_warning_lower and "found" in error_or_warning_lower
) )
# Informational warnings - sync succeeded but some content couldn't be synced
# These are NOT errors, just notifications about API limitations or recommendations
is_info_warning = (
"couldn't be synced" in error_or_warning_lower
or "using legacy token" in error_or_warning_lower
or "(api limitation)" in error_or_warning_lower
)
if is_duplicate_warning or is_empty_result: if is_duplicate_warning or is_empty_result or is_info_warning:
# These are success cases - sync worked, just found nothing new # These are success cases - sync worked, just found nothing new
logger.info(f"Indexing completed successfully: {error_or_warning}") logger.info(f"Indexing completed successfully: {error_or_warning}")
# Still update timestamp so ElectricSQL syncs and clears "Syncing" UI # Still update timestamp so ElectricSQL syncs and clears "Syncing" UI
@ -1283,6 +1355,7 @@ async def _run_indexing_with_notifications(
indexed_count=0, indexed_count=0,
error_message=notification_message, # Pass as warning, not error error_message=notification_message, # Pass as warning, not error
is_warning=True, # Flag to indicate this is a warning, not an error is_warning=True, # Flag to indicate this is a warning, not an error
skipped_count=documents_skipped,
) )
await ( await (
session.commit() session.commit()
@ -1298,6 +1371,7 @@ async def _run_indexing_with_notifications(
notification=notification, notification=notification,
indexed_count=0, indexed_count=0,
error_message=error_or_warning, error_message=error_or_warning,
skipped_count=documents_skipped,
) )
await ( await (
session.commit() session.commit()
@ -1319,6 +1393,7 @@ async def _run_indexing_with_notifications(
notification=notification, notification=notification,
indexed_count=0, indexed_count=0,
error_message=None, # No error - sync succeeded error_message=None, # No error - sync succeeded
skipped_count=documents_skipped,
) )
await ( await (
session.commit() session.commit()
@ -1336,6 +1411,7 @@ async def _run_indexing_with_notifications(
notification=notification, notification=notification,
indexed_count=0, indexed_count=0,
error_message=str(e), error_message=str(e),
skipped_count=None, # Unknown on exception
) )
except Exception as notif_error: except Exception as notif_error:
logger.error(f"Failed to update notification: {notif_error!s}") logger.error(f"Failed to update notification: {notif_error!s}")
@ -1362,6 +1438,7 @@ async def run_notion_indexing_with_new_session(
end_date=end_date, end_date=end_date,
indexing_function=index_notion_pages, indexing_function=index_notion_pages,
update_timestamp_func=_update_connector_timestamp_by_id, update_timestamp_func=_update_connector_timestamp_by_id,
supports_retry_callback=True, # Notion connector supports retry notifications
) )
@ -1393,6 +1470,7 @@ async def run_notion_indexing(
end_date=end_date, end_date=end_date,
indexing_function=index_notion_pages, indexing_function=index_notion_pages,
update_timestamp_func=_update_connector_timestamp_by_id, update_timestamp_func=_update_connector_timestamp_by_id,
supports_retry_callback=True, # Notion connector supports retry notifications
) )

View file

@ -95,6 +95,9 @@ class NewChatThreadRead(NewChatThreadBase, IDModel):
search_space_id: int search_space_id: int
visibility: ChatVisibility visibility: ChatVisibility
created_by_id: UUID | None = None created_by_id: UUID | None = None
public_share_enabled: bool = False
public_share_token: str | None = None
clone_pending: bool = False
created_at: datetime created_at: datetime
updated_at: datetime updated_at: datetime
@ -133,7 +136,8 @@ class ThreadListItem(BaseModel):
archived: bool archived: bool
visibility: ChatVisibility visibility: ChatVisibility
created_by_id: UUID | None = None created_by_id: UUID | None = None
is_own_thread: bool = False # True if the current user created this thread is_own_thread: bool = False
public_share_enabled: bool = False
created_at: datetime = Field(alias="createdAt") created_at: datetime = Field(alias="createdAt")
updated_at: datetime = Field(alias="updatedAt") updated_at: datetime = Field(alias="updatedAt")
@ -204,3 +208,60 @@ class RegenerateRequest(BaseModel):
attachments: list[ChatAttachment] | None = None attachments: list[ChatAttachment] | None = None
mentioned_document_ids: list[int] | None = None mentioned_document_ids: list[int] | None = None
mentioned_surfsense_doc_ids: list[int] | None = None mentioned_surfsense_doc_ids: list[int] | None = None
# =============================================================================
# Public Sharing Schemas
# =============================================================================
class PublicShareToggleRequest(BaseModel):
    """Request to enable/disable public sharing for a thread."""

    # Desired state: True enables the public share link, False disables it.
    enabled: bool
class PublicShareToggleResponse(BaseModel):
    """Response after toggling public sharing."""

    # Sharing state after the toggle was applied.
    enabled: bool
    # Absolute URL of the public chat page; None when sharing is disabled.
    public_url: str | None = None
    # Raw share token; None when sharing is disabled.
    share_token: str | None = None
# =============================================================================
# Public Chat View Schemas (for unauthenticated access)
# =============================================================================
class PublicAuthor(BaseModel):
    """Display-safe author info shown on a public chat (no user IDs exposed)."""

    # Name to show; may be None when the author is unknown.
    display_name: str | None = None
    # Avatar image URL, if the user has one.
    avatar_url: str | None = None
class PublicChatMessage(BaseModel):
    """A single sanitized message within a publicly shared chat."""

    # Message role as stored on the thread.
    role: NewChatMessageRole
    # Sanitized content parts (citations stripped, UI-relevant tools only).
    content: Any
    # Author display info; None for messages without a known author.
    author: PublicAuthor | None = None
    created_at: datetime
class PublicChatThread(BaseModel):
    """Minimal thread metadata exposed on the public chat page."""

    title: str
    created_at: datetime
class PublicChatResponse(BaseModel):
    """Payload returned for an unauthenticated public chat view."""

    thread: PublicChatThread
    messages: list[PublicChatMessage]
class CloneInitResponse(BaseModel):
    """Response from the clone-init endpoint (phase one of the two-phase clone)."""

    # Newly created (still empty, clone-pending) thread the frontend should open.
    thread_id: int
    # Search space the clone was created in.
    search_space_id: int
    # Echo of the source share token, for the follow-up /complete-clone call.
    share_token: str
class CompleteCloneResponse(BaseModel):
    """Response after clone content has been copied into the new thread."""

    status: str
    # Number of messages copied from the source thread.
    message_count: int

View file

@ -1,11 +1,19 @@
"""Podcast schemas for API responses.""" """Podcast schemas for API responses."""
from datetime import datetime from datetime import datetime
from enum import Enum
from typing import Any from typing import Any
from pydantic import BaseModel from pydantic import BaseModel
class PodcastStatusEnum(str, Enum):
    """Lifecycle states of a podcast generation job."""

    PENDING = "pending"
    GENERATING = "generating"
    READY = "ready"
    FAILED = "failed"
class PodcastBase(BaseModel): class PodcastBase(BaseModel):
"""Base podcast schema.""" """Base podcast schema."""
@ -33,7 +41,26 @@ class PodcastRead(PodcastBase):
"""Schema for reading a podcast.""" """Schema for reading a podcast."""
id: int id: int
status: PodcastStatusEnum = PodcastStatusEnum.READY
created_at: datetime created_at: datetime
transcript_entries: int | None = None
class Config: class Config:
from_attributes = True from_attributes = True
@classmethod
def from_orm_with_entries(cls, obj):
    """Build a PodcastRead from an ORM podcast, computing transcript_entries.

    transcript_entries is the length of the stored transcript, or None when
    the podcast has no transcript (empty or missing).
    """
    transcript = obj.podcast_transcript
    entry_count = len(transcript) if transcript else None
    return cls(
        id=obj.id,
        title=obj.title,
        podcast_transcript=transcript,
        file_location=obj.file_location,
        search_space_id=obj.search_space_id,
        status=obj.status,
        created_at=obj.created_at,
        transcript_entries=entry_count,
    )

View file

@ -329,6 +329,90 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
metadata_updates=metadata_updates, metadata_updates=metadata_updates,
) )
async def notify_retry_progress(
    self,
    session: AsyncSession,
    notification: Notification,
    indexed_count: int,
    retry_reason: str,
    attempt: int,
    max_attempts: int,
    wait_seconds: float | None = None,
    service_name: str | None = None,
) -> Notification:
    """
    Update a notification while a connector waits out a retry.

    Gives users friendly feedback when an external service (rate limit,
    transient error, timeout) forces a pause, making it clear the delay is
    caused by the external service and the sync is still progressing. Any
    connector (Notion, Slack, Airtable, ...) can call this.

    Args:
        session: Database session
        notification: Notification to update
        indexed_count: Number of items indexed so far
        retry_reason: Reason for retry ('rate_limit', 'server_error', 'timeout')
        attempt: Current retry attempt number (1-based)
        max_attempts: Maximum number of retry attempts
        wait_seconds: Seconds to wait before retry (optional, for display)
        service_name: External service name (e.g. 'Notion'); when omitted it
            is taken from the notification metadata

    Returns:
        Updated notification
    """
    # Fall back to the connector name stored on the notification, trimming
    # a workspace suffix ("Notion - My Workspace" -> "Notion").
    if not service_name:
        service_name = notification.notification_metadata.get(
            "connector_name", "Service"
        )
        if " - " in service_name:
            service_name = service_name.split(" - ")[0]

    # User-friendly phrasing per retry reason — makes it clear the delay is
    # due to the external service, not SurfSense.
    reason_to_message = {
        "rate_limit": f"{service_name} rate limit reached",
        "server_error": f"{service_name} is slow to respond",
        "timeout": f"{service_name} took too long",
        "temporary_error": f"{service_name} temporarily unavailable",
    }
    base_message = reason_to_message.get(retry_reason, f"Waiting for {service_name}")

    # Only surface the wait time when it is long enough to matter.
    suffix = (
        f"Retrying in {int(wait_seconds)}s..."
        if wait_seconds and wait_seconds > 5
        else "Retrying..."
    )
    message = f"{base_message}. {suffix}"

    # Append progress so far, if any items have already synced.
    if indexed_count > 0:
        unit = "item" if indexed_count == 1 else "items"
        message += f" ({indexed_count} {unit} synced so far)"

    metadata_updates = {
        "indexed_count": indexed_count,
        "sync_stage": "waiting_retry",
        "retry_attempt": attempt,
        "retry_max_attempts": max_attempts,
        "retry_reason": retry_reason,
        "retry_wait_seconds": wait_seconds,
    }

    return await self.update_notification(
        session=session,
        notification=notification,
        message=message,
        status="in_progress",
        metadata_updates=metadata_updates,
    )
async def notify_indexing_completed( async def notify_indexing_completed(
self, self,
session: AsyncSession, session: AsyncSession,
@ -336,6 +420,7 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
indexed_count: int, indexed_count: int,
error_message: str | None = None, error_message: str | None = None,
is_warning: bool = False, is_warning: bool = False,
skipped_count: int | None = None,
) -> Notification: ) -> Notification:
""" """
Update notification when connector indexing completes. Update notification when connector indexing completes.
@ -346,6 +431,7 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
indexed_count: Total number of items indexed indexed_count: Total number of items indexed
error_message: Error message if indexing failed, or warning message (optional) error_message: Error message if indexing failed, or warning message (optional)
is_warning: If True, treat error_message as a warning (success case) rather than an error is_warning: If True, treat error_message as a warning (success case) rather than an error
skipped_count: Number of items skipped (e.g., duplicates) - optional
Returns: Returns:
Updated notification Updated notification
@ -354,6 +440,14 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
"connector_name", "Connector" "connector_name", "Connector"
) )
# Build the skipped text if there are skipped items
skipped_text = ""
if skipped_count and skipped_count > 0:
skipped_item_text = "item" if skipped_count == 1 else "items"
skipped_text = (
f" ({skipped_count} {skipped_item_text} skipped - already indexed)"
)
# If there's an error message but items were indexed, treat it as a warning (partial success) # If there's an error message but items were indexed, treat it as a warning (partial success)
# If is_warning is True, treat it as success even with 0 items (e.g., duplicates found) # If is_warning is True, treat it as success even with 0 items (e.g., duplicates found)
# Otherwise, treat it as a failure # Otherwise, treat it as a failure
@ -362,12 +456,12 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
# Partial success with warnings (e.g., duplicate content from other connectors) # Partial success with warnings (e.g., duplicate content from other connectors)
title = f"Ready: {connector_name}" title = f"Ready: {connector_name}"
item_text = "item" if indexed_count == 1 else "items" item_text = "item" if indexed_count == 1 else "items"
message = f"Now searchable! {indexed_count} {item_text} synced. Note: {error_message}" message = f"Now searchable! {indexed_count} {item_text} synced{skipped_text}. Note: {error_message}"
status = "completed" status = "completed"
elif is_warning: elif is_warning:
# Warning case (e.g., duplicates found) - treat as success # Warning case (e.g., duplicates found) - treat as success
title = f"Ready: {connector_name}" title = f"Ready: {connector_name}"
message = f"Sync completed. {error_message}" message = f"Sync completed{skipped_text}. {error_message}"
status = "completed" status = "completed"
else: else:
# Complete failure # Complete failure
@ -377,14 +471,21 @@ class ConnectorIndexingNotificationHandler(BaseNotificationHandler):
else: else:
title = f"Ready: {connector_name}" title = f"Ready: {connector_name}"
if indexed_count == 0: if indexed_count == 0:
message = "Already up to date! No new items to sync." if skipped_count and skipped_count > 0:
skipped_item_text = "item" if skipped_count == 1 else "items"
message = f"Already up to date! {skipped_count} {skipped_item_text} skipped (already indexed)."
else:
message = "Already up to date! No new items to sync."
else: else:
item_text = "item" if indexed_count == 1 else "items" item_text = "item" if indexed_count == 1 else "items"
message = f"Now searchable! {indexed_count} {item_text} synced." message = (
f"Now searchable! {indexed_count} {item_text} synced{skipped_text}."
)
status = "completed" status = "completed"
metadata_updates = { metadata_updates = {
"indexed_count": indexed_count, "indexed_count": indexed_count,
"skipped_count": skipped_count or 0,
"sync_stage": "completed" "sync_stage": "completed"
if (not error_message or is_warning or indexed_count > 0) if (not error_message or is_warning or indexed_count > 0)
else "failed", else "failed",

View file

@ -0,0 +1,379 @@
"""
Service layer for public chat sharing and cloning.
"""
import re
import secrets
from uuid import UUID
from fastapi import HTTPException
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from app.db import NewChatThread, User
# Tool calls that render as UI elements in the chat transcript; only these
# are kept when sanitizing message content for the public (unauthenticated) view.
UI_TOOLS = {
    "display_image",
    "link_preview",
    "generate_podcast",
    "scrape_webpage",
    "multi_link_preview",
}
def strip_citations(text: str) -> str:
    """
    Strip [citation:X] / [citation:doc-X] markers from *text*.

    Newlines are preserved so markdown structure survives; runs of other
    whitespace are collapsed and excess blank lines are reduced to one.
    """
    cleanup_passes = (
        # Drop citation markers (ASCII or CJK brackets, optional zero-width spaces).
        (r"[\[【]\u200B?citation:(doc-)?\d+\u200B?[\]】]", ""),
        # Collapse horizontal whitespace runs (never newlines) into one space.
        (r"[^\S\n]+", " "),
        # Squeeze 3+ consecutive newlines down to a single blank line.
        (r"\n{3,}", "\n\n"),
        # Trim spaces hugging either side of a newline.
        (r" *\n *", "\n"),
    )
    cleaned = text
    for pattern, replacement in cleanup_passes:
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned.strip()
def sanitize_content_for_public(content: list | str | None) -> list:
    """
    Reduce message content to what is safe to show on a public page.

    Text parts have citation markers stripped (parts left empty are dropped);
    tool-call parts are kept only for UI-rendering tools; every other part
    type is removed. Always returns a list of content parts.
    """
    if content is None:
        return []

    # Bare strings become a single text part (or nothing, if stripping
    # citations leaves an empty string).
    if isinstance(content, str):
        stripped = strip_citations(content)
        return [{"type": "text", "text": stripped}] if stripped else []

    if not isinstance(content, list):
        return []

    public_parts: list = []
    for part in content:
        if not isinstance(part, dict):
            continue
        kind = part.get("type")
        if kind == "text":
            stripped = strip_citations(part.get("text", ""))
            if stripped:
                public_parts.append({"type": "text", "text": stripped})
        elif kind == "tool-call" and part.get("toolName") in UI_TOOLS:
            public_parts.append(part)
    return public_parts
async def get_author_display(
    session: AsyncSession,
    author_id: UUID | None,
    user_cache: dict[UUID, dict],
) -> dict | None:
    """
    Resolve an author UUID to public display info, memoized in *user_cache*.

    Returns None for messages with no author; unknown IDs resolve to a
    placeholder entry rather than raising.
    """
    if author_id is None:
        return None

    cached = user_cache.get(author_id)
    if cached is not None:
        return cached

    lookup = await session.execute(select(User).filter(User.id == author_id))
    user = lookup.scalars().first()
    if user:
        entry = {
            "display_name": user.display_name or "User",
            "avatar_url": user.avatar_url,
        }
    else:
        entry = {
            "display_name": "Unknown User",
            "avatar_url": None,
        }
    user_cache[author_id] = entry
    return entry
async def toggle_public_share(
    session: AsyncSession,
    thread_id: int,
    enabled: bool,
    user: User,
    base_url: str,
) -> dict:
    """
    Turn public sharing on or off for a chat thread.

    Only the thread's creator may toggle sharing. Enabling mints a share
    token on first use; disabling leaves the token in place so the same URL
    works again if sharing is later re-enabled.

    Raises:
        HTTPException: 404 if the thread does not exist, 403 if the caller
            is not the creator.
    """
    lookup = await session.execute(
        select(NewChatThread).filter(NewChatThread.id == thread_id)
    )
    thread = lookup.scalars().first()
    if thread is None:
        raise HTTPException(status_code=404, detail="Thread not found")

    if thread.created_by_id != user.id:
        raise HTTPException(
            status_code=403,
            detail="Only the creator of this chat can manage public sharing",
        )

    # Mint a token lazily the first time sharing is enabled.
    if enabled and not thread.public_share_token:
        thread.public_share_token = secrets.token_urlsafe(48)

    thread.public_share_enabled = enabled
    await session.commit()
    await session.refresh(thread)

    if not enabled:
        return {
            "enabled": False,
            "public_url": None,
            "share_token": None,
        }

    return {
        "enabled": True,
        "public_url": f"{base_url}/public/{thread.public_share_token}",
        "share_token": thread.public_share_token,
    }
async def get_public_chat(
    session: AsyncSession,
    share_token: str,
) -> dict:
    """
    Load a publicly shared chat: thread metadata plus sanitized messages.

    Raises HTTPException 404 when the token is unknown or sharing is
    currently disabled for the thread.
    """
    query = (
        select(NewChatThread)
        .options(selectinload(NewChatThread.messages))
        .filter(
            NewChatThread.public_share_token == share_token,
            NewChatThread.public_share_enabled.is_(True),
        )
    )
    thread = (await session.execute(query)).scalars().first()
    if thread is None:
        raise HTTPException(status_code=404, detail="Not found")

    # Authors are looked up at most once per distinct user.
    author_cache: dict[UUID, dict] = {}
    public_messages = []
    for message in sorted(thread.messages, key=lambda m: m.created_at):
        public_messages.append(
            {
                "role": message.role,
                "content": sanitize_content_for_public(message.content),
                "author": await get_author_display(
                    session, message.author_id, author_cache
                ),
                "created_at": message.created_at,
            }
        )

    return {
        "thread": {
            "title": thread.title,
            "created_at": thread.created_at,
        },
        "messages": public_messages,
    }
async def get_thread_by_share_token(
    session: AsyncSession,
    share_token: str,
) -> NewChatThread | None:
    """Look up a thread by its public share token.

    Returns the thread (with messages eagerly loaded) only when public
    sharing is currently enabled; otherwise returns None.
    """
    query = (
        select(NewChatThread)
        .options(selectinload(NewChatThread.messages))
        .filter(
            NewChatThread.public_share_token == share_token,
            NewChatThread.public_share_enabled.is_(True),
        )
    )
    execution = await session.execute(query)
    return execution.scalars().first()
async def get_user_default_search_space(
    session: AsyncSession,
    user_id: UUID,
) -> int | None:
    """
    Get user's default search space for cloning.

    Returns the owned search space with the lowest id, or None if the
    user owns no search space.
    """
    from app.db import SearchSpaceMembership

    result = await session.execute(
        # Select just the id column — the full membership row isn't needed.
        select(SearchSpaceMembership.search_space_id)
        .filter(
            SearchSpaceMembership.user_id == user_id,
            SearchSpaceMembership.is_owner.is_(True),
        )
        # Without an explicit ORDER BY, "first" with LIMIT 1 is
        # database-dependent; order by search_space_id so the chosen
        # default is deterministic across calls.
        .order_by(SearchSpaceMembership.search_space_id)
        .limit(1)
    )
    # scalars().first() yields the int id or None when no row matched.
    return result.scalars().first()
async def complete_clone_content(
    session: AsyncSession,
    target_thread: NewChatThread,
    source_thread_id: int,
    target_search_space_id: int,
) -> int:
    """
    Copy messages and podcasts from source thread to target thread.

    Phase two of the two-phase clone: message content is sanitized for
    public consumption, and any generate_podcast tool-call results embedded
    in structured content are remapped to freshly cloned podcast records.
    Podcasts that cannot be cloned (e.g. not READY) have their
    podcast_id reference removed instead.

    Sets clone_pending=False and needs_history_bootstrap=True when done.
    Returns the number of messages copied.

    Raises:
        ValueError: If the source thread does not exist.
    """
    from app.db import NewChatMessage
    result = await session.execute(
        select(NewChatThread)
        .options(selectinload(NewChatThread.messages))
        .filter(NewChatThread.id == source_thread_id)
    )
    source_thread = result.scalars().first()
    if not source_thread:
        raise ValueError("Source thread not found")
    # Maps source podcast id -> cloned podcast id, so a podcast referenced
    # by multiple messages is cloned at most once.
    podcast_id_map: dict[int, int] = {}
    message_count = 0
    for msg in sorted(source_thread.messages, key=lambda m: m.created_at):
        new_content = sanitize_content_for_public(msg.content)
        # Structured (list) content may embed generate_podcast tool calls;
        # rewrite their podcast_id results in place to point at the clones.
        if isinstance(new_content, list):
            for part in new_content:
                if (
                    isinstance(part, dict)
                    and part.get("type") == "tool-call"
                    and part.get("toolName") == "generate_podcast"
                ):
                    result_data = part.get("result", {})
                    old_podcast_id = result_data.get("podcast_id")
                    if old_podcast_id and old_podcast_id not in podcast_id_map:
                        # _clone_podcast returns None when the source podcast
                        # is missing or not READY.
                        new_podcast_id = await _clone_podcast(
                            session,
                            old_podcast_id,
                            target_search_space_id,
                            target_thread.id,
                        )
                        if new_podcast_id:
                            podcast_id_map[old_podcast_id] = new_podcast_id
                    if old_podcast_id and old_podcast_id in podcast_id_map:
                        result_data["podcast_id"] = podcast_id_map[old_podcast_id]
                    elif old_podcast_id:
                        # Podcast couldn't be cloned (not ready), remove reference
                        result_data.pop("podcast_id", None)
        new_message = NewChatMessage(
            thread_id=target_thread.id,
            role=msg.role,
            content=new_content,
            author_id=msg.author_id,
            # Preserve original timestamps so message ordering survives the clone.
            created_at=msg.created_at,
        )
        session.add(new_message)
        message_count += 1
    target_thread.clone_pending = False
    # Flag the thread so the chat streamer rebuilds its LangGraph history
    # from the DB on the next message (no checkpoint exists for a clone).
    target_thread.needs_history_bootstrap = True
    await session.commit()
    return message_count
async def _clone_podcast(
    session: AsyncSession,
    podcast_id: int,
    target_search_space_id: int,
    target_thread_id: int,
) -> int | None:
    """Duplicate a READY podcast record (and its audio file) for a cloned thread.

    Returns the new podcast's id, or None when the source podcast is
    missing or not yet in the READY state.
    """
    import shutil
    import uuid
    from pathlib import Path
    from app.db import Podcast, PodcastStatus

    lookup = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
    source = lookup.scalars().first()
    if source is None or source.status != PodcastStatus.READY:
        return None

    copied_path: str | None = None
    if source.file_location:
        source_path = Path(source.file_location)
        if source_path.exists():
            # Copy the audio under a fresh UUID-based name so the clone
            # never shares a file on disk with the original.
            destination_dir = Path("podcasts")
            destination_dir.mkdir(parents=True, exist_ok=True)
            copied_path = str(destination_dir / f"{uuid.uuid4()}_podcast.mp3")
            shutil.copy2(source.file_location, copied_path)

    clone = Podcast(
        title=source.title,
        podcast_transcript=source.podcast_transcript,
        file_location=copied_path,
        status=PodcastStatus.READY,
        search_space_id=target_search_space_id,
        thread_id=target_thread_id,
    )
    session.add(clone)
    # Flush (not commit) so the new primary key is assigned while leaving
    # the transaction boundary to the caller.
    await session.flush()
    return clone.id
async def is_podcast_publicly_accessible(
    session: AsyncSession,
    podcast_id: int,
) -> bool:
    """
    Report whether a podcast belongs to a publicly shared thread.

    Uses the podcast's thread_id foreign key (with an eager-loaded thread)
    so the check is a single lookup.
    """
    from app.db import Podcast

    query = (
        select(Podcast)
        .options(selectinload(Podcast.thread))
        .filter(Podcast.id == podcast_id)
    )
    podcast = (await session.execute(query)).scalars().first()
    if podcast is None or podcast.thread is None:
        return False
    return podcast.thread.public_share_enabled

View file

@ -4,15 +4,15 @@ import asyncio
import logging import logging
import sys import sys
from sqlalchemy import select
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
from sqlalchemy.pool import NullPool from sqlalchemy.pool import NullPool
# Import for content-based podcast (new-chat)
from app.agents.podcaster.graph import graph as podcaster_graph from app.agents.podcaster.graph import graph as podcaster_graph
from app.agents.podcaster.state import State as PodcasterState from app.agents.podcaster.state import State as PodcasterState
from app.celery_app import celery_app from app.celery_app import celery_app
from app.config import config from app.config import config
from app.db import Podcast from app.db import Podcast, PodcastStatus
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -44,8 +44,8 @@ def get_celery_session_maker():
# ============================================================================= # =============================================================================
def _clear_active_podcast_redis_key(search_space_id: int) -> None: def _clear_generating_podcast(search_space_id: int) -> None:
"""Clear the active podcast task key from Redis when task completes.""" """Clear the generating podcast marker from Redis when task completes."""
import os import os
import redis import redis
@ -53,34 +53,26 @@ def _clear_active_podcast_redis_key(search_space_id: int) -> None:
try: try:
redis_url = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0") redis_url = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
client = redis.from_url(redis_url, decode_responses=True) client = redis.from_url(redis_url, decode_responses=True)
key = f"podcast:active:{search_space_id}" key = f"podcast:generating:{search_space_id}"
client.delete(key) client.delete(key)
logger.info(f"Cleared active podcast key for search_space_id={search_space_id}") logger.info(
f"Cleared generating podcast key for search_space_id={search_space_id}"
)
except Exception as e: except Exception as e:
logger.warning(f"Could not clear active podcast key: {e}") logger.warning(f"Could not clear generating podcast key: {e}")
@celery_app.task(name="generate_content_podcast", bind=True) @celery_app.task(name="generate_content_podcast", bind=True)
def generate_content_podcast_task( def generate_content_podcast_task(
self, self,
podcast_id: int,
source_content: str, source_content: str,
search_space_id: int, search_space_id: int,
podcast_title: str = "SurfSense Podcast",
user_prompt: str | None = None, user_prompt: str | None = None,
) -> dict: ) -> dict:
""" """
Celery task to generate podcast from source content (for new-chat). Celery task to generate podcast from source content.
Updates existing podcast record created by the tool.
This task generates a podcast directly from provided content.
Args:
source_content: The text content to convert into a podcast
search_space_id: ID of the search space
podcast_title: Title for the podcast
user_prompt: Optional instructions for podcast style/tone
Returns:
dict with podcast_id on success, or error info on failure
""" """
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop) asyncio.set_event_loop(loop)
@ -88,9 +80,9 @@ def generate_content_podcast_task(
try: try:
result = loop.run_until_complete( result = loop.run_until_complete(
_generate_content_podcast( _generate_content_podcast(
podcast_id,
source_content, source_content,
search_space_id, search_space_id,
podcast_title,
user_prompt, user_prompt,
) )
) )
@ -98,46 +90,67 @@ def generate_content_podcast_task(
return result return result
except Exception as e: except Exception as e:
logger.error(f"Error generating content podcast: {e!s}") logger.error(f"Error generating content podcast: {e!s}")
return {"status": "error", "error": str(e)} loop.run_until_complete(_mark_podcast_failed(podcast_id))
return {"status": "failed", "podcast_id": podcast_id}
finally: finally:
# Always clear the active podcast key when task completes (success or failure) _clear_generating_podcast(search_space_id)
_clear_active_podcast_redis_key(search_space_id)
asyncio.set_event_loop(None) asyncio.set_event_loop(None)
loop.close() loop.close()
async def _generate_content_podcast( async def _mark_podcast_failed(podcast_id: int) -> None:
source_content: str, """Mark a podcast as failed in the database."""
search_space_id: int,
podcast_title: str = "SurfSense Podcast",
user_prompt: str | None = None,
) -> dict:
"""Generate content-based podcast with new session."""
async with get_celery_session_maker()() as session: async with get_celery_session_maker()() as session:
try: try:
# Configure the podcaster graph result = await session.execute(
select(Podcast).filter(Podcast.id == podcast_id)
)
podcast = result.scalars().first()
if podcast:
podcast.status = PodcastStatus.FAILED
await session.commit()
except Exception as e:
logger.error(f"Failed to mark podcast as failed: {e}")
async def _generate_content_podcast(
podcast_id: int,
source_content: str,
search_space_id: int,
user_prompt: str | None = None,
) -> dict:
"""Generate content-based podcast and update existing record."""
async with get_celery_session_maker()() as session:
result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
podcast = result.scalars().first()
if not podcast:
raise ValueError(f"Podcast {podcast_id} not found")
try:
podcast.status = PodcastStatus.GENERATING
await session.commit()
graph_config = { graph_config = {
"configurable": { "configurable": {
"podcast_title": podcast_title, "podcast_title": podcast.title,
"search_space_id": search_space_id, "search_space_id": search_space_id,
"user_prompt": user_prompt, "user_prompt": user_prompt,
} }
} }
# Initialize the podcaster state with the source content
initial_state = PodcasterState( initial_state = PodcasterState(
source_content=source_content, source_content=source_content,
db_session=session, db_session=session,
) )
# Run the podcaster graph graph_result = await podcaster_graph.ainvoke(
result = await podcaster_graph.ainvoke(initial_state, config=graph_config) initial_state, config=graph_config
)
# Extract results podcast_transcript = graph_result.get("podcast_transcript", [])
podcast_transcript = result.get("podcast_transcript", []) file_path = graph_result.get("final_podcast_file_path", "")
file_path = result.get("final_podcast_file_path", "")
# Convert transcript to serializable format
serializable_transcript = [] serializable_transcript = []
for entry in podcast_transcript: for entry in podcast_transcript:
if hasattr(entry, "speaker_id"): if hasattr(entry, "speaker_id"):
@ -152,27 +165,22 @@ async def _generate_content_podcast(
} }
) )
# Save podcast to database podcast.podcast_transcript = serializable_transcript
podcast = Podcast( podcast.file_location = file_path
title=podcast_title, podcast.status = PodcastStatus.READY
podcast_transcript=serializable_transcript,
file_location=file_path,
search_space_id=search_space_id,
)
session.add(podcast)
await session.commit() await session.commit()
await session.refresh(podcast)
logger.info(f"Successfully generated content podcast: {podcast.id}") logger.info(f"Successfully generated podcast: {podcast.id}")
return { return {
"status": "success", "status": "ready",
"podcast_id": podcast.id, "podcast_id": podcast.id,
"title": podcast_title, "title": podcast.title,
"transcript_entries": len(serializable_transcript), "transcript_entries": len(serializable_transcript),
} }
except Exception as e: except Exception as e:
logger.error(f"Error in _generate_content_podcast: {e!s}") logger.error(f"Error in _generate_content_podcast: {e!s}")
await session.rollback() podcast.status = PodcastStatus.FAILED
await session.commit()
raise raise

View file

@ -156,6 +156,41 @@ async def _check_and_trigger_schedules():
) )
await session.commit() await session.commit()
continue continue
# Special handling for Webcrawler - skip if no URLs configured
elif (
connector.connector_type
== SearchSourceConnectorType.WEBCRAWLER_CONNECTOR
):
from app.utils.webcrawler_utils import parse_webcrawler_urls
connector_config = connector.config or {}
urls = parse_webcrawler_urls(
connector_config.get("INITIAL_URLS")
)
if urls:
task.delay(
connector.id,
connector.search_space_id,
str(connector.user_id),
None, # start_date
None, # end_date
)
else:
# No URLs configured - skip indexing but still update next_scheduled_at
logger.info(
f"Webcrawler connector {connector.id} has no URLs configured, "
"skipping periodic indexing (will check again at next scheduled time)"
)
from datetime import timedelta
connector.next_scheduled_at = now + timedelta(
minutes=connector.indexing_frequency_minutes
)
await session.commit()
continue
else: else:
task.delay( task.delay(
connector.id, connector.id,

View file

@ -34,6 +34,7 @@ from app.services.chat_session_state_service import (
) )
from app.services.connector_service import ConnectorService from app.services.connector_service import ConnectorService
from app.services.new_streaming_service import VercelStreamingService from app.services.new_streaming_service import VercelStreamingService
from app.utils.content_utils import bootstrap_history_from_db
def format_attachments_as_context(attachments: list[ChatAttachment]) -> str: def format_attachments_as_context(attachments: list[ChatAttachment]) -> str:
@ -205,13 +206,13 @@ async def stream_new_chat(
mentioned_document_ids: list[int] | None = None, mentioned_document_ids: list[int] | None = None,
mentioned_surfsense_doc_ids: list[int] | None = None, mentioned_surfsense_doc_ids: list[int] | None = None,
checkpoint_id: str | None = None, checkpoint_id: str | None = None,
needs_history_bootstrap: bool = False,
) -> AsyncGenerator[str, None]: ) -> AsyncGenerator[str, None]:
""" """
Stream chat responses from the new SurfSense deep agent. Stream chat responses from the new SurfSense deep agent.
This uses the Vercel AI SDK Data Stream Protocol (SSE format) for streaming. This uses the Vercel AI SDK Data Stream Protocol (SSE format) for streaming.
The chat_id is used as LangGraph's thread_id for memory/checkpointing. The chat_id is used as LangGraph's thread_id for memory/checkpointing.
Message history can be passed from the frontend for context.
Args: Args:
user_query: The user's query user_query: The user's query
@ -221,6 +222,7 @@ async def stream_new_chat(
user_id: The current user's UUID string (for memory tools and session state) user_id: The current user's UUID string (for memory tools and session state)
llm_config_id: The LLM configuration ID (default: -1 for first global config) llm_config_id: The LLM configuration ID (default: -1 for first global config)
attachments: Optional attachments with extracted content attachments: Optional attachments with extracted content
needs_history_bootstrap: If True, load message history from DB (for cloned chats)
mentioned_document_ids: Optional list of document IDs mentioned with @ in the chat mentioned_document_ids: Optional list of document IDs mentioned with @ in the chat
mentioned_surfsense_doc_ids: Optional list of SurfSense doc IDs mentioned with @ in the chat mentioned_surfsense_doc_ids: Optional list of SurfSense doc IDs mentioned with @ in the chat
checkpoint_id: Optional checkpoint ID to rewind/fork from (for edit/reload operations) checkpoint_id: Optional checkpoint ID to rewind/fork from (for edit/reload operations)
@ -300,13 +302,29 @@ async def stream_new_chat(
connector_service=connector_service, connector_service=connector_service,
checkpointer=checkpointer, checkpointer=checkpointer,
user_id=user_id, # Pass user ID for memory tools user_id=user_id, # Pass user ID for memory tools
thread_id=chat_id, # Pass chat ID for podcast association
agent_config=agent_config, # Pass prompt configuration agent_config=agent_config, # Pass prompt configuration
firecrawl_api_key=firecrawl_api_key, # Pass Firecrawl API key if configured firecrawl_api_key=firecrawl_api_key, # Pass Firecrawl API key if configured
) )
# Build input with message history from frontend # Build input with message history
langchain_messages = [] langchain_messages = []
# Bootstrap history for cloned chats (no LangGraph checkpoint exists yet)
if needs_history_bootstrap:
langchain_messages = await bootstrap_history_from_db(session, chat_id)
# Clear the flag so we don't bootstrap again on next message
from app.db import NewChatThread
thread_result = await session.execute(
select(NewChatThread).filter(NewChatThread.id == chat_id)
)
thread = thread_result.scalars().first()
if thread:
thread.needs_history_bootstrap = False
await session.commit()
# Fetch mentioned documents if any (with chunks for proper citations) # Fetch mentioned documents if any (with chunks for proper citations)
mentioned_documents: list[Document] = [] mentioned_documents: list[Document] = []
if mentioned_document_ids: if mentioned_document_ids:

View file

@ -86,7 +86,7 @@ async def index_composio_connector(
end_date: str | None = None, end_date: str | None = None,
update_last_indexed: bool = True, update_last_indexed: bool = True,
max_items: int = 1000, max_items: int = 1000,
) -> tuple[int, str]: ) -> tuple[int, int, str | None]:
""" """
Index content from a Composio connector. Index content from a Composio connector.
@ -104,7 +104,7 @@ async def index_composio_connector(
max_items: Maximum number of items to fetch max_items: Maximum number of items to fetch
Returns: Returns:
Tuple of (number_of_indexed_items, error_message or None) Tuple of (number_of_indexed_items, number_of_skipped_items, error_message or None)
""" """
task_logger = TaskLoggingService(session, search_space_id) task_logger = TaskLoggingService(session, search_space_id)
@ -132,14 +132,14 @@ async def index_composio_connector(
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, error_msg, {"error_type": "InvalidConnectorType"} log_entry, error_msg, {"error_type": "InvalidConnectorType"}
) )
return 0, error_msg return 0, 0, error_msg
if not connector: if not connector:
error_msg = f"Composio connector with ID {connector_id} not found" error_msg = f"Composio connector with ID {connector_id} not found"
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, error_msg, {"error_type": "ConnectorNotFound"} log_entry, error_msg, {"error_type": "ConnectorNotFound"}
) )
return 0, error_msg return 0, 0, error_msg
# Get toolkit ID from config # Get toolkit ID from config
toolkit_id = connector.config.get("toolkit_id") toolkit_id = connector.config.get("toolkit_id")
@ -150,7 +150,7 @@ async def index_composio_connector(
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, error_msg, {"error_type": "MissingToolkitId"} log_entry, error_msg, {"error_type": "MissingToolkitId"}
) )
return 0, error_msg return 0, 0, error_msg
# Check if toolkit is indexable # Check if toolkit is indexable
if toolkit_id not in INDEXABLE_TOOLKITS: if toolkit_id not in INDEXABLE_TOOLKITS:
@ -158,7 +158,7 @@ async def index_composio_connector(
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, error_msg, {"error_type": "ToolkitNotIndexable"} log_entry, error_msg, {"error_type": "ToolkitNotIndexable"}
) )
return 0, error_msg return 0, 0, error_msg
# Get indexer function from registry # Get indexer function from registry
try: try:
@ -167,7 +167,7 @@ async def index_composio_connector(
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, str(e), {"error_type": "NoIndexerImplemented"} log_entry, str(e), {"error_type": "NoIndexerImplemented"}
) )
return 0, str(e) return 0, 0, str(e)
# Build kwargs for the indexer function # Build kwargs for the indexer function
kwargs = { kwargs = {
@ -199,7 +199,7 @@ async def index_composio_connector(
{"error_type": "SQLAlchemyError"}, {"error_type": "SQLAlchemyError"},
) )
logger.error(f"Database error: {db_error!s}", exc_info=True) logger.error(f"Database error: {db_error!s}", exc_info=True)
return 0, f"Database error: {db_error!s}" return 0, 0, f"Database error: {db_error!s}"
except Exception as e: except Exception as e:
await session.rollback() await session.rollback()
await task_logger.log_task_failure( await task_logger.log_task_failure(
@ -209,4 +209,4 @@ async def index_composio_connector(
{"error_type": type(e).__name__}, {"error_type": type(e).__name__},
) )
logger.error(f"Failed to index Composio connector: {e!s}", exc_info=True) logger.error(f"Failed to index Composio connector: {e!s}", exc_info=True)
return 0, f"Failed to index Composio connector: {e!s}" return 0, 0, f"Failed to index Composio connector: {e!s}"

View file

@ -20,6 +20,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -317,6 +318,24 @@ async def index_airtable_records(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = (
await check_duplicate_document_by_hash(
session, content_hash
)
)
if duplicate_by_content:
logger.info(
f"Airtable record {record_id} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate document summary # Generate document summary
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -22,6 +22,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -308,6 +309,22 @@ async def index_bookstack_pages(
logger.info(f"Successfully updated BookStack page {page_name}") logger.info(f"Successfully updated BookStack page {page_name}")
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"BookStack page {page_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -22,6 +22,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -302,6 +303,22 @@ async def index_clickup_tasks(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"ClickUp task {task_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -23,6 +23,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -306,6 +307,22 @@ async def index_confluence_pages(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Confluence page {page_title} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -21,6 +21,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
build_document_metadata_markdown, build_document_metadata_markdown,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -454,6 +455,24 @@ async def index_discord_messages(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = (
await check_duplicate_document_by_hash(
session, content_hash
)
)
if duplicate_by_content:
logger.info(
f"Discord message {msg_id} in {guild_name}#{channel_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Process chunks # Process chunks
chunks = await create_document_chunks( chunks = await create_document_chunks(

View file

@ -24,6 +24,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -319,6 +320,21 @@ async def _process_repository_digest(
# Delete existing document to replace with new one # Delete existing document to replace with new one
await session.delete(existing_document) await session.delete(existing_document)
await session.flush() await session.flush()
else:
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Repository {repo_full_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
return 0
# Generate summary using LLM (ONE call per repository!) # Generate summary using LLM (ONE call per repository!)
user_llm = await get_user_long_context_llm(session, user_id, search_space_id) user_llm = await get_user_long_context_llm(session, user_id, search_space_id)

View file

@ -24,7 +24,9 @@ from app.utils.document_converters import (
) )
from .base import ( from .base import (
calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -163,10 +165,22 @@ async def index_google_gmail_messages(
credentials, session, user_id, connector_id credentials, session, user_id, connector_id
) )
# Calculate date range using last_indexed_at if dates not provided
# This ensures Gmail uses the same date logic as other connectors
# (uses last_indexed_at → now, or 365 days back for first-time indexing)
calculated_start_date, calculated_end_date = calculate_date_range(
connector, start_date, end_date, default_days_back=365
)
# Fetch recent Google gmail messages # Fetch recent Google gmail messages
logger.info(f"Fetching recent emails for connector {connector_id}") logger.info(
f"Fetching emails for connector {connector_id} "
f"from {calculated_start_date} to {calculated_end_date}"
)
messages, error = await gmail_connector.get_recent_messages( messages, error = await gmail_connector.get_recent_messages(
max_results=max_messages, start_date=start_date, end_date=end_date max_results=max_messages,
start_date=calculated_start_date,
end_date=calculated_end_date,
) )
if error: if error:
@ -316,6 +330,22 @@ async def index_google_gmail_messages(
logger.info(f"Successfully updated Gmail message {subject}") logger.info(f"Successfully updated Gmail message {subject}")
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Gmail message {subject} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -23,6 +23,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -284,6 +285,22 @@ async def index_jira_issues(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Jira issue {issue_identifier} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -22,6 +22,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -315,6 +316,22 @@ async def index_linear_issues(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Linear issue {issue_identifier} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -21,6 +21,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -363,6 +364,22 @@ async def index_luma_events(
logger.info(f"Successfully updated Luma event {event_name}") logger.info(f"Successfully updated Luma event {event_name}")
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Luma event {event_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -2,6 +2,7 @@
Notion connector indexer. Notion connector indexer.
""" """
from collections.abc import Awaitable, Callable
from datetime import datetime from datetime import datetime
from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.exc import SQLAlchemyError
@ -22,12 +23,17 @@ from .base import (
build_document_metadata_string, build_document_metadata_string,
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
update_connector_last_indexed, update_connector_last_indexed,
) )
# Type alias for retry callback
# Signature: async callback(retry_reason, attempt, max_attempts, wait_seconds) -> None
RetryCallbackType = Callable[[str, int, int, float], Awaitable[None]]
async def index_notion_pages( async def index_notion_pages(
session: AsyncSession, session: AsyncSession,
@ -37,6 +43,7 @@ async def index_notion_pages(
start_date: str | None = None, start_date: str | None = None,
end_date: str | None = None, end_date: str | None = None,
update_last_indexed: bool = True, update_last_indexed: bool = True,
on_retry_callback: RetryCallbackType | None = None,
) -> tuple[int, str | None]: ) -> tuple[int, str | None]:
""" """
Index Notion pages from all accessible pages. Index Notion pages from all accessible pages.
@ -49,6 +56,9 @@ async def index_notion_pages(
start_date: Start date for indexing (YYYY-MM-DD format) start_date: Start date for indexing (YYYY-MM-DD format)
end_date: End date for indexing (YYYY-MM-DD format) end_date: End date for indexing (YYYY-MM-DD format)
update_last_indexed: Whether to update the last_indexed_at timestamp (default: True) update_last_indexed: Whether to update the last_indexed_at timestamp (default: True)
on_retry_callback: Optional callback for retry progress notifications.
Signature: async callback(retry_reason, attempt, max_attempts, wait_seconds)
retry_reason is one of: 'rate_limit', 'server_error', 'timeout'
Returns: Returns:
Tuple containing (number of documents indexed, error message or None) Tuple containing (number of documents indexed, error message or None)
@ -138,6 +148,10 @@ async def index_notion_pages(
session=session, connector_id=connector_id session=session, connector_id=connector_id
) )
# Set retry callback if provided (for user notifications during rate limits)
if on_retry_callback:
notion_client.set_retry_callback(on_retry_callback)
logger.info(f"Fetching Notion pages from {start_date_iso} to {end_date_iso}") logger.info(f"Fetching Notion pages from {start_date_iso} to {end_date_iso}")
await task_logger.log_task_progress( await task_logger.log_task_progress(
@ -156,6 +170,20 @@ async def index_notion_pages(
start_date=start_date_iso, end_date=end_date_iso start_date=start_date_iso, end_date=end_date_iso
) )
logger.info(f"Found {len(pages)} Notion pages") logger.info(f"Found {len(pages)} Notion pages")
# Get count of pages that had unsupported content skipped
pages_with_skipped_content = notion_client.get_skipped_content_count()
if pages_with_skipped_content > 0:
logger.info(
f"{pages_with_skipped_content} pages had Notion AI content skipped (not available via API)"
)
# Check if using legacy integration token and log warning
if notion_client.is_using_legacy_token():
logger.warning(
f"Connector {connector_id} is using legacy integration token. "
"Recommend reconnecting with OAuth."
)
except Exception as e: except Exception as e:
await task_logger.log_task_failure( await task_logger.log_task_failure(
log_entry, log_entry,
@ -170,12 +198,13 @@ async def index_notion_pages(
if not pages: if not pages:
await task_logger.log_task_success( await task_logger.log_task_success(
log_entry, log_entry,
f"No Notion pages found for connector {connector_id}", f"No Notion pages found for connector {connector_id}. "
"Ensure pages are shared with the Notion integration.",
{"pages_found": 0}, {"pages_found": 0},
) )
logger.info("No Notion pages found to index") logger.info("No Notion pages found to index")
await notion_client.close() await notion_client.close()
return 0, "No Notion pages found" return 0, None # Success with 0 pages, not an error
# Track the number of documents indexed # Track the number of documents indexed
documents_indexed = 0 documents_indexed = 0
@ -360,6 +389,22 @@ async def index_notion_pages(
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Notion page {page_title} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Get user's long context LLM # Get user's long context LLM
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(
@ -437,13 +482,23 @@ async def index_notion_pages(
logger.info(f"Final commit: Total {documents_indexed} documents processed") logger.info(f"Final commit: Total {documents_indexed} documents processed")
await session.commit() await session.commit()
# Prepare result message # Get final count of pages with skipped Notion AI content
pages_with_skipped_ai_content = notion_client.get_skipped_content_count()
# Prepare result message with user-friendly notification about skipped content
result_message = None result_message = None
if skipped_pages: if skipped_pages:
result_message = f"Processed {total_processed} pages. Skipped {len(skipped_pages)} pages: {', '.join(skipped_pages)}" result_message = f"Processed {total_processed} pages. Skipped {len(skipped_pages)} pages: {', '.join(skipped_pages)}"
else: else:
result_message = f"Processed {total_processed} pages." result_message = f"Processed {total_processed} pages."
# Add user-friendly message about skipped Notion AI content
if pages_with_skipped_ai_content > 0:
result_message += (
" Audio transcriptions and AI summaries from Notion aren't accessible "
"via their API - all other content was saved."
)
# Log success # Log success
await task_logger.log_task_success( await task_logger.log_task_success(
log_entry, log_entry,
@ -453,6 +508,7 @@ async def index_notion_pages(
"documents_indexed": documents_indexed, "documents_indexed": documents_indexed,
"documents_skipped": documents_skipped, "documents_skipped": documents_skipped,
"skipped_pages_count": len(skipped_pages), "skipped_pages_count": len(skipped_pages),
"pages_with_skipped_ai_content": pages_with_skipped_ai_content,
"result_message": result_message, "result_message": result_message,
}, },
) )
@ -464,10 +520,28 @@ async def index_notion_pages(
# Clean up the async client # Clean up the async client
await notion_client.close() await notion_client.close()
# Build user-friendly notification messages
# This will be shown in the notification to inform users
notification_parts = []
if pages_with_skipped_ai_content > 0:
notification_parts.append(
"Some Notion AI content couldn't be synced (API limitation)"
)
if notion_client.is_using_legacy_token():
notification_parts.append(
"Using legacy token. Reconnect with OAuth for better reliability."
)
user_notification_message = (
" ".join(notification_parts) if notification_parts else None
)
return ( return (
total_processed, total_processed,
None, user_notification_message,
) # Return None on success (result_message is for logging only) )
except SQLAlchemyError as db_error: except SQLAlchemyError as db_error:
await session.rollback() await session.rollback()

View file

@ -28,6 +28,7 @@ from app.utils.document_converters import (
from .base import ( from .base import (
build_document_metadata_string, build_document_metadata_string,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -426,6 +427,22 @@ async def index_obsidian_vault(
indexed_count += 1 indexed_count += 1
else: else:
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Obsidian note {title} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
skipped_count += 1
continue
# Create new document # Create new document
logger.info(f"Indexing new note: {title}") logger.info(f"Indexing new note: {title}")

View file

@ -22,6 +22,7 @@ from .base import (
build_document_metadata_markdown, build_document_metadata_markdown,
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -325,6 +326,22 @@ async def index_slack_messages(
logger.info(f"Successfully updated Slack message {msg_ts}") logger.info(f"Successfully updated Slack message {msg_ts}")
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"Slack message {msg_ts} in channel {channel_name} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Process chunks # Process chunks
chunks = await create_document_chunks(combined_document_string) chunks = await create_document_chunks(combined_document_string)

View file

@ -21,6 +21,7 @@ from .base import (
build_document_metadata_markdown, build_document_metadata_markdown,
calculate_date_range, calculate_date_range,
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -354,6 +355,27 @@ async def index_teams_messages(
) )
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = (
await check_duplicate_document_by_hash(
session, content_hash
)
)
if duplicate_by_content:
logger.info(
"Teams message %s in channel %s already indexed by another connector "
"(existing document ID: %s, type: %s). Skipping.",
message_id,
channel_name,
duplicate_by_content.id,
duplicate_by_content.document_type,
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Process chunks # Process chunks
chunks = await create_document_chunks( chunks = await create_document_chunks(

View file

@ -18,9 +18,11 @@ from app.utils.document_converters import (
generate_document_summary, generate_document_summary,
generate_unique_identifier_hash, generate_unique_identifier_hash,
) )
from app.utils.webcrawler_utils import parse_webcrawler_urls
from .base import ( from .base import (
check_document_by_unique_identifier, check_document_by_unique_identifier,
check_duplicate_document_by_hash,
get_connector_by_id, get_connector_by_id,
get_current_timestamp, get_current_timestamp,
logger, logger,
@ -96,13 +98,7 @@ async def index_crawled_urls(
api_key = connector.config.get("FIRECRAWL_API_KEY") api_key = connector.config.get("FIRECRAWL_API_KEY")
# Get URLs from connector config # Get URLs from connector config
initial_urls = connector.config.get("INITIAL_URLS", "") urls = parse_webcrawler_urls(connector.config.get("INITIAL_URLS"))
if isinstance(initial_urls, str):
urls = [url.strip() for url in initial_urls.split("\n") if url.strip()]
elif isinstance(initial_urls, list):
urls = [url.strip() for url in initial_urls if url.strip()]
else:
urls = []
logger.info( logger.info(
f"Starting crawled web page indexing for connector {connector_id} with {len(urls)} URLs" f"Starting crawled web page indexing for connector {connector_id} with {len(urls)} URLs"
@ -281,6 +277,22 @@ async def index_crawled_urls(
logger.info(f"Successfully updated URL {url}") logger.info(f"Successfully updated URL {url}")
continue continue
# Document doesn't exist by unique_identifier_hash
# Check if a document with the same content_hash exists (from another connector)
with session.no_autoflush:
duplicate_by_content = await check_duplicate_document_by_hash(
session, content_hash
)
if duplicate_by_content:
logger.info(
f"URL {url} already indexed by another connector "
f"(existing document ID: {duplicate_by_content.id}, "
f"type: {duplicate_by_content.document_type}). Skipping."
)
documents_skipped += 1
continue
# Document doesn't exist - create new one # Document doesn't exist - create new one
# Generate summary with metadata # Generate summary with metadata
user_llm = await get_user_long_context_llm( user_llm = await get_user_long_context_llm(

View file

@ -37,18 +37,32 @@ from .base import (
from .markdown_processor import add_received_markdown_file_document from .markdown_processor import add_received_markdown_file_document
# Constants for LlamaCloud retry configuration # Constants for LlamaCloud retry configuration
LLAMACLOUD_MAX_RETRIES = 3 LLAMACLOUD_MAX_RETRIES = 5 # Increased from 3 for large file resilience
LLAMACLOUD_BASE_DELAY = 5 # Base delay in seconds for exponential backoff LLAMACLOUD_BASE_DELAY = 10 # Base delay in seconds for exponential backoff
LLAMACLOUD_MAX_DELAY = 120 # Maximum delay between retries (2 minutes)
LLAMACLOUD_RETRYABLE_EXCEPTIONS = ( LLAMACLOUD_RETRYABLE_EXCEPTIONS = (
ssl.SSLError, ssl.SSLError,
httpx.ConnectError, httpx.ConnectError,
httpx.ConnectTimeout, httpx.ConnectTimeout,
httpx.ReadTimeout, httpx.ReadTimeout,
httpx.WriteTimeout, httpx.WriteTimeout,
httpx.RemoteProtocolError,
httpx.LocalProtocolError,
ConnectionError, ConnectionError,
ConnectionResetError,
TimeoutError, TimeoutError,
OSError, # Catches various network-level errors
) )
# Timeout calculation constants
UPLOAD_BYTES_PER_SECOND_SLOW = (
100 * 1024
) # 100 KB/s (conservative for slow connections)
MIN_UPLOAD_TIMEOUT = 120 # Minimum 2 minutes for any file
MAX_UPLOAD_TIMEOUT = 1800 # Maximum 30 minutes for very large files
BASE_JOB_TIMEOUT = 600 # 10 minutes base for job processing
PER_PAGE_JOB_TIMEOUT = 60 # 1 minute per page for processing
def get_google_drive_unique_identifier( def get_google_drive_unique_identifier(
connector: dict | None, connector: dict | None,
@ -204,6 +218,48 @@ async def find_existing_document_with_migration(
return existing_document return existing_document
def calculate_upload_timeout(file_size_bytes: int) -> float:
    """Estimate a safe upload timeout, in seconds, for a file of the given size.

    The estimate assumes a conservatively slow connection speed
    (``UPLOAD_BYTES_PER_SECOND_SLOW``) so worst-case uploads still finish,
    and the result is clamped between ``MIN_UPLOAD_TIMEOUT`` and
    ``MAX_UPLOAD_TIMEOUT``.

    Args:
        file_size_bytes: Size of the file in bytes.

    Returns:
        Timeout in seconds, within the configured bounds.
    """
    # Worst-case transfer time, padded 50% for network variability / SSL overhead.
    padded_estimate = (file_size_bytes / UPLOAD_BYTES_PER_SECOND_SLOW) * 1.5

    # Clamp to the configured floor and ceiling.
    if padded_estimate < MIN_UPLOAD_TIMEOUT:
        return MIN_UPLOAD_TIMEOUT
    if padded_estimate > MAX_UPLOAD_TIMEOUT:
        return MAX_UPLOAD_TIMEOUT
    return padded_estimate
def calculate_job_timeout(estimated_pages: int, file_size_bytes: int) -> float:
    """Estimate the job-processing timeout, in seconds, for a parse job.

    Two independent estimates are computed — one driven by page count, one
    driven by raw file size (large images process slowly) — and the larger
    of the two wins.

    Args:
        estimated_pages: Estimated number of pages in the document.
        file_size_bytes: Size of the file in bytes.

    Returns:
        Timeout in seconds.
    """
    # Base processing time plus a fixed budget per page.
    by_pages = BASE_JOB_TIMEOUT + (estimated_pages * PER_PAGE_JOB_TIMEOUT)

    # Size-driven estimate: roughly one extra minute per 10 MB of input.
    by_size = BASE_JOB_TIMEOUT + (file_size_bytes / (10 * 1024 * 1024)) * 60

    # Take whichever estimate is more generous.
    return by_size if by_size > by_pages else by_pages
async def parse_with_llamacloud_retry( async def parse_with_llamacloud_retry(
file_path: str, file_path: str,
estimated_pages: int, estimated_pages: int,
@ -213,6 +269,9 @@ async def parse_with_llamacloud_retry(
""" """
Parse a file with LlamaCloud with retry logic for transient SSL/connection errors. Parse a file with LlamaCloud with retry logic for transient SSL/connection errors.
Uses dynamic timeout calculations based on file size and page count to handle
very large files reliably.
Args: Args:
file_path: Path to the file to parse file_path: Path to the file to parse
estimated_pages: Estimated number of pages for timeout calculation estimated_pages: Estimated number of pages for timeout calculation
@ -225,25 +284,37 @@ async def parse_with_llamacloud_retry(
Raises: Raises:
Exception: If all retries fail Exception: If all retries fail
""" """
import os
import random
from llama_cloud_services import LlamaParse from llama_cloud_services import LlamaParse
from llama_cloud_services.parse.utils import ResultType from llama_cloud_services.parse.utils import ResultType
# Calculate timeouts based on estimated pages # Get file size for timeout calculations
# Base timeout of 300 seconds + 30 seconds per page for large documents file_size_bytes = os.path.getsize(file_path)
base_timeout = 300 file_size_mb = file_size_bytes / (1024 * 1024)
per_page_timeout = 30
job_timeout = base_timeout + (estimated_pages * per_page_timeout)
# Create custom httpx client with larger timeouts for file uploads # Calculate dynamic timeouts based on file size and page count
# The SSL error often occurs during large file uploads, so we need generous timeouts upload_timeout = calculate_upload_timeout(file_size_bytes)
job_timeout = calculate_job_timeout(estimated_pages, file_size_bytes)
# HTTP client timeouts - scaled based on file size
# Write timeout is critical for large file uploads
custom_timeout = httpx.Timeout( custom_timeout = httpx.Timeout(
connect=60.0, # 60 seconds to establish connection connect=120.0, # 2 minutes to establish connection (handles slow DNS, etc.)
read=300.0, # 5 minutes to read response read=upload_timeout, # Dynamic based on file size
write=300.0, # 5 minutes to write/upload (important for large files) write=upload_timeout, # Dynamic based on file size (upload time)
pool=60.0, # 60 seconds to acquire connection from pool pool=120.0, # 2 minutes to acquire connection from pool
)
logging.info(
f"LlamaCloud upload configured: file_size={file_size_mb:.1f}MB, "
f"pages={estimated_pages}, upload_timeout={upload_timeout:.0f}s, "
f"job_timeout={job_timeout:.0f}s"
) )
last_exception = None last_exception = None
attempt_errors = []
for attempt in range(1, LLAMACLOUD_MAX_RETRIES + 1): for attempt in range(1, LLAMACLOUD_MAX_RETRIES + 1):
try: try:
@ -257,46 +328,66 @@ async def parse_with_llamacloud_retry(
language="en", language="en",
result_type=ResultType.MD, result_type=ResultType.MD,
# Timeout settings for large files # Timeout settings for large files
max_timeout=max(2000, job_timeout), # Overall max timeout max_timeout=int(max(2000, job_timeout + upload_timeout)),
job_timeout_in_seconds=job_timeout, job_timeout_in_seconds=job_timeout,
job_timeout_extra_time_per_page_in_seconds=per_page_timeout, job_timeout_extra_time_per_page_in_seconds=PER_PAGE_JOB_TIMEOUT,
# Use our custom client with larger timeouts # Use our custom client with larger timeouts
custom_client=custom_client, custom_client=custom_client,
) )
# Parse the file asynchronously # Parse the file asynchronously
result = await parser.aparse(file_path) result = await parser.aparse(file_path)
# Success - log if we had previous failures
if attempt > 1:
logging.info(
f"LlamaCloud upload succeeded on attempt {attempt} after "
f"{len(attempt_errors)} failures"
)
return result return result
except LLAMACLOUD_RETRYABLE_EXCEPTIONS as e: except LLAMACLOUD_RETRYABLE_EXCEPTIONS as e:
last_exception = e last_exception = e
error_type = type(e).__name__ error_type = type(e).__name__
error_msg = str(e)[:200]
attempt_errors.append(f"Attempt {attempt}: {error_type} - {error_msg}")
if attempt < LLAMACLOUD_MAX_RETRIES: if attempt < LLAMACLOUD_MAX_RETRIES:
# Calculate exponential backoff delay # Calculate exponential backoff with jitter
delay = LLAMACLOUD_BASE_DELAY * (2 ** (attempt - 1)) # Base delay doubles each attempt, capped at max delay
base_delay = min(
LLAMACLOUD_BASE_DELAY * (2 ** (attempt - 1)), LLAMACLOUD_MAX_DELAY
)
# Add random jitter (±25%) to prevent thundering herd
jitter = base_delay * 0.25 * (2 * random.random() - 1)
delay = base_delay + jitter
if task_logger and log_entry: if task_logger and log_entry:
await task_logger.log_task_progress( await task_logger.log_task_progress(
log_entry, log_entry,
f"LlamaCloud upload failed (attempt {attempt}/{LLAMACLOUD_MAX_RETRIES}), retrying in {delay}s", f"LlamaCloud upload failed (attempt {attempt}/{LLAMACLOUD_MAX_RETRIES}), retrying in {delay:.0f}s",
{ {
"error_type": error_type, "error_type": error_type,
"error_message": str(e)[:200], "error_message": error_msg,
"attempt": attempt, "attempt": attempt,
"retry_delay": delay, "retry_delay": delay,
"file_size_mb": round(file_size_mb, 1),
"upload_timeout": upload_timeout,
}, },
) )
else: else:
logging.warning( logging.warning(
f"LlamaCloud upload failed (attempt {attempt}/{LLAMACLOUD_MAX_RETRIES}): {error_type}. " f"LlamaCloud upload failed (attempt {attempt}/{LLAMACLOUD_MAX_RETRIES}): "
f"Retrying in {delay}s..." f"{error_type}. File: {file_size_mb:.1f}MB. Retrying in {delay:.0f}s..."
) )
await asyncio.sleep(delay) await asyncio.sleep(delay)
else: else:
logging.error( logging.error(
f"LlamaCloud upload failed after {LLAMACLOUD_MAX_RETRIES} attempts: {error_type} - {e}" f"LlamaCloud upload failed after {LLAMACLOUD_MAX_RETRIES} attempts. "
f"File size: {file_size_mb:.1f}MB, Pages: {estimated_pages}. "
f"Errors: {'; '.join(attempt_errors)}"
) )
except Exception: except Exception:
@ -304,7 +395,10 @@ async def parse_with_llamacloud_retry(
raise raise
# All retries exhausted # All retries exhausted
raise last_exception or RuntimeError("LlamaCloud parsing failed after all retries") raise last_exception or RuntimeError(
f"LlamaCloud parsing failed after {LLAMACLOUD_MAX_RETRIES} retries. "
f"File size: {file_size_mb:.1f}MB"
)
async def add_received_file_document_using_unstructured( async def add_received_file_document_using_unstructured(

View file

@ -229,3 +229,4 @@ auth_backend = AuthenticationBackend(
fastapi_users = FastAPIUsers[User, uuid.UUID](get_user_manager, [auth_backend]) fastapi_users = FastAPIUsers[User, uuid.UUID](get_user_manager, [auth_backend])
current_active_user = fastapi_users.current_user(active=True) current_active_user = fastapi_users.current_user(active=True)
current_optional_user = fastapi_users.current_user(active=True, optional=True)

View file

@ -0,0 +1,75 @@
"""
Utilities for working with message content.
Message content in new_chat_messages can be stored in various formats:
- String: Simple text content
- List: Array of content parts [{"type": "text", "text": "..."}, {"type": "tool-call", ...}]
- Dict: Single content object
These utilities help extract and transform content for different use cases.
"""
from langchain_core.messages import AIMessage, HumanMessage
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
def extract_text_content(content: str | dict | list) -> str:
"""Extract plain text content from various message formats."""
if isinstance(content, str):
return content
if isinstance(content, dict):
# Handle dict with 'text' key
if "text" in content:
return content["text"]
return str(content)
if isinstance(content, list):
# Handle list of parts (e.g., [{"type": "text", "text": "..."}])
texts = []
for part in content:
if isinstance(part, dict) and part.get("type") == "text":
texts.append(part.get("text", ""))
elif isinstance(part, str):
texts.append(part)
return "\n".join(texts) if texts else ""
return ""
async def bootstrap_history_from_db(
    session: AsyncSession,
    thread_id: int,
) -> list[HumanMessage | AIMessage]:
    """
    Load a thread's messages from the database as LangChain messages.

    Used for cloned chats: the LangGraph checkpointer has no state for the
    new thread, but the copied rows in the database should still serve as
    conversation context.

    Messages are returned in ``created_at`` order. Rows whose content yields
    no text, or whose role is neither ``"user"`` nor ``"assistant"``, are
    skipped.

    Args:
        session: Database session.
        thread_id: The chat thread ID.

    Returns:
        List of LangChain messages (HumanMessage/AIMessage).
    """
    from app.db import NewChatMessage

    stmt = (
        select(NewChatMessage)
        .filter(NewChatMessage.thread_id == thread_id)
        .order_by(NewChatMessage.created_at)
    )
    rows = (await session.execute(stmt)).scalars().all()

    # Map DB roles onto the corresponding LangChain message classes.
    role_to_cls = {"user": HumanMessage, "assistant": AIMessage}

    history: list[HumanMessage | AIMessage] = []
    for row in rows:
        text = extract_text_content(row.content)
        if not text:
            continue
        message_cls = role_to_cls.get(row.role)
        if message_cls is not None:
            history.append(message_cls(content=text))
    return history

View file

@ -43,6 +43,7 @@ def create_periodic_schedule(
user_id: str, user_id: str,
connector_type: SearchSourceConnectorType, connector_type: SearchSourceConnectorType,
frequency_minutes: int, frequency_minutes: int,
connector_config: dict | None = None,
) -> bool: ) -> bool:
""" """
Trigger the first indexing run immediately when periodic indexing is enabled. Trigger the first indexing run immediately when periodic indexing is enabled.
@ -57,11 +58,26 @@ def create_periodic_schedule(
user_id: User ID user_id: User ID
connector_type: Type of connector connector_type: Type of connector
frequency_minutes: Frequency in minutes (used for logging) frequency_minutes: Frequency in minutes (used for logging)
connector_config: Optional connector config dict for validation
Returns: Returns:
True if successful, False otherwise True if successful, False otherwise
""" """
try: try:
# Special handling for connectors that require config validation
if connector_type == SearchSourceConnectorType.WEBCRAWLER_CONNECTOR:
from app.utils.webcrawler_utils import parse_webcrawler_urls
config = connector_config or {}
urls = parse_webcrawler_urls(config.get("INITIAL_URLS"))
if not urls:
logger.info(
f"Webcrawler connector {connector_id} has no URLs configured, "
"skipping first indexing run (will run when URLs are added)"
)
return True # Return success - schedule is created, just no first run
logger.info( logger.info(
f"Periodic indexing enabled for connector {connector_id} " f"Periodic indexing enabled for connector {connector_id} "
f"(frequency: {frequency_minutes} minutes). Triggering first run..." f"(frequency: {frequency_minutes} minutes). Triggering first run..."

View file

@ -0,0 +1,28 @@
"""
Utility functions for webcrawler connector.
"""
def parse_webcrawler_urls(initial_urls: str | list | None) -> list[str]:
"""
Parse URLs from webcrawler INITIAL_URLS value.
Handles both string (newline-separated) and list formats.
Args:
initial_urls: The INITIAL_URLS value (string, list, or None)
Returns:
List of parsed, stripped, non-empty URLs
"""
if initial_urls is None:
return []
if isinstance(initial_urls, str):
return [url.strip() for url in initial_urls.split("\n") if url.strip()]
elif isinstance(initial_urls, list):
return [
url.strip() for url in initial_urls if isinstance(url, str) and url.strip()
]
else:
return []

View file

@ -27,6 +27,13 @@ function LoginContent() {
const error = searchParams.get("error"); const error = searchParams.get("error");
const message = searchParams.get("message"); const message = searchParams.get("message");
const logout = searchParams.get("logout"); const logout = searchParams.get("logout");
const returnUrl = searchParams.get("returnUrl");
// Save returnUrl to localStorage so it persists through OAuth flows (e.g., Google)
// This is read by TokenHandler after successful authentication
if (returnUrl) {
localStorage.setItem("surfsense_redirect_path", decodeURIComponent(returnUrl));
}
// Show registration success message // Show registration success message
if (registered === "true") { if (registered === "true") {
@ -93,7 +100,7 @@ function LoginContent() {
}, [searchParams, t, tCommon]); }, [searchParams, t, tCommon]);
// Use global loading screen for auth type determination - spinner animation won't reset // Use global loading screen for auth type determination - spinner animation won't reset
useGlobalLoadingEffect(isLoading, tCommon("loading"), "login"); useGlobalLoadingEffect(isLoading);
// Show nothing while loading - the GlobalLoadingProvider handles the loading UI // Show nothing while loading - the GlobalLoadingProvider handles the loading UI
if (isLoading) { if (isLoading) {

View file

@ -1,13 +1,10 @@
"use client"; "use client";
import { useTranslations } from "next-intl";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading"; import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
export default function AuthCallbackLoading() { export default function AuthCallbackLoading() {
const t = useTranslations("auth");
// Use global loading - spinner animation won't reset when page transitions // Use global loading - spinner animation won't reset when page transitions
useGlobalLoadingEffect(true, t("processing_authentication"), "default"); useGlobalLoadingEffect(true);
// Return null - the GlobalLoadingProvider handles the loading UI // Return null - the GlobalLoadingProvider handles the loading UI
return null; return null;

View file

@ -154,11 +154,7 @@ export function DashboardClientLayout({
isAutoConfiguring; isAutoConfiguring;
// Use global loading screen - spinner animation won't reset // Use global loading screen - spinner animation won't reset
useGlobalLoadingEffect( useGlobalLoadingEffect(shouldShowLoading);
shouldShowLoading,
isAutoConfiguring ? t("setting_up_ai") : t("checking_llm_prefs"),
"default"
);
if (shouldShowLoading) { if (shouldShowLoading) {
return null; return null;

View file

@ -13,7 +13,11 @@ import { useTranslations } from "next-intl";
import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { toast } from "sonner"; import { toast } from "sonner";
import { z } from "zod"; import { z } from "zod";
import { currentThreadAtom } from "@/atoms/chat/current-thread.atom"; import {
clearTargetCommentIdAtom,
currentThreadAtom,
setTargetCommentIdAtom,
} from "@/atoms/chat/current-thread.atom";
import { import {
type MentionedDocumentInfo, type MentionedDocumentInfo,
mentionedDocumentIdsAtom, mentionedDocumentIdsAtom,
@ -38,9 +42,11 @@ import { RecallMemoryToolUI, SaveMemoryToolUI } from "@/components/tool-ui/user-
import { Spinner } from "@/components/ui/spinner"; import { Spinner } from "@/components/ui/spinner";
import { useChatSessionStateSync } from "@/hooks/use-chat-session-state"; import { useChatSessionStateSync } from "@/hooks/use-chat-session-state";
import { useMessagesElectric } from "@/hooks/use-messages-electric"; import { useMessagesElectric } from "@/hooks/use-messages-electric";
import { publicChatApiService } from "@/lib/apis/public-chat-api.service";
// import { WriteTodosToolUI } from "@/components/tool-ui/write-todos"; // import { WriteTodosToolUI } from "@/components/tool-ui/write-todos";
import { getBearerToken } from "@/lib/auth-utils"; import { getBearerToken } from "@/lib/auth-utils";
import { createAttachmentAdapter, extractAttachmentContent } from "@/lib/chat/attachment-adapter"; import { createAttachmentAdapter, extractAttachmentContent } from "@/lib/chat/attachment-adapter";
import { convertToThreadMessage } from "@/lib/chat/message-utils";
import { import {
isPodcastGenerating, isPodcastGenerating,
looksLikePodcastRequest, looksLikePodcastRequest,
@ -110,112 +116,6 @@ function extractMentionedDocuments(content: unknown): MentionedDocumentInfo[] {
return []; return [];
} }
/**
* Zod schema for persisted attachment info
*/
const PersistedAttachmentSchema = z.object({
id: z.string(),
name: z.string(),
type: z.string(),
contentType: z.string().optional(),
imageDataUrl: z.string().optional(),
extractedContent: z.string().optional(),
});
const AttachmentsPartSchema = z.object({
type: z.literal("attachments"),
items: z.array(PersistedAttachmentSchema),
});
type PersistedAttachment = z.infer<typeof PersistedAttachmentSchema>;
/**
* Extract persisted attachments from message content (type-safe with Zod)
*/
function extractPersistedAttachments(content: unknown): PersistedAttachment[] {
if (!Array.isArray(content)) return [];
for (const part of content) {
const result = AttachmentsPartSchema.safeParse(part);
if (result.success) {
return result.data.items;
}
}
return [];
}
/**
* Convert backend message to assistant-ui ThreadMessageLike format
* Filters out 'thinking-steps' part as it's handled separately via messageThinkingSteps
* Restores attachments for user messages from persisted data
*/
function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
let content: ThreadMessageLike["content"];
if (typeof msg.content === "string") {
content = [{ type: "text", text: msg.content }];
} else if (Array.isArray(msg.content)) {
// Filter out custom metadata parts - they're handled separately
const filteredContent = msg.content.filter((part: unknown) => {
if (typeof part !== "object" || part === null || !("type" in part)) return true;
const partType = (part as { type: string }).type;
// Filter out thinking-steps, mentioned-documents, and attachments
return (
partType !== "thinking-steps" &&
partType !== "mentioned-documents" &&
partType !== "attachments"
);
});
content =
filteredContent.length > 0
? (filteredContent as ThreadMessageLike["content"])
: [{ type: "text", text: "" }];
} else {
content = [{ type: "text", text: String(msg.content) }];
}
// Restore attachments for user messages
let attachments: ThreadMessageLike["attachments"];
if (msg.role === "user") {
const persistedAttachments = extractPersistedAttachments(msg.content);
if (persistedAttachments.length > 0) {
attachments = persistedAttachments.map((att) => ({
id: att.id,
name: att.name,
type: att.type as "document" | "image" | "file",
contentType: att.contentType || "application/octet-stream",
status: { type: "complete" as const },
content: [],
// Custom fields for our ChatAttachment interface
imageDataUrl: att.imageDataUrl,
extractedContent: att.extractedContent,
}));
}
}
// Build metadata.custom for author display in shared chats
const metadata = msg.author_id
? {
custom: {
author: {
displayName: msg.author_display_name ?? null,
avatarUrl: msg.author_avatar_url ?? null,
},
},
}
: undefined;
return {
id: `msg-${msg.id}`,
role: msg.role,
content,
createdAt: new Date(msg.created_at),
attachments,
metadata,
};
}
/** /**
* Tools that should render custom UI in the chat. * Tools that should render custom UI in the chat.
*/ */
@ -242,6 +142,8 @@ export default function NewChatPage() {
const params = useParams(); const params = useParams();
const queryClient = useQueryClient(); const queryClient = useQueryClient();
const [isInitializing, setIsInitializing] = useState(true); const [isInitializing, setIsInitializing] = useState(true);
const [isCompletingClone, setIsCompletingClone] = useState(false);
const [cloneError, setCloneError] = useState(false);
const [threadId, setThreadId] = useState<number | null>(null); const [threadId, setThreadId] = useState<number | null>(null);
const [currentThread, setCurrentThread] = useState<ThreadRecord | null>(null); const [currentThread, setCurrentThread] = useState<ThreadRecord | null>(null);
const [messages, setMessages] = useState<ThreadMessageLike[]>([]); const [messages, setMessages] = useState<ThreadMessageLike[]>([]);
@ -261,6 +163,8 @@ export default function NewChatPage() {
const setMessageDocumentsMap = useSetAtom(messageDocumentsMapAtom); const setMessageDocumentsMap = useSetAtom(messageDocumentsMapAtom);
const hydratePlanState = useSetAtom(hydratePlanStateAtom); const hydratePlanState = useSetAtom(hydratePlanStateAtom);
const setCurrentThreadState = useSetAtom(currentThreadAtom); const setCurrentThreadState = useSetAtom(currentThreadAtom);
const setTargetCommentId = useSetAtom(setTargetCommentIdAtom);
const clearTargetCommentId = useSetAtom(clearTargetCommentIdAtom);
// Get current user for author info in shared chats // Get current user for author info in shared chats
const { data: currentUser } = useAtomValue(currentUserAtom); const { data: currentUser } = useAtomValue(currentUserAtom);
@ -294,6 +198,12 @@ export default function NewChatPage() {
? membersData?.find((m) => m.user_id === msg.author_id) ? membersData?.find((m) => m.user_id === msg.author_id)
: null; : null;
// Preserve existing author info if member lookup fails (e.g., cloned chats)
const existingMsg = prev.find((m) => m.id === `msg-${msg.id}`);
const existingAuthor = existingMsg?.metadata?.custom?.author as
| { displayName?: string | null; avatarUrl?: string | null }
| undefined;
return convertToThreadMessage({ return convertToThreadMessage({
id: msg.id, id: msg.id,
thread_id: msg.thread_id, thread_id: msg.thread_id,
@ -301,8 +211,8 @@ export default function NewChatPage() {
content: msg.content, content: msg.content,
author_id: msg.author_id, author_id: msg.author_id,
created_at: msg.created_at, created_at: msg.created_at,
author_display_name: member?.user_display_name ?? null, author_display_name: member?.user_display_name ?? existingAuthor?.displayName ?? null,
author_avatar_url: member?.user_avatar_url ?? null, author_avatar_url: member?.user_avatar_url ?? existingAuthor?.avatarUrl ?? null,
}); });
}); });
}); });
@ -422,46 +332,71 @@ export default function NewChatPage() {
initializeThread(); initializeThread();
}, [initializeThread]); }, [initializeThread]);
// Handle clone completion when thread has clone_pending flag
useEffect(() => {
if (!currentThread?.clone_pending || isCompletingClone || cloneError) return;
const completeClone = async () => {
setIsCompletingClone(true);
try {
await publicChatApiService.completeClone({ thread_id: currentThread.id });
// Re-initialize thread to fetch cloned content using existing logic
await initializeThread();
// Invalidate threads query to update sidebar
queryClient.invalidateQueries({
predicate: (query) => Array.isArray(query.queryKey) && query.queryKey[0] === "threads",
});
} catch (error) {
console.error("[NewChatPage] Failed to complete clone:", error);
toast.error("Failed to copy chat content. Please try again.");
setCloneError(true);
} finally {
setIsCompletingClone(false);
}
};
completeClone();
}, [
currentThread?.clone_pending,
currentThread?.id,
isCompletingClone,
cloneError,
initializeThread,
queryClient,
]);
// Handle scroll to comment from URL query params (e.g., from inbox item click) // Handle scroll to comment from URL query params (e.g., from inbox item click)
const searchParams = useSearchParams(); const searchParams = useSearchParams();
const targetCommentId = searchParams.get("commentId"); const targetCommentIdParam = searchParams.get("commentId");
// Set target comment ID from URL param - the AssistantMessage and CommentItem
// components will handle scrolling and highlighting once comments are loaded
useEffect(() => { useEffect(() => {
if (!targetCommentId || isInitializing || messages.length === 0) return; if (targetCommentIdParam && !isInitializing) {
const commentId = Number.parseInt(targetCommentIdParam, 10);
const tryScroll = () => { if (!Number.isNaN(commentId)) {
const el = document.querySelector(`[data-comment-id="${targetCommentId}"]`); setTargetCommentId(commentId);
if (el) {
el.scrollIntoView({ behavior: "smooth", block: "center" });
return true;
} }
return false; }
};
// Try immediately // Cleanup on unmount or when navigating away
if (tryScroll()) return; return () => clearTargetCommentId();
}, [targetCommentIdParam, isInitializing, setTargetCommentId, clearTargetCommentId]);
// Retry every 200ms for up to 10 seconds
const intervalId = setInterval(() => {
if (tryScroll()) clearInterval(intervalId);
}, 200);
const timeoutId = setTimeout(() => clearInterval(intervalId), 10000);
return () => {
clearInterval(intervalId);
clearTimeout(timeoutId);
};
}, [targetCommentId, isInitializing, messages.length]);
// Sync current thread state to atom // Sync current thread state to atom
useEffect(() => { useEffect(() => {
setCurrentThreadState({ setCurrentThreadState((prev) => ({
...prev,
id: currentThread?.id ?? null, id: currentThread?.id ?? null,
visibility: currentThread?.visibility ?? null, visibility: currentThread?.visibility ?? null,
hasComments: currentThread?.has_comments ?? false, hasComments: currentThread?.has_comments ?? false,
addingCommentToMessageId: null, addingCommentToMessageId: null,
}); publicShareEnabled: currentThread?.public_share_enabled ?? false,
publicShareToken: currentThread?.public_share_token ?? null,
}));
}, [currentThread, setCurrentThreadState]); }, [currentThread, setCurrentThreadState]);
// Cancel ongoing request // Cancel ongoing request
@ -887,13 +822,13 @@ export default function NewChatPage() {
// Update the tool call with its result // Update the tool call with its result
updateToolCall(parsed.toolCallId, { result: parsed.output }); updateToolCall(parsed.toolCallId, { result: parsed.output });
// Handle podcast-specific logic // Handle podcast-specific logic
if (parsed.output?.status === "processing" && parsed.output?.task_id) { if (parsed.output?.status === "pending" && parsed.output?.podcast_id) {
// Check if this is a podcast tool by looking at the content part // Check if this is a podcast tool by looking at the content part
const idx = toolCallIndices.get(parsed.toolCallId); const idx = toolCallIndices.get(parsed.toolCallId);
if (idx !== undefined) { if (idx !== undefined) {
const part = contentParts[idx]; const part = contentParts[idx];
if (part?.type === "tool-call" && part.toolName === "generate_podcast") { if (part?.type === "tool-call" && part.toolName === "generate_podcast") {
setActivePodcastTaskId(parsed.output.task_id); setActivePodcastTaskId(String(parsed.output.podcast_id));
} }
} }
} }
@ -1307,12 +1242,12 @@ export default function NewChatPage() {
case "tool-output-available": case "tool-output-available":
updateToolCall(parsed.toolCallId, { result: parsed.output }); updateToolCall(parsed.toolCallId, { result: parsed.output });
if (parsed.output?.status === "processing" && parsed.output?.task_id) { if (parsed.output?.status === "pending" && parsed.output?.podcast_id) {
const idx = toolCallIndices.get(parsed.toolCallId); const idx = toolCallIndices.get(parsed.toolCallId);
if (idx !== undefined) { if (idx !== undefined) {
const part = contentParts[idx]; const part = contentParts[idx];
if (part?.type === "tool-call" && part.toolName === "generate_podcast") { if (part?.type === "tool-call" && part.toolName === "generate_podcast") {
setActivePodcastTaskId(parsed.output.task_id); setActivePodcastTaskId(String(parsed.output.podcast_id));
} }
} }
} }
@ -1485,6 +1420,16 @@ export default function NewChatPage() {
); );
} }
// Show loading state while completing clone
if (isCompletingClone) {
return (
<div className="flex h-[calc(100vh-64px)] flex-col items-center justify-center gap-4">
<Spinner size="lg" />
<div className="text-sm text-muted-foreground">Copying chat content...</div>
</div>
);
}
// Show error state only if we tried to load an existing thread but failed // Show error state only if we tried to load an existing thread but failed
// For new chats (urlChatId === 0), threadId being null is expected (lazy creation) // For new chats (urlChatId === 0), threadId being null is expected (lazy creation)
if (!threadId && urlChatId > 0) { if (!threadId && urlChatId > 0) {

View file

@ -115,13 +115,13 @@ import type {
Membership, Membership,
UpdateMembershipRequest, UpdateMembershipRequest,
} from "@/contracts/types/members.types"; } from "@/contracts/types/members.types";
import type { PermissionInfo } from "@/contracts/types/permissions.types";
import type { import type {
CreateRoleRequest, CreateRoleRequest,
DeleteRoleRequest, DeleteRoleRequest,
Role, Role,
UpdateRoleRequest, UpdateRoleRequest,
} from "@/contracts/types/roles.types"; } from "@/contracts/types/roles.types";
import type { PermissionInfo } from "@/contracts/types/permissions.types";
import { invitesApiService } from "@/lib/apis/invites-api.service"; import { invitesApiService } from "@/lib/apis/invites-api.service";
import { rolesApiService } from "@/lib/apis/roles-api.service"; import { rolesApiService } from "@/lib/apis/roles-api.service";
import { trackSearchSpaceInviteSent, trackSearchSpaceUsersViewed } from "@/lib/posthog/events"; import { trackSearchSpaceInviteSent, trackSearchSpaceUsersViewed } from "@/lib/posthog/events";
@ -980,11 +980,7 @@ function RolesTab({
> >
{/* Create Role Button / Section */} {/* Create Role Button / Section */}
{canCreate && !showCreateRole && ( {canCreate && !showCreateRole && (
<motion.div <motion.div initial={{ opacity: 0 }} animate={{ opacity: 1 }} className="flex justify-end">
initial={{ opacity: 0 }}
animate={{ opacity: 1 }}
className="flex justify-end"
>
<Button onClick={() => setShowCreateRole(true)} className="gap-2"> <Button onClick={() => setShowCreateRole(true)} className="gap-2">
<Plus className="h-4 w-4" /> <Plus className="h-4 w-4" />
Create Custom Role Create Custom Role
@ -1701,15 +1697,18 @@ function CreateRoleSection({
); );
}, []); }, []);
const applyPreset = useCallback((presetKey: keyof typeof ROLE_PRESETS) => { const applyPreset = useCallback(
const preset = ROLE_PRESETS[presetKey]; (presetKey: keyof typeof ROLE_PRESETS) => {
setSelectedPermissions(preset.permissions); const preset = ROLE_PRESETS[presetKey];
if (!name.trim()) { setSelectedPermissions(preset.permissions);
setName(preset.name); if (!name.trim()) {
setDescription(preset.description); setName(preset.name);
} setDescription(preset.description);
toast.success(`Applied ${preset.name} preset`); }
}, [name]); toast.success(`Applied ${preset.name} preset`);
},
[name]
);
const getCategoryStats = useCallback( const getCategoryStats = useCallback(
(category: string) => { (category: string) => {
@ -1857,10 +1856,7 @@ function CreateRoleSection({
const perms = groupedPermissions[category] || []; const perms = groupedPermissions[category] || [];
return ( return (
<div <div key={category} className="rounded-lg border bg-card overflow-hidden">
key={category}
className="rounded-lg border bg-card overflow-hidden"
>
{/* Category Header */} {/* Category Header */}
<div <div
className={cn( className={cn(

View file

@ -1,6 +1,5 @@
"use client"; "use client";
import { useTranslations } from "next-intl";
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading"; import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
import { getBearerToken, redirectToLogin } from "@/lib/auth-utils"; import { getBearerToken, redirectToLogin } from "@/lib/auth-utils";
@ -10,11 +9,10 @@ interface DashboardLayoutProps {
} }
export default function DashboardLayout({ children }: DashboardLayoutProps) { export default function DashboardLayout({ children }: DashboardLayoutProps) {
const t = useTranslations("dashboard");
const [isCheckingAuth, setIsCheckingAuth] = useState(true); const [isCheckingAuth, setIsCheckingAuth] = useState(true);
// Use the global loading screen - spinner animation won't reset // Use the global loading screen - spinner animation won't reset
useGlobalLoadingEffect(isCheckingAuth, t("checking_auth"), "default"); useGlobalLoadingEffect(isCheckingAuth);
useEffect(() => { useEffect(() => {
// Check if user is authenticated // Check if user is authenticated

View file

@ -1,13 +1,10 @@
"use client"; "use client";
import { useTranslations } from "next-intl";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading"; import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
export default function DashboardLoading() { export default function DashboardLoading() {
const t = useTranslations("common");
// Use global loading - spinner animation won't reset when page transitions // Use global loading - spinner animation won't reset when page transitions
useGlobalLoadingEffect(true, t("loading"), "default"); useGlobalLoadingEffect(true);
// Return null - the GlobalLoadingProvider handles the loading UI // Return null - the GlobalLoadingProvider handles the loading UI
return null; return null;

View file

@ -106,7 +106,7 @@ export default function DashboardPage() {
const shouldShowLoading = isLoading || searchSpaces.length > 0; const shouldShowLoading = isLoading || searchSpaces.length > 0;
// Use global loading screen - spinner animation won't reset // Use global loading screen - spinner animation won't reset
useGlobalLoadingEffect(shouldShowLoading, t("fetching_spaces"), "default"); useGlobalLoadingEffect(shouldShowLoading);
if (error) return <ErrorScreen message={error?.message || "Failed to load search spaces"} />; if (error) return <ErrorScreen message={error?.message || "Failed to load search spaces"} />;

View file

@ -0,0 +1,11 @@
"use client";
import { useParams } from "next/navigation";
import { PublicChatView } from "@/components/public-chat/public-chat-view";
export default function PublicChatPage() {
const params = useParams();
const token = params.token as string;
return <PublicChatView shareToken={token} />;
}

View file

@ -0,0 +1,28 @@
import { atomWithMutation } from "jotai-tanstack-query";
import { toast } from "sonner";
import type {
TogglePublicShareRequest,
TogglePublicShareResponse,
} from "@/contracts/types/chat-threads.types";
import { chatThreadsApiService } from "@/lib/apis/chat-threads-api.service";
export const togglePublicShareMutationAtom = atomWithMutation(() => ({
mutationFn: async (request: TogglePublicShareRequest) => {
return chatThreadsApiService.togglePublicShare(request);
},
onSuccess: (response: TogglePublicShareResponse) => {
if (response.enabled && response.share_token) {
const publicUrl = `${window.location.origin}/public/${response.share_token}`;
navigator.clipboard.writeText(publicUrl);
toast.success("Public link copied to clipboard", {
description: "Anyone with this link can view the chat",
});
} else {
toast.success("Public sharing disabled");
}
},
onError: (error: Error) => {
console.error("Failed to toggle public share:", error);
toast.error("Failed to update public sharing");
},
}));

View file

@ -17,6 +17,10 @@ interface CurrentThreadState {
visibility: ChatVisibility | null; visibility: ChatVisibility | null;
hasComments: boolean; hasComments: boolean;
addingCommentToMessageId: number | null; addingCommentToMessageId: number | null;
/** Whether the right-side comments panel is collapsed (desktop only) */
commentsCollapsed: boolean;
publicShareEnabled: boolean;
publicShareToken: string | null;
} }
const initialState: CurrentThreadState = { const initialState: CurrentThreadState = {
@ -24,6 +28,9 @@ const initialState: CurrentThreadState = {
visibility: null, visibility: null,
hasComments: false, hasComments: false,
addingCommentToMessageId: null, addingCommentToMessageId: null,
commentsCollapsed: false,
publicShareEnabled: false,
publicShareToken: null,
}; };
export const currentThreadAtom = atom<CurrentThreadState>(initialState); export const currentThreadAtom = atom<CurrentThreadState>(initialState);
@ -34,6 +41,8 @@ export const commentsEnabledAtom = atom(
export const showCommentsGutterAtom = atom((get) => { export const showCommentsGutterAtom = atom((get) => {
const thread = get(currentThreadAtom); const thread = get(currentThreadAtom);
// Hide gutter if comments are collapsed
if (thread.commentsCollapsed) return false;
return ( return (
thread.visibility === "SEARCH_SPACE" && thread.visibility === "SEARCH_SPACE" &&
(thread.hasComments || thread.addingCommentToMessageId !== null) (thread.hasComments || thread.addingCommentToMessageId !== null)
@ -55,3 +64,34 @@ export const setThreadVisibilityAtom = atom(null, (get, set, newVisibility: Chat
export const resetCurrentThreadAtom = atom(null, (_, set) => { export const resetCurrentThreadAtom = atom(null, (_, set) => {
set(currentThreadAtom, initialState); set(currentThreadAtom, initialState);
}); });
/** Atom to read whether comments panel is collapsed */
export const commentsCollapsedAtom = atom((get) => get(currentThreadAtom).commentsCollapsed);
/** Atom to toggle the comments collapsed state */
export const toggleCommentsCollapsedAtom = atom(null, (get, set) => {
const current = get(currentThreadAtom);
set(currentThreadAtom, { ...current, commentsCollapsed: !current.commentsCollapsed });
});
/** Atom to explicitly set the comments collapsed state */
export const setCommentsCollapsedAtom = atom(null, (get, set, collapsed: boolean) => {
set(currentThreadAtom, { ...get(currentThreadAtom), commentsCollapsed: collapsed });
});
/** Target comment ID to scroll to (from URL navigation or inbox click) */
export const targetCommentIdAtom = atom<number | null>(null);
/** Setter for target comment ID - also ensures comments are not collapsed */
export const setTargetCommentIdAtom = atom(null, (get, set, commentId: number | null) => {
// Ensure comments are not collapsed when navigating to a comment
if (commentId !== null) {
set(currentThreadAtom, { ...get(currentThreadAtom), commentsCollapsed: false });
}
set(targetCommentIdAtom, commentId);
});
/** Clear target after navigation completes */
export const clearTargetCommentIdAtom = atom(null, (_, set) => {
set(targetCommentIdAtom, null);
});

View file

@ -2,29 +2,18 @@ import { atom } from "jotai";
interface GlobalLoadingState { interface GlobalLoadingState {
isLoading: boolean; isLoading: boolean;
message?: string;
variant: "login" | "default";
} }
export const globalLoadingAtom = atom<GlobalLoadingState>({ export const globalLoadingAtom = atom<GlobalLoadingState>({
isLoading: false, isLoading: false,
message: undefined,
variant: "default",
}); });
// Helper atom for showing global loading // Helper atom for showing global loading
export const showGlobalLoadingAtom = atom( export const showGlobalLoadingAtom = atom(null, (get, set) => {
null, set(globalLoadingAtom, { isLoading: true });
( });
get,
set,
{ message, variant = "default" }: { message?: string; variant?: "login" | "default" }
) => {
set(globalLoadingAtom, { isLoading: true, message, variant });
}
);
// Helper atom for hiding global loading // Helper atom for hiding global loading
export const hideGlobalLoadingAtom = atom(null, (get, set) => { export const hideGlobalLoadingAtom = atom(null, (get, set) => {
set(globalLoadingAtom, { isLoading: false, message: undefined, variant: "default" }); set(globalLoadingAtom, { isLoading: false });
}); });

View file

@ -1,7 +1,6 @@
"use client"; "use client";
import { useSearchParams } from "next/navigation"; import { useSearchParams } from "next/navigation";
import { useTranslations } from "next-intl";
import { useEffect } from "react"; import { useEffect } from "react";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading"; import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
import { getAndClearRedirectPath, setBearerToken } from "@/lib/auth-utils"; import { getAndClearRedirectPath, setBearerToken } from "@/lib/auth-utils";
@ -27,11 +26,10 @@ const TokenHandler = ({
tokenParamName = "token", tokenParamName = "token",
storageKey = "surfsense_bearer_token", storageKey = "surfsense_bearer_token",
}: TokenHandlerProps) => { }: TokenHandlerProps) => {
const t = useTranslations("auth");
const searchParams = useSearchParams(); const searchParams = useSearchParams();
// Always show loading for this component - spinner animation won't reset // Always show loading for this component - spinner animation won't reset
useGlobalLoadingEffect(true, t("processing_authentication"), "default"); useGlobalLoadingEffect(true);
useEffect(() => { useEffect(() => {
// Only run on client-side // Only run on client-side

View file

@ -5,13 +5,16 @@ import {
MessagePrimitive, MessagePrimitive,
useAssistantState, useAssistantState,
} from "@assistant-ui/react"; } from "@assistant-ui/react";
import { useAtom, useAtomValue } from "jotai"; import { useAtom, useAtomValue, useSetAtom } from "jotai";
import { CheckIcon, CopyIcon, DownloadIcon, MessageSquare, RefreshCwIcon } from "lucide-react"; import { CheckIcon, CopyIcon, DownloadIcon, MessageSquare, RefreshCwIcon } from "lucide-react";
import type { FC } from "react"; import type { FC } from "react";
import { useContext, useEffect, useRef, useState } from "react"; import { useContext, useEffect, useMemo, useRef, useState } from "react";
import { import {
addingCommentToMessageIdAtom, addingCommentToMessageIdAtom,
clearTargetCommentIdAtom,
commentsCollapsedAtom,
commentsEnabledAtom, commentsEnabledAtom,
targetCommentIdAtom,
} from "@/atoms/chat/current-thread.atom"; } from "@/atoms/chat/current-thread.atom";
import { activeSearchSpaceIdAtom } from "@/atoms/search-spaces/search-space-query.atoms"; import { activeSearchSpaceIdAtom } from "@/atoms/search-spaces/search-space-query.atoms";
import { BranchPicker } from "@/components/assistant-ui/branch-picker"; import { BranchPicker } from "@/components/assistant-ui/branch-picker";
@ -102,6 +105,7 @@ export const AssistantMessage: FC = () => {
const searchSpaceId = useAtomValue(activeSearchSpaceIdAtom); const searchSpaceId = useAtomValue(activeSearchSpaceIdAtom);
const dbMessageId = parseMessageId(messageId); const dbMessageId = parseMessageId(messageId);
const commentsEnabled = useAtomValue(commentsEnabledAtom); const commentsEnabled = useAtomValue(commentsEnabledAtom);
const commentsCollapsed = useAtomValue(commentsCollapsedAtom);
const [addingCommentToMessageId, setAddingCommentToMessageId] = useAtom( const [addingCommentToMessageId, setAddingCommentToMessageId] = useAtom(
addingCommentToMessageIdAtom addingCommentToMessageIdAtom
); );
@ -115,11 +119,23 @@ export const AssistantMessage: FC = () => {
const isLastMessage = useAssistantState(({ message }) => message?.isLast ?? false); const isLastMessage = useAssistantState(({ message }) => message?.isLast ?? false);
const isMessageStreaming = isThreadRunning && isLastMessage; const isMessageStreaming = isThreadRunning && isLastMessage;
const { data: commentsData } = useComments({ const { data: commentsData, isSuccess: commentsLoaded } = useComments({
messageId: dbMessageId ?? 0, messageId: dbMessageId ?? 0,
enabled: !!dbMessageId, enabled: !!dbMessageId,
}); });
// Target comment navigation - read target from global atom
const targetCommentId = useAtomValue(targetCommentIdAtom);
const clearTargetCommentId = useSetAtom(clearTargetCommentIdAtom);
// Check if target comment belongs to this message (including replies)
const hasTargetComment = useMemo(() => {
if (!targetCommentId || !commentsData?.comments) return false;
return commentsData.comments.some(
(c) => c.id === targetCommentId || c.replies?.some((r) => r.id === targetCommentId)
);
}, [targetCommentId, commentsData]);
const commentCount = commentsData?.total_count ?? 0; const commentCount = commentsData?.total_count ?? 0;
const hasComments = commentCount > 0; const hasComments = commentCount > 0;
const isAddingComment = dbMessageId !== null && addingCommentToMessageId === dbMessageId; const isAddingComment = dbMessageId !== null && addingCommentToMessageId === dbMessageId;
@ -144,6 +160,24 @@ export const AssistantMessage: FC = () => {
return () => observer.disconnect(); return () => observer.disconnect();
}, []); }, []);
// Auto-open sheet on mobile/tablet when this message has the target comment
useEffect(() => {
if (hasTargetComment && !isDesktop && commentsLoaded) {
setIsSheetOpen(true);
}
}, [hasTargetComment, isDesktop, commentsLoaded]);
// Scroll message into view when it contains target comment (desktop)
useEffect(() => {
if (hasTargetComment && isDesktop && commentsLoaded && messageRef.current) {
// Small delay to ensure DOM is ready after comments render
const timeoutId = setTimeout(() => {
messageRef.current?.scrollIntoView({ behavior: "smooth", block: "center" });
}, 100);
return () => clearTimeout(timeoutId);
}
}, [hasTargetComment, isDesktop, commentsLoaded]);
const showCommentTrigger = searchSpaceId && commentsEnabled && !isMessageStreaming && dbMessageId; const showCommentTrigger = searchSpaceId && commentsEnabled && !isMessageStreaming && dbMessageId;
// Determine sheet side based on screen size // Determine sheet side based on screen size
@ -157,8 +191,8 @@ export const AssistantMessage: FC = () => {
> >
<AssistantMessageInner /> <AssistantMessageInner />
{/* Desktop comment panel - only on lg screens and above */} {/* Desktop comment panel - only on lg screens and above, hidden when collapsed */}
{searchSpaceId && commentsEnabled && !isMessageStreaming && ( {searchSpaceId && commentsEnabled && !isMessageStreaming && !commentsCollapsed && (
<div className="absolute left-full top-0 ml-4 hidden lg:block w-72"> <div className="absolute left-full top-0 ml-4 hidden lg:block w-72">
<div <div
className={`sticky top-3 ${showCommentPanel ? "opacity-100" : "opacity-0 group-hover:opacity-100"} transition-opacity`} className={`sticky top-3 ${showCommentPanel ? "opacity-100" : "opacity-0 group-hover:opacity-100"} transition-opacity`}

View file

@ -24,11 +24,6 @@
"enabled": true, "enabled": true,
"status": "warning", "status": "warning",
"statusMessage": "Some requests may be blocked if not using Firecrawl." "statusMessage": "Some requests may be blocked if not using Firecrawl."
},
"COMPOSIO_GOOGLE_DRIVE_CONNECTOR": {
"enabled": false,
"status": "disabled",
"statusMessage": "Not available yet."
} }
}, },
"globalSettings": { "globalSettings": {

View file

@ -218,7 +218,7 @@ export const IndexingConfigurationView: FC<IndexingConfigurationViewProps> = ({
{isStartingIndexing ? ( {isStartingIndexing ? (
<> <>
<Spinner size="sm" className="mr-2" /> <Spinner size="sm" className="mr-2" />
Starting... Starting
</> </>
) : ( ) : (
"Start Indexing" "Start Indexing"

View file

@ -252,13 +252,12 @@ const defaultComponents = memoizeMarkdownComponents({
<hr className={cn("aui-md-hr my-5 border-b", className)} {...props} /> <hr className={cn("aui-md-hr my-5 border-b", className)} {...props} />
), ),
table: ({ className, ...props }) => ( table: ({ className, ...props }) => (
<table <div className="aui-md-table-wrapper my-5 w-full overflow-x-auto">
className={cn( <table
"aui-md-table my-5 w-full border-separate border-spacing-0 overflow-y-auto", className={cn("aui-md-table w-full min-w-max border-separate border-spacing-0", className)}
className {...props}
)} />
{...props} </div>
/>
), ),
th: ({ className, children, ...props }) => ( th: ({ className, children, ...props }) => (
<th <th

View file

@ -110,7 +110,7 @@ const ThreadContent: FC<{ header?: React.ReactNode }> = ({ header }) => {
}} }}
/> />
<ThreadPrimitive.ViewportFooter className="aui-thread-viewport-footer sticky bottom-0 z-20 mx-auto mt-auto flex w-full max-w-(--thread-max-width) flex-col gap-4 overflow-visible rounded-t-3xl bg-background pb-4 md:pb-6"> <ThreadPrimitive.ViewportFooter className="aui-thread-viewport-footer sticky bottom-0 z-10 mx-auto mt-auto flex w-full max-w-(--thread-max-width) flex-col gap-4 overflow-visible rounded-t-3xl bg-background pb-4 md:pb-6">
<ThreadScrollToBottom /> <ThreadScrollToBottom />
<AssistantIf condition={({ thread }) => !thread.isEmpty}> <AssistantIf condition={({ thread }) => !thread.isEmpty}>
<div className="fade-in slide-in-from-bottom-4 animate-in duration-500 ease-out fill-mode-both"> <div className="fade-in slide-in-from-bottom-4 animate-in duration-500 ease-out fill-mode-both">

View file

@ -0,0 +1,88 @@
"use client";
import { motion } from "motion/react";
import Link from "next/link";
import { AUTH_TYPE, BACKEND_URL } from "@/lib/env-config";
import { trackLoginAttempt } from "@/lib/posthog/events";
import { cn } from "@/lib/utils";
// Official Google "G" logo with brand colors
const GoogleLogo = ({ className }: { className?: string }) => (
<svg className={className} viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">
<path
d="M22.56 12.25c0-.78-.07-1.53-.2-2.25H12v4.26h5.92c-.26 1.37-1.04 2.53-2.21 3.31v2.77h3.57c2.08-1.92 3.28-4.74 3.28-8.09z"
fill="#4285F4"
/>
<path
d="M12 23c2.97 0 5.46-.98 7.28-2.66l-3.57-2.77c-.98.66-2.23 1.06-3.71 1.06-2.86 0-5.29-1.93-6.16-4.53H2.18v2.84C3.99 20.53 7.7 23 12 23z"
fill="#34A853"
/>
<path
d="M5.84 14.09c-.22-.66-.35-1.36-.35-2.09s.13-1.43.35-2.09V7.07H2.18C1.43 8.55 1 10.22 1 12s.43 3.45 1.18 4.93l2.85-2.22.81-.62z"
fill="#FBBC05"
/>
<path
d="M12 5.38c1.62 0 3.06.56 4.21 1.64l3.15-3.15C17.45 2.09 14.97 1 12 1 7.7 1 3.99 3.47 2.18 7.07l3.66 2.84c.87-2.6 3.3-4.53 6.16-4.53z"
fill="#EA4335"
/>
</svg>
);
interface SignInButtonProps {
/**
* - "desktop": Hidden on mobile, visible on md+ (for navbar with separate mobile menu)
* - "mobile": Full width, always visible (for mobile menu)
* - "compact": Always visible, compact size (for headers)
*/
variant?: "desktop" | "mobile" | "compact";
}
export const SignInButton = ({ variant = "desktop" }: SignInButtonProps) => {
const isGoogleAuth = AUTH_TYPE === "GOOGLE";
const handleGoogleLogin = () => {
trackLoginAttempt("google");
window.location.href = `${BACKEND_URL}/auth/google/authorize-redirect`;
};
const getClassName = () => {
if (variant === "desktop") {
return isGoogleAuth
? "hidden rounded-full bg-white px-5 py-2 text-sm text-neutral-700 shadow-md ring-1 ring-neutral-200/50 hover:shadow-lg md:flex dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50"
: "hidden rounded-full bg-black px-8 py-2 text-sm font-bold text-white shadow-[0px_-2px_0px_0px_rgba(255,255,255,0.4)_inset] md:block dark:bg-white dark:text-black";
}
if (variant === "compact") {
return isGoogleAuth
? "rounded-full bg-white px-4 py-1.5 text-sm text-neutral-700 shadow-md ring-1 ring-neutral-200/50 hover:shadow-lg dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50"
: "rounded-full bg-black px-6 py-1.5 text-sm font-bold text-white shadow-[0px_-2px_0px_0px_rgba(255,255,255,0.4)_inset] dark:bg-white dark:text-black";
}
// mobile
return isGoogleAuth
? "w-full rounded-lg bg-white px-8 py-2.5 text-neutral-700 shadow-md ring-1 ring-neutral-200/50 dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50 touch-manipulation"
: "w-full rounded-lg bg-black px-8 py-2 font-medium text-white shadow-[0px_-2px_0px_0px_rgba(255,255,255,0.4)_inset] dark:bg-white dark:text-black text-center touch-manipulation";
};
if (isGoogleAuth) {
return (
<motion.button
type="button"
onClick={handleGoogleLogin}
whileHover={{ scale: 1.02 }}
whileTap={{ scale: 0.98 }}
className={cn(
"flex items-center justify-center gap-2 font-semibold transition-all duration-200",
getClassName()
)}
>
<GoogleLogo className="h-4 w-4" />
<span>Sign In</span>
</motion.button>
);
}
return (
<Link href="/login" className={getClassName()}>
Sign In
</Link>
);
};

View file

@ -1,6 +1,9 @@
"use client"; "use client";
import { useAtomValue, useSetAtom } from "jotai";
import { MessageSquare } from "lucide-react"; import { MessageSquare } from "lucide-react";
import { useEffect, useRef, useState } from "react";
import { clearTargetCommentIdAtom, targetCommentIdAtom } from "@/atoms/chat/current-thread.atom";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"; import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
@ -76,10 +79,9 @@ function renderMentions(content: string): React.ReactNode {
const mentionPattern = /@\{([^}]+)\}/g; const mentionPattern = /@\{([^}]+)\}/g;
const parts: React.ReactNode[] = []; const parts: React.ReactNode[] = [];
let lastIndex = 0; let lastIndex = 0;
let match: RegExpExecArray | null;
while ((match = mentionPattern.exec(content)) !== null) { for (const match of content.matchAll(mentionPattern)) {
if (match.index > lastIndex) { if (match.index !== undefined && match.index > lastIndex) {
parts.push(content.slice(lastIndex, match.index)); parts.push(content.slice(lastIndex, match.index));
} }
@ -90,7 +92,7 @@ function renderMentions(content: string): React.ReactNode {
</span> </span>
); );
lastIndex = match.index + match[0].length; lastIndex = (match.index ?? 0) + match[0].length;
} }
if (lastIndex < content.length) { if (lastIndex < content.length) {
@ -113,6 +115,37 @@ export function CommentItem({
members = [], members = [],
membersLoading = false, membersLoading = false,
}: CommentItemProps) { }: CommentItemProps) {
const commentRef = useRef<HTMLDivElement>(null);
const [isHighlighted, setIsHighlighted] = useState(false);
// Target comment navigation
const targetCommentId = useAtomValue(targetCommentIdAtom);
const clearTargetCommentId = useSetAtom(clearTargetCommentIdAtom);
const isTarget = targetCommentId === comment.id;
// Scroll into view and highlight when this is the target comment
useEffect(() => {
if (isTarget && commentRef.current) {
// Small delay to ensure DOM is ready
const scrollTimeoutId = setTimeout(() => {
commentRef.current?.scrollIntoView({ behavior: "smooth", block: "center" });
setIsHighlighted(true);
}, 150);
// Remove highlight and clear target after delay
const clearTimeoutId = setTimeout(() => {
setIsHighlighted(false);
clearTargetCommentId();
}, 3000);
return () => {
clearTimeout(scrollTimeoutId);
clearTimeout(clearTimeoutId);
};
}
}, [isTarget, clearTargetCommentId]);
const displayName = const displayName =
comment.author?.displayName || comment.author?.email.split("@")[0] || "Unknown"; comment.author?.displayName || comment.author?.email.split("@")[0] || "Unknown";
const email = comment.author?.email || ""; const email = comment.author?.email || "";
@ -122,7 +155,14 @@ export function CommentItem({
}; };
return ( return (
<div className={cn("group flex gap-3")} data-comment-id={comment.id}> <div
ref={commentRef}
className={cn(
"group flex gap-3 rounded-lg p-1 -m-1 transition-all duration-300",
isHighlighted && "ring-2 ring-primary ring-offset-2 ring-offset-background"
)}
data-comment-id={comment.id}
>
<Avatar className="size-8 shrink-0"> <Avatar className="size-8 shrink-0">
{comment.author?.avatarUrl && ( {comment.author?.avatarUrl && (
<AvatarImage src={comment.author.avatarUrl} alt={displayName} /> <AvatarImage src={comment.author.avatarUrl} alt={displayName} />

View file

@ -1,4 +1,5 @@
"use client"; "use client";
import { useFeatureFlagVariantKey } from "@posthog/react";
import { AnimatePresence, motion } from "motion/react"; import { AnimatePresence, motion } from "motion/react";
import Image from "next/image"; import Image from "next/image";
import Link from "next/link"; import Link from "next/link";
@ -33,6 +34,8 @@ const GoogleLogo = ({ className }: { className?: string }) => (
export function HeroSection() { export function HeroSection() {
const containerRef = useRef<HTMLDivElement>(null); const containerRef = useRef<HTMLDivElement>(null);
const parentRef = useRef<HTMLDivElement>(null); const parentRef = useRef<HTMLDivElement>(null);
const heroVariant = useFeatureFlagVariantKey("notebooklm_flag");
const isNotebookLMVariant = heroVariant === "notebooklm";
return ( return (
<div <div
@ -83,12 +86,22 @@ export function HeroSection() {
<h2 className="relative z-50 mx-auto mb-4 mt-4 max-w-4xl text-balance text-center text-3xl font-semibold tracking-tight text-gray-700 md:text-7xl dark:text-neutral-300"> <h2 className="relative z-50 mx-auto mb-4 mt-4 max-w-4xl text-balance text-center text-3xl font-semibold tracking-tight text-gray-700 md:text-7xl dark:text-neutral-300">
<Balancer> <Balancer>
The AI Workspace{" "} {isNotebookLMVariant ? (
<div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]"> <div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]">
<div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white"> <div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white">
<span className="">Built for Teams</span> <span className="">NotebookLM for Teams</span>
</div>
</div> </div>
</div> ) : (
<>
The AI Workspace{" "}
<div className="relative mx-auto inline-block w-max filter-[drop-shadow(0px_1px_3px_rgba(27,37,80,0.14))]">
<div className="text-black [text-shadow:0_0_rgba(0,0,0,0.1)] dark:text-white">
<span className="">Built for Teams</span>
</div>
</div>
</>
)}
</Balancer> </Balancer>
</h2> </h2>
{/* // TODO:aCTUAL DESCRITION */} {/* // TODO:aCTUAL DESCRITION */}
@ -98,12 +111,7 @@ export function HeroSection() {
</p> </p>
<div className="mb-10 mt-8 flex w-full flex-col items-center justify-center gap-4 px-8 sm:flex-row md:mb-20"> <div className="mb-10 mt-8 flex w-full flex-col items-center justify-center gap-4 px-8 sm:flex-row md:mb-20">
<GetStartedButton /> <GetStartedButton />
{/* <Link <ContactSalesButton />
href="/pricing"
className="shadow-input group relative z-20 flex h-10 w-full cursor-pointer items-center justify-center space-x-2 rounded-lg bg-white p-px px-4 py-2 text-sm font-semibold leading-6 text-black no-underline transition duration-200 hover:-translate-y-0.5 sm:w-52 dark:bg-neutral-800 dark:text-white"
>
Start Free Trial
</Link> */}
</div> </div>
<div <div
ref={containerRef} ref={containerRef}
@ -193,6 +201,21 @@ function GetStartedButton() {
); );
} }
function ContactSalesButton() {
return (
<motion.div whileHover={{ scale: 1.02, y: -2 }} whileTap={{ scale: 0.98 }}>
<Link
href="https://calendly.com/eric-surfsense/surfsense-meeting"
target="_blank"
rel="noopener noreferrer"
className="group relative z-20 flex h-11 w-full cursor-pointer items-center justify-center gap-2 rounded-xl bg-white px-6 py-2.5 text-sm font-semibold text-neutral-700 shadow-lg ring-1 ring-neutral-200/50 transition-shadow duration-300 hover:shadow-xl sm:w-56 dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50"
>
Contact Sales
</Link>
</motion.div>
);
}
const BackgroundGrids = () => { const BackgroundGrids = () => {
return ( return (
<div className="pointer-events-none absolute inset-0 z-0 grid h-full w-full -rotate-45 transform select-none grid-cols-2 gap-10 md:grid-cols-4"> <div className="pointer-events-none absolute inset-0 z-0 grid h-full w-full -rotate-45 transform select-none grid-cols-2 gap-10 md:grid-cols-4">

View file

@ -9,78 +9,12 @@ import {
import { AnimatePresence, motion } from "motion/react"; import { AnimatePresence, motion } from "motion/react";
import Link from "next/link"; import Link from "next/link";
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { SignInButton } from "@/components/auth/sign-in-button";
import { Logo } from "@/components/Logo"; import { Logo } from "@/components/Logo";
import { ThemeTogglerComponent } from "@/components/theme/theme-toggle"; import { ThemeTogglerComponent } from "@/components/theme/theme-toggle";
import { useGithubStars } from "@/hooks/use-github-stars"; import { useGithubStars } from "@/hooks/use-github-stars";
import { AUTH_TYPE, BACKEND_URL } from "@/lib/env-config";
import { trackLoginAttempt } from "@/lib/posthog/events";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
// Official Google "G" logo with brand colors
const GoogleLogo = ({ className }: { className?: string }) => (
<svg className={className} viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">
<path
d="M22.56 12.25c0-.78-.07-1.53-.2-2.25H12v4.26h5.92c-.26 1.37-1.04 2.53-2.21 3.31v2.77h3.57c2.08-1.92 3.28-4.74 3.28-8.09z"
fill="#4285F4"
/>
<path
d="M12 23c2.97 0 5.46-.98 7.28-2.66l-3.57-2.77c-.98.66-2.23 1.06-3.71 1.06-2.86 0-5.29-1.93-6.16-4.53H2.18v2.84C3.99 20.53 7.7 23 12 23z"
fill="#34A853"
/>
<path
d="M5.84 14.09c-.22-.66-.35-1.36-.35-2.09s.13-1.43.35-2.09V7.07H2.18C1.43 8.55 1 10.22 1 12s.43 3.45 1.18 4.93l2.85-2.22.81-.62z"
fill="#FBBC05"
/>
<path
d="M12 5.38c1.62 0 3.06.56 4.21 1.64l3.15-3.15C17.45 2.09 14.97 1 12 1 7.7 1 3.99 3.47 2.18 7.07l3.66 2.84c.87-2.6 3.3-4.53 6.16-4.53z"
fill="#EA4335"
/>
</svg>
);
// Sign in button component that handles both Google OAuth and local auth
const SignInButton = ({ variant = "desktop" }: { variant?: "desktop" | "mobile" }) => {
const isGoogleAuth = AUTH_TYPE === "GOOGLE";
const handleGoogleLogin = () => {
trackLoginAttempt("google");
window.location.href = `${BACKEND_URL}/auth/google/authorize-redirect`;
};
if (isGoogleAuth) {
return (
<motion.button
type="button"
onClick={handleGoogleLogin}
whileHover={{ scale: 1.02 }}
whileTap={{ scale: 0.98 }}
className={cn(
"flex items-center justify-center gap-2 font-semibold transition-all duration-200",
variant === "desktop"
? "hidden rounded-full bg-white px-5 py-2 text-sm text-neutral-700 shadow-md ring-1 ring-neutral-200/50 hover:shadow-lg md:flex dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50"
: "w-full rounded-lg bg-white px-8 py-2.5 text-neutral-700 shadow-md ring-1 ring-neutral-200/50 dark:bg-neutral-900 dark:text-neutral-200 dark:ring-neutral-700/50 touch-manipulation"
)}
>
<GoogleLogo className="h-4 w-4" />
<span>Sign In</span>
</motion.button>
);
}
return (
<Link
href="/login"
className={cn(
variant === "desktop"
? "hidden rounded-full bg-black px-8 py-2 text-sm font-bold text-white shadow-[0px_-2px_0px_0px_rgba(255,255,255,0.4)_inset] md:block dark:bg-white dark:text-black"
: "w-full rounded-lg bg-black px-8 py-2 font-medium text-white shadow-[0px_-2px_0px_0px_rgba(255,255,255,0.4)_inset] dark:bg-white dark:text-black text-center touch-manipulation"
)}
>
Sign In
</Link>
);
};
export const Navbar = () => { export const Navbar = () => {
const [isScrolled, setIsScrolled] = useState(false); const [isScrolled, setIsScrolled] = useState(false);

View file

@ -0,0 +1,36 @@
"use client";
import { createContext, type ReactNode, useContext } from "react";
interface SidebarContextValue {
isCollapsed: boolean;
setIsCollapsed: (collapsed: boolean) => void;
toggleCollapsed: () => void;
}
const SidebarContext = createContext<SidebarContextValue | null>(null);
interface SidebarProviderProps {
children: ReactNode;
value: SidebarContextValue;
}
export function SidebarProvider({ children, value }: SidebarProviderProps) {
return <SidebarContext.Provider value={value}>{children}</SidebarContext.Provider>;
}
export function useSidebarContext(): SidebarContextValue {
const context = useContext(SidebarContext);
if (!context) {
throw new Error("useSidebarContext must be used within a SidebarProvider");
}
return context;
}
/**
* Safe version that returns null if not within provider
* Useful for components that may be rendered outside the sidebar context
*/
export function useSidebarContextSafe(): SidebarContextValue | null {
return useContext(SidebarContext);
}

View file

@ -1 +1,2 @@
export { SidebarProvider, useSidebarContext, useSidebarContextSafe } from "./SidebarContext";
export { useSidebarState } from "./useSidebarState"; export { useSidebarState } from "./useSidebarState";

View file

@ -32,7 +32,6 @@ import { CreateSearchSpaceDialog } from "../ui/dialogs";
import { LayoutShell } from "../ui/shell"; import { LayoutShell } from "../ui/shell";
import { AllPrivateChatsSidebar } from "../ui/sidebar/AllPrivateChatsSidebar"; import { AllPrivateChatsSidebar } from "../ui/sidebar/AllPrivateChatsSidebar";
import { AllSharedChatsSidebar } from "../ui/sidebar/AllSharedChatsSidebar"; import { AllSharedChatsSidebar } from "../ui/sidebar/AllSharedChatsSidebar";
import { InboxSidebar } from "../ui/sidebar/InboxSidebar";
interface LayoutDataProviderProps { interface LayoutDataProviderProps {
searchSpaceId: string; searchSpaceId: string;
@ -87,10 +86,10 @@ export function LayoutDataProvider({
enabled: !!searchSpaceId, enabled: !!searchSpaceId,
}); });
// Fetch threads // Fetch threads (40 total to allow up to 20 per section - shared/private)
const { data: threadsData } = useQuery({ const { data: threadsData } = useQuery({
queryKey: ["threads", searchSpaceId, { limit: 4 }], queryKey: ["threads", searchSpaceId, { limit: 40 }],
queryFn: () => fetchThreads(Number(searchSpaceId), 4), queryFn: () => fetchThreads(Number(searchSpaceId), 40),
enabled: !!searchSpaceId, enabled: !!searchSpaceId,
}); });
@ -100,23 +99,60 @@ export function LayoutDataProvider({
// Inbox sidebar state // Inbox sidebar state
const [isInboxSidebarOpen, setIsInboxSidebarOpen] = useState(false); const [isInboxSidebarOpen, setIsInboxSidebarOpen] = useState(false);
const [isInboxDocked, setIsInboxDocked] = useState(false);
// Search space dialog state // Search space dialog state
const [isCreateSearchSpaceDialogOpen, setIsCreateSearchSpaceDialogOpen] = useState(false); const [isCreateSearchSpaceDialogOpen, setIsCreateSearchSpaceDialogOpen] = useState(false);
// Inbox hook // Inbox hooks - separate data sources for mentions and status tabs
// This ensures each tab has independent pagination and data loading
const userId = user?.id ? String(user.id) : null; const userId = user?.id ? String(user.id) : null;
// Mentions: Only fetch "new_mention" type notifications
const { const {
inboxItems, inboxItems: mentionItems,
unreadCount, unreadCount: mentionUnreadCount,
loading: inboxLoading, loading: mentionLoading,
loadingMore: inboxLoadingMore, loadingMore: mentionLoadingMore,
hasMore: inboxHasMore, hasMore: mentionHasMore,
loadMore: inboxLoadMore, loadMore: mentionLoadMore,
markAsRead, markAsRead: markMentionAsRead,
markAllAsRead, markAllAsRead: markAllMentionsAsRead,
} = useInbox(userId, Number(searchSpaceId) || null, "new_mention");
// Status: Fetch all types (will be filtered client-side to status types)
// We pass null to get all, then InboxSidebar filters to status types
const {
inboxItems: statusItems,
unreadCount: statusUnreadCount,
loading: statusLoading,
loadingMore: statusLoadingMore,
hasMore: statusHasMore,
loadMore: statusLoadMore,
markAsRead: markStatusAsRead,
markAllAsRead: markAllStatusAsRead,
} = useInbox(userId, Number(searchSpaceId) || null, null); } = useInbox(userId, Number(searchSpaceId) || null, null);
// Combined unread count for nav badge (mentions take priority for visibility)
const totalUnreadCount = mentionUnreadCount + statusUnreadCount;
// Unified mark as read that delegates to the correct hook
const markAsRead = useCallback(
async (id: number) => {
// Try both - one will succeed based on which list has the item
const mentionResult = await markMentionAsRead(id);
if (mentionResult) return true;
return markStatusAsRead(id);
},
[markMentionAsRead, markStatusAsRead]
);
// Mark all as read for both types
const markAllAsRead = useCallback(async () => {
await Promise.all([markAllMentionsAsRead(), markAllStatusAsRead()]);
return true;
}, [markAllMentionsAsRead, markAllStatusAsRead]);
// Delete dialogs state // Delete dialogs state
const [showDeleteChatDialog, setShowDeleteChatDialog] = useState(false); const [showDeleteChatDialog, setShowDeleteChatDialog] = useState(false);
const [chatToDelete, setChatToDelete] = useState<{ id: number; name: string } | null>(null); const [chatToDelete, setChatToDelete] = useState<{ id: number; name: string } | null>(null);
@ -197,7 +233,7 @@ export function LayoutDataProvider({
url: "#inbox", // Special URL to indicate this is handled differently url: "#inbox", // Special URL to indicate this is handled differently
icon: Inbox, icon: Inbox,
isActive: isInboxSidebarOpen, isActive: isInboxSidebarOpen,
badge: unreadCount > 0 ? formatInboxCount(unreadCount) : undefined, badge: totalUnreadCount > 0 ? formatInboxCount(totalUnreadCount) : undefined,
}, },
{ {
title: "Documents", title: "Documents",
@ -206,7 +242,7 @@ export function LayoutDataProvider({
isActive: pathname?.includes("/documents"), isActive: pathname?.includes("/documents"),
}, },
], ],
[searchSpaceId, pathname, isInboxSidebarOpen, unreadCount] [searchSpaceId, pathname, isInboxSidebarOpen, totalUnreadCount]
); );
// Handlers // Handlers
@ -298,9 +334,9 @@ export function LayoutDataProvider({
const handleNavItemClick = useCallback( const handleNavItemClick = useCallback(
(item: NavItem) => { (item: NavItem) => {
// Handle inbox specially - open sidebar instead of navigating // Handle inbox specially - toggle sidebar instead of navigating
if (item.url === "#inbox") { if (item.url === "#inbox") {
setIsInboxSidebarOpen(true); setIsInboxSidebarOpen((prev) => !prev);
return; return;
} }
router.push(item.url); router.push(item.url);
@ -462,6 +498,32 @@ export function LayoutDataProvider({
theme={theme} theme={theme}
setTheme={setTheme} setTheme={setTheme}
isChatPage={isChatPage} isChatPage={isChatPage}
inbox={{
isOpen: isInboxSidebarOpen,
onOpenChange: setIsInboxSidebarOpen,
// Separate data sources for each tab
mentions: {
items: mentionItems,
unreadCount: mentionUnreadCount,
loading: mentionLoading,
loadingMore: mentionLoadingMore,
hasMore: mentionHasMore,
loadMore: mentionLoadMore,
},
status: {
items: statusItems,
unreadCount: statusUnreadCount,
loading: statusLoading,
loadingMore: statusLoadingMore,
hasMore: statusHasMore,
loadMore: statusLoadMore,
},
totalUnreadCount,
markAsRead,
markAllAsRead,
isDocked: isInboxDocked,
onDockedChange: setIsInboxDocked,
}}
> >
{children} {children}
</LayoutShell> </LayoutShell>
@ -607,20 +669,6 @@ export function LayoutDataProvider({
searchSpaceId={searchSpaceId} searchSpaceId={searchSpaceId}
/> />
{/* Inbox Sidebar */}
<InboxSidebar
open={isInboxSidebarOpen}
onOpenChange={setIsInboxSidebarOpen}
inboxItems={inboxItems}
unreadCount={unreadCount}
loading={inboxLoading}
loadingMore={inboxLoadingMore}
hasMore={inboxHasMore}
loadMore={inboxLoadMore}
markAsRead={markAsRead}
markAllAsRead={markAllAsRead}
/>
{/* Create Search Space Dialog */} {/* Create Search Space Dialog */}
<CreateSearchSpaceDialog <CreateSearchSpaceDialog
open={isCreateSearchSpaceDialogOpen} open={isCreateSearchSpaceDialogOpen}

View file

@ -1,14 +1,43 @@
"use client"; "use client";
import { useState } from "react"; import { useMemo, useState } from "react";
import { TooltipProvider } from "@/components/ui/tooltip"; import { TooltipProvider } from "@/components/ui/tooltip";
import type { InboxItem } from "@/hooks/use-inbox";
import { useIsMobile } from "@/hooks/use-mobile"; import { useIsMobile } from "@/hooks/use-mobile";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import { useSidebarState } from "../../hooks"; import { SidebarProvider, useSidebarState } from "../../hooks";
import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types"; import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types";
import { Header } from "../header"; import { Header } from "../header";
import { IconRail } from "../icon-rail"; import { IconRail } from "../icon-rail";
import { MobileSidebar, MobileSidebarTrigger, Sidebar } from "../sidebar"; import { InboxSidebar, MobileSidebar, MobileSidebarTrigger, Sidebar } from "../sidebar";
// Tab-specific data source props
interface TabDataSource {
items: InboxItem[];
unreadCount: number;
loading: boolean;
loadingMore?: boolean;
hasMore?: boolean;
loadMore?: () => void;
}
// Inbox-related props with separate data sources per tab
interface InboxProps {
isOpen: boolean;
onOpenChange: (open: boolean) => void;
/** Mentions tab data source with independent pagination */
mentions: TabDataSource;
/** Status tab data source with independent pagination */
status: TabDataSource;
/** Combined unread count for nav badge */
totalUnreadCount: number;
markAsRead: (id: number) => Promise<boolean>;
markAllAsRead: () => Promise<boolean>;
/** Whether the inbox is docked (permanent) */
isDocked?: boolean;
/** Callback to change docked state */
onDockedChange?: (docked: boolean) => void;
}
interface LayoutShellProps { interface LayoutShellProps {
searchSpaces: SearchSpace[]; searchSpaces: SearchSpace[];
@ -42,6 +71,8 @@ interface LayoutShellProps {
isChatPage?: boolean; isChatPage?: boolean;
children: React.ReactNode; children: React.ReactNode;
className?: string; className?: string;
// Inbox props
inbox?: InboxProps;
} }
export function LayoutShell({ export function LayoutShell({
@ -76,111 +107,169 @@ export function LayoutShell({
isChatPage = false, isChatPage = false,
children, children,
className, className,
inbox,
}: LayoutShellProps) { }: LayoutShellProps) {
const isMobile = useIsMobile(); const isMobile = useIsMobile();
const [mobileMenuOpen, setMobileMenuOpen] = useState(false); const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
const { isCollapsed, toggleCollapsed } = useSidebarState(defaultCollapsed); const { isCollapsed, setIsCollapsed, toggleCollapsed } = useSidebarState(defaultCollapsed);
// Memoize context value to prevent unnecessary re-renders
const sidebarContextValue = useMemo(
() => ({ isCollapsed, setIsCollapsed, toggleCollapsed }),
[isCollapsed, setIsCollapsed, toggleCollapsed]
);
// Mobile layout // Mobile layout
if (isMobile) { if (isMobile) {
return ( return (
<TooltipProvider delayDuration={0}> <SidebarProvider value={sidebarContextValue}>
<div className={cn("flex h-screen w-full flex-col bg-background", className)}> <TooltipProvider delayDuration={0}>
<Header <div className={cn("flex h-screen w-full flex-col bg-background", className)}>
breadcrumb={breadcrumb} <Header
mobileMenuTrigger={<MobileSidebarTrigger onClick={() => setMobileMenuOpen(true)} />} breadcrumb={breadcrumb}
/> mobileMenuTrigger={<MobileSidebarTrigger onClick={() => setMobileMenuOpen(true)} />}
/>
<MobileSidebar <MobileSidebar
isOpen={mobileMenuOpen} isOpen={mobileMenuOpen}
onOpenChange={setMobileMenuOpen} onOpenChange={setMobileMenuOpen}
searchSpaces={searchSpaces} searchSpaces={searchSpaces}
activeSearchSpaceId={activeSearchSpaceId} activeSearchSpaceId={activeSearchSpaceId}
onSearchSpaceSelect={onSearchSpaceSelect} onSearchSpaceSelect={onSearchSpaceSelect}
onSearchSpaceDelete={onSearchSpaceDelete} onSearchSpaceDelete={onSearchSpaceDelete}
onSearchSpaceSettings={onSearchSpaceSettings} onSearchSpaceSettings={onSearchSpaceSettings}
onAddSearchSpace={onAddSearchSpace} onAddSearchSpace={onAddSearchSpace}
searchSpace={searchSpace} searchSpace={searchSpace}
navItems={navItems} navItems={navItems}
onNavItemClick={onNavItemClick} onNavItemClick={onNavItemClick}
chats={chats} chats={chats}
sharedChats={sharedChats} sharedChats={sharedChats}
activeChatId={activeChatId} activeChatId={activeChatId}
onNewChat={onNewChat} onNewChat={onNewChat}
onChatSelect={onChatSelect} onChatSelect={onChatSelect}
onChatDelete={onChatDelete} onChatDelete={onChatDelete}
onChatArchive={onChatArchive} onChatArchive={onChatArchive}
onViewAllSharedChats={onViewAllSharedChats} onViewAllSharedChats={onViewAllSharedChats}
onViewAllPrivateChats={onViewAllPrivateChats} onViewAllPrivateChats={onViewAllPrivateChats}
user={user} user={user}
onSettings={onSettings} onSettings={onSettings}
onManageMembers={onManageMembers} onManageMembers={onManageMembers}
onUserSettings={onUserSettings} onUserSettings={onUserSettings}
onLogout={onLogout} onLogout={onLogout}
pageUsage={pageUsage} pageUsage={pageUsage}
theme={theme} theme={theme}
setTheme={setTheme} setTheme={setTheme}
/> />
<main className={cn("flex-1", isChatPage ? "overflow-hidden" : "overflow-auto")}> <main className={cn("flex-1", isChatPage ? "overflow-hidden" : "overflow-auto")}>
{children} {children}
</main> </main>
</div>
</TooltipProvider> {/* Mobile Inbox Sidebar - only render when open to avoid scroll blocking */}
{inbox?.isOpen && (
<InboxSidebar
open={inbox.isOpen}
onOpenChange={inbox.onOpenChange}
mentions={inbox.mentions}
status={inbox.status}
totalUnreadCount={inbox.totalUnreadCount}
markAsRead={inbox.markAsRead}
markAllAsRead={inbox.markAllAsRead}
onCloseMobileSidebar={() => setMobileMenuOpen(false)}
/>
)}
</div>
</TooltipProvider>
</SidebarProvider>
); );
} }
// Desktop layout // Desktop layout
return ( return (
<TooltipProvider delayDuration={0}> <SidebarProvider value={sidebarContextValue}>
<div className={cn("flex h-screen w-full gap-2 p-2 overflow-hidden bg-muted/40", className)}> <TooltipProvider delayDuration={0}>
<div className="hidden md:flex overflow-hidden"> <div
<IconRail className={cn("flex h-screen w-full gap-2 p-2 overflow-hidden bg-muted/40", className)}
searchSpaces={searchSpaces} >
activeSearchSpaceId={activeSearchSpaceId} <div className="hidden md:flex overflow-hidden">
onSearchSpaceSelect={onSearchSpaceSelect} <IconRail
onSearchSpaceDelete={onSearchSpaceDelete} searchSpaces={searchSpaces}
onSearchSpaceSettings={onSearchSpaceSettings} activeSearchSpaceId={activeSearchSpaceId}
onAddSearchSpace={onAddSearchSpace} onSearchSpaceSelect={onSearchSpaceSelect}
/> onSearchSpaceDelete={onSearchSpaceDelete}
onSearchSpaceSettings={onSearchSpaceSettings}
onAddSearchSpace={onAddSearchSpace}
/>
</div>
{/* Main container with sidebar and content - relative for inbox positioning */}
<div className="relative flex flex-1 rounded-xl border bg-background overflow-hidden">
<Sidebar
searchSpace={searchSpace}
isCollapsed={isCollapsed}
onToggleCollapse={toggleCollapsed}
navItems={navItems}
onNavItemClick={onNavItemClick}
chats={chats}
sharedChats={sharedChats}
activeChatId={activeChatId}
onNewChat={onNewChat}
onChatSelect={onChatSelect}
onChatDelete={onChatDelete}
onChatArchive={onChatArchive}
onViewAllSharedChats={onViewAllSharedChats}
onViewAllPrivateChats={onViewAllPrivateChats}
user={user}
onSettings={onSettings}
onManageMembers={onManageMembers}
onUserSettings={onUserSettings}
onLogout={onLogout}
pageUsage={pageUsage}
theme={theme}
setTheme={setTheme}
className="hidden md:flex border-r shrink-0"
/>
{/* Docked Inbox Sidebar - renders as flex sibling between sidebar and content */}
{inbox?.isDocked && (
<InboxSidebar
open={inbox.isOpen}
onOpenChange={inbox.onOpenChange}
mentions={inbox.mentions}
status={inbox.status}
totalUnreadCount={inbox.totalUnreadCount}
markAsRead={inbox.markAsRead}
markAllAsRead={inbox.markAllAsRead}
isDocked={inbox.isDocked}
onDockedChange={inbox.onDockedChange}
/>
)}
<main className="flex-1 flex flex-col min-w-0">
<Header breadcrumb={breadcrumb} />
<div className={cn("flex-1", isChatPage ? "overflow-hidden" : "overflow-auto")}>
{children}
</div>
</main>
{/* Floating Inbox Sidebar - positioned absolutely on top of content */}
{inbox && !inbox.isDocked && (
<InboxSidebar
open={inbox.isOpen}
onOpenChange={inbox.onOpenChange}
mentions={inbox.mentions}
status={inbox.status}
totalUnreadCount={inbox.totalUnreadCount}
markAsRead={inbox.markAsRead}
markAllAsRead={inbox.markAllAsRead}
isDocked={false}
onDockedChange={inbox.onDockedChange}
/>
)}
</div>
</div> </div>
</TooltipProvider>
<div className="flex flex-1 rounded-xl border bg-background overflow-hidden"> </SidebarProvider>
<Sidebar
searchSpace={searchSpace}
isCollapsed={isCollapsed}
onToggleCollapse={toggleCollapsed}
navItems={navItems}
onNavItemClick={onNavItemClick}
chats={chats}
sharedChats={sharedChats}
activeChatId={activeChatId}
onNewChat={onNewChat}
onChatSelect={onChatSelect}
onChatDelete={onChatDelete}
onChatArchive={onChatArchive}
onViewAllSharedChats={onViewAllSharedChats}
onViewAllPrivateChats={onViewAllPrivateChats}
user={user}
onSettings={onSettings}
onManageMembers={onManageMembers}
onUserSettings={onUserSettings}
onLogout={onLogout}
pageUsage={pageUsage}
theme={theme}
setTheme={setTheme}
className="hidden md:flex border-r shrink-0"
/>
<main className="flex-1 flex flex-col min-w-0">
<Header breadcrumb={breadcrumb} />
<div className={cn("flex-1", isChatPage ? "overflow-hidden" : "overflow-auto")}>
{children}
</div>
</main>
</div>
</div>
</TooltipProvider>
); );
} }

File diff suppressed because it is too large Load diff

View file

@ -3,7 +3,6 @@
import { FolderOpen, MessageSquare, PenSquare } from "lucide-react"; import { FolderOpen, MessageSquare, PenSquare } from "lucide-react";
import { useTranslations } from "next-intl"; import { useTranslations } from "next-intl";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { ScrollArea } from "@/components/ui/scroll-area";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"; import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types"; import type { ChatItem, NavItem, PageUsage, SearchSpace, User } from "../../types/layout.types";
@ -121,101 +120,113 @@ export function Sidebar({
)} )}
</div> </div>
{/* Scrollable content */} {/* Chat sections - fills available space */}
<ScrollArea className="flex-1"> {isCollapsed ? (
{isCollapsed ? ( <div className="flex-1 flex flex-col items-center gap-2 py-2 w-[60px]">
<div className="flex flex-col items-center gap-2 py-2 w-[60px]"> {(chats.length > 0 || sharedChats.length > 0) && (
{(chats.length > 0 || sharedChats.length > 0) && ( <Tooltip>
<Tooltip> <TooltipTrigger asChild>
<TooltipTrigger asChild> <Button
<Button variant="ghost"
variant="ghost" size="icon"
size="icon" className="h-10 w-10"
className="h-10 w-10" onClick={() => onToggleCollapse?.()}
onClick={() => onToggleCollapse?.()} >
> <MessageSquare className="h-4 w-4" />
<MessageSquare className="h-4 w-4" /> <span className="sr-only">{t("chats")}</span>
<span className="sr-only">{t("chats")}</span> </Button>
</Button> </TooltipTrigger>
</TooltipTrigger> <TooltipContent side="right">
<TooltipContent side="right"> {t("chats")} ({chats.length + sharedChats.length})
{t("chats")} ({chats.length + sharedChats.length}) </TooltipContent>
</TooltipContent> </Tooltip>
</Tooltip> )}
</div>
) : (
<div className="flex-1 flex flex-col gap-1 py-2 w-[240px] min-h-0 overflow-hidden">
{/* Shared Chats Section - takes half the space */}
<SidebarSection
title={t("shared_chats")}
defaultOpen={true}
fillHeight={true}
action={
onViewAllSharedChats ? (
<Tooltip>
<TooltipTrigger asChild>
<Button
variant="ghost"
size="icon"
className="h-8 w-8 shrink-0 hover:bg-transparent hover:text-current focus-visible:ring-0"
onClick={onViewAllSharedChats}
>
<FolderOpen className="h-4 w-4" />
</Button>
</TooltipTrigger>
<TooltipContent side="top">
{t("view_all_shared_chats") || "View all shared chats"}
</TooltipContent>
</Tooltip>
) : undefined
}
>
{sharedChats.length > 0 ? (
<div className="relative flex-1 min-h-0">
<div
className={`flex flex-col gap-0.5 h-full overflow-y-auto scrollbar-thin scrollbar-thumb-muted-foreground/20 scrollbar-track-transparent ${sharedChats.length > 4 ? "pb-8" : ""}`}
>
{sharedChats.slice(0, 20).map((chat) => (
<ChatListItem
key={chat.id}
name={chat.name}
isActive={chat.id === activeChatId}
archived={chat.archived}
onClick={() => onChatSelect(chat)}
onArchive={() => onChatArchive?.(chat)}
onDelete={() => onChatDelete?.(chat)}
/>
))}
</div>
{/* Gradient fade indicator when more than 4 items */}
{sharedChats.length > 4 && (
<div className="pointer-events-none absolute bottom-0 left-0 right-0 h-8 bg-gradient-to-t from-sidebar via-sidebar/90 to-transparent" />
)}
</div>
) : (
<p className="px-2 py-1 text-xs text-muted-foreground">{t("no_shared_chats")}</p>
)} )}
</div> </SidebarSection>
) : (
<div className="flex flex-col gap-1 py-2 w-[240px]">
{/* Shared Chats Section */}
<SidebarSection
title={t("shared_chats")}
defaultOpen={true}
action={
onViewAllSharedChats ? (
<Tooltip>
<TooltipTrigger asChild>
<Button
variant="ghost"
size="icon"
className="h-8 w-8 shrink-0 hover:bg-transparent hover:text-current focus-visible:ring-0"
onClick={onViewAllSharedChats}
>
<FolderOpen className="h-4 w-4" />
</Button>
</TooltipTrigger>
<TooltipContent side="top">
{t("view_all_shared_chats") || "View all shared chats"}
</TooltipContent>
</Tooltip>
) : undefined
}
>
{sharedChats.length > 0 ? (
<div className="flex flex-col gap-0.5">
{sharedChats.map((chat) => (
<ChatListItem
key={chat.id}
name={chat.name}
isActive={chat.id === activeChatId}
archived={chat.archived}
onClick={() => onChatSelect(chat)}
onArchive={() => onChatArchive?.(chat)}
onDelete={() => onChatDelete?.(chat)}
/>
))}
</div>
) : (
<p className="px-2 py-1 text-xs text-muted-foreground">{t("no_shared_chats")}</p>
)}
</SidebarSection>
{/* Private Chats Section */} {/* Private Chats Section - takes half the space */}
<SidebarSection <SidebarSection
title={t("chats")} title={t("chats")}
defaultOpen={true} defaultOpen={true}
action={ fillHeight={true}
onViewAllPrivateChats ? ( action={
<Tooltip> onViewAllPrivateChats ? (
<TooltipTrigger asChild> <Tooltip>
<Button <TooltipTrigger asChild>
variant="ghost" <Button
size="icon" variant="ghost"
className="h-8 w-8 shrink-0 hover:bg-transparent hover:text-current focus-visible:ring-0" size="icon"
onClick={onViewAllPrivateChats} className="h-8 w-8 shrink-0 hover:bg-transparent hover:text-current focus-visible:ring-0"
> onClick={onViewAllPrivateChats}
<FolderOpen className="h-4 w-4" /> >
</Button> <FolderOpen className="h-4 w-4" />
</TooltipTrigger> </Button>
<TooltipContent side="top"> </TooltipTrigger>
{t("view_all_private_chats") || "View all private chats"} <TooltipContent side="top">
</TooltipContent> {t("view_all_private_chats") || "View all private chats"}
</Tooltip> </TooltipContent>
) : undefined </Tooltip>
} ) : undefined
> }
{chats.length > 0 ? ( >
<div className="flex flex-col gap-0.5"> {chats.length > 0 ? (
{chats.map((chat) => ( <div className="relative flex-1 min-h-0">
<div
className={`flex flex-col gap-0.5 h-full overflow-y-auto scrollbar-thin scrollbar-thumb-muted-foreground/20 scrollbar-track-transparent ${chats.length > 4 ? "pb-8" : ""}`}
>
{chats.slice(0, 20).map((chat) => (
<ChatListItem <ChatListItem
key={chat.id} key={chat.id}
name={chat.name} name={chat.name}
@ -227,13 +238,17 @@ export function Sidebar({
/> />
))} ))}
</div> </div>
) : ( {/* Gradient fade indicator when more than 4 items */}
<p className="px-2 py-1 text-xs text-muted-foreground">{t("no_chats")}</p> {chats.length > 4 && (
)} <div className="pointer-events-none absolute bottom-0 left-0 right-0 h-8 bg-gradient-to-t from-sidebar via-sidebar/90 to-transparent" />
</SidebarSection> )}
</div> </div>
)} ) : (
</ScrollArea> <p className="px-2 py-1 text-xs text-muted-foreground">{t("no_chats")}</p>
)}
</SidebarSection>
</div>
)}
{/* Footer */} {/* Footer */}
<div className="mt-auto border-t"> <div className="mt-auto border-t">

View file

@ -11,6 +11,8 @@ interface SidebarSectionProps {
children: React.ReactNode; children: React.ReactNode;
action?: React.ReactNode; action?: React.ReactNode;
persistentAction?: React.ReactNode; persistentAction?: React.ReactNode;
className?: string;
fillHeight?: boolean;
} }
export function SidebarSection({ export function SidebarSection({
@ -19,12 +21,18 @@ export function SidebarSection({
children, children,
action, action,
persistentAction, persistentAction,
className,
fillHeight = false,
}: SidebarSectionProps) { }: SidebarSectionProps) {
const [isOpen, setIsOpen] = useState(defaultOpen); const [isOpen, setIsOpen] = useState(defaultOpen);
return ( return (
<Collapsible open={isOpen} onOpenChange={setIsOpen} className="overflow-hidden"> <Collapsible
<div className="flex items-center group/section"> open={isOpen}
onOpenChange={setIsOpen}
className={cn("overflow-hidden", fillHeight && "flex flex-col flex-1 min-h-0", className)}
>
<div className="flex items-center group/section shrink-0">
<CollapsibleTrigger className="flex flex-1 items-center gap-1.5 px-2 py-1.5 text-xs font-medium text-muted-foreground hover:text-foreground transition-colors min-w-0"> <CollapsibleTrigger className="flex flex-1 items-center gap-1.5 px-2 py-1.5 text-xs font-medium text-muted-foreground hover:text-foreground transition-colors min-w-0">
<ChevronRight <ChevronRight
className={cn( className={cn(
@ -48,8 +56,14 @@ export function SidebarSection({
)} )}
</div> </div>
<CollapsibleContent className="overflow-hidden"> <CollapsibleContent
<div className="px-2 pb-2">{children}</div> className={cn("overflow-hidden", fillHeight && "flex-1 flex flex-col min-h-0")}
>
<div
className={cn("px-2 pb-2", fillHeight && "flex-1 flex flex-col min-h-0 overflow-hidden")}
>
{children}
</div>
</CollapsibleContent> </CollapsibleContent>
</Collapsible> </Collapsible>
); );

View file

@ -2,9 +2,10 @@
import { useQueryClient } from "@tanstack/react-query"; import { useQueryClient } from "@tanstack/react-query";
import { useAtomValue, useSetAtom } from "jotai"; import { useAtomValue, useSetAtom } from "jotai";
import { User, Users } from "lucide-react"; import { Globe, Link2, User, Users } from "lucide-react";
import { useCallback, useState } from "react"; import { useCallback, useState } from "react";
import { toast } from "sonner"; import { toast } from "sonner";
import { togglePublicShareMutationAtom } from "@/atoms/chat/chat-thread-mutation.atoms";
import { currentThreadAtom, setThreadVisibilityAtom } from "@/atoms/chat/current-thread.atom"; import { currentThreadAtom, setThreadVisibilityAtom } from "@/atoms/chat/current-thread.atom";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover"; import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
@ -48,11 +49,19 @@ export function ChatShareButton({ thread, onVisibilityChange, className }: ChatS
// Use Jotai atom for visibility (single source of truth) // Use Jotai atom for visibility (single source of truth)
const currentThreadState = useAtomValue(currentThreadAtom); const currentThreadState = useAtomValue(currentThreadAtom);
const setCurrentThreadState = useSetAtom(currentThreadAtom);
const setThreadVisibility = useSetAtom(setThreadVisibilityAtom); const setThreadVisibility = useSetAtom(setThreadVisibilityAtom);
// Public share mutation
const { mutateAsync: togglePublicShare, isPending: isTogglingPublic } = useAtomValue(
togglePublicShareMutationAtom
);
// Use Jotai visibility if available (synced from chat page), otherwise fall back to thread prop // Use Jotai visibility if available (synced from chat page), otherwise fall back to thread prop
const currentVisibility = currentThreadState.visibility ?? thread?.visibility ?? "PRIVATE"; const currentVisibility = currentThreadState.visibility ?? thread?.visibility ?? "PRIVATE";
const isOwnThread = thread?.created_by_id !== null; // If we have the thread, we can modify it const isPublicEnabled =
currentThreadState.publicShareEnabled ?? thread?.public_share_enabled ?? false;
const publicShareToken = currentThreadState.publicShareToken ?? null;
const handleVisibilityChange = useCallback( const handleVisibilityChange = useCallback(
async (newVisibility: ChatVisibility) => { async (newVisibility: ChatVisibility) => {
@ -87,12 +96,45 @@ export function ChatShareButton({ thread, onVisibilityChange, className }: ChatS
[thread, currentVisibility, onVisibilityChange, queryClient, setThreadVisibility] [thread, currentVisibility, onVisibilityChange, queryClient, setThreadVisibility]
); );
const handlePublicShareToggle = useCallback(async () => {
if (!thread) return;
try {
const response = await togglePublicShare({
thread_id: thread.id,
enabled: !isPublicEnabled,
});
// Update atom state with response
setCurrentThreadState((prev) => ({
...prev,
publicShareEnabled: response.enabled,
publicShareToken: response.share_token,
}));
} catch (error) {
console.error("Failed to toggle public share:", error);
}
}, [thread, isPublicEnabled, togglePublicShare, setCurrentThreadState]);
const handleCopyPublicLink = useCallback(async () => {
if (!publicShareToken) return;
const publicUrl = `${window.location.origin}/public/${publicShareToken}`;
await navigator.clipboard.writeText(publicUrl);
toast.success("Public link copied to clipboard");
}, [publicShareToken]);
// Don't show if no thread (new chat that hasn't been created yet) // Don't show if no thread (new chat that hasn't been created yet)
if (!thread) { if (!thread) {
return null; return null;
} }
const CurrentIcon = currentVisibility === "PRIVATE" ? User : Users; const CurrentIcon = isPublicEnabled ? Globe : currentVisibility === "PRIVATE" ? User : Users;
const buttonLabel = isPublicEnabled
? "Public"
: currentVisibility === "PRIVATE"
? "Private"
: "Shared";
return ( return (
<Popover open={open} onOpenChange={setOpen}> <Popover open={open} onOpenChange={setOpen}>
@ -108,9 +150,7 @@ export function ChatShareButton({ thread, onVisibilityChange, className }: ChatS
)} )}
> >
<CurrentIcon className="h-4 w-4" /> <CurrentIcon className="h-4 w-4" />
<span className="hidden md:inline text-sm"> <span className="hidden md:inline text-sm">{buttonLabel}</span>
{currentVisibility === "PRIVATE" ? "Private" : "Shared"}
</span>
</Button> </Button>
</PopoverTrigger> </PopoverTrigger>
</TooltipTrigger> </TooltipTrigger>
@ -124,6 +164,7 @@ export function ChatShareButton({ thread, onVisibilityChange, className }: ChatS
onCloseAutoFocus={(e) => e.preventDefault()} onCloseAutoFocus={(e) => e.preventDefault()}
> >
<div className="p-1.5 space-y-1"> <div className="p-1.5 space-y-1">
{/* Visibility Options */}
{visibilityOptions.map((option) => { {visibilityOptions.map((option) => {
const isSelected = currentVisibility === option.value; const isSelected = currentVisibility === option.value;
const Icon = option.icon; const Icon = option.icon;
@ -166,6 +207,72 @@ export function ChatShareButton({ thread, onVisibilityChange, className }: ChatS
</button> </button>
); );
})} })}
{/* Divider */}
<div className="border-t border-border my-1" />
{/* Public Share Option */}
<button
type="button"
onClick={handlePublicShareToggle}
disabled={isTogglingPublic}
className={cn(
"w-full flex items-center gap-2.5 px-2.5 py-2 rounded-md transition-all",
"hover:bg-accent/50 cursor-pointer",
"focus:outline-none",
"disabled:opacity-50 disabled:cursor-not-allowed",
isPublicEnabled && "bg-accent/80"
)}
>
<div
className={cn(
"size-7 rounded-md shrink-0 grid place-items-center",
isPublicEnabled ? "bg-primary/10" : "bg-muted"
)}
>
<Globe
className={cn(
"size-4 block",
isPublicEnabled ? "text-primary" : "text-muted-foreground"
)}
/>
</div>
<div className="flex-1 text-left min-w-0">
<div className="flex items-center gap-1.5">
<span className={cn("text-sm font-medium", isPublicEnabled && "text-primary")}>
Public
</span>
{isPublicEnabled && (
<span className="text-xs bg-primary/10 text-primary px-1.5 py-0.5 rounded">
ON
</span>
)}
</div>
<p className="text-xs text-muted-foreground mt-0.5 leading-snug">
Anyone with the link can read
</p>
</div>
{isPublicEnabled && publicShareToken && (
<div
role="button"
tabIndex={0}
onClick={(e) => {
e.stopPropagation();
handleCopyPublicLink();
}}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.stopPropagation();
handleCopyPublicLink();
}
}}
className="shrink-0 p-1.5 rounded-md hover:bg-muted transition-colors cursor-pointer"
title="Copy public link"
>
<Link2 className="size-4 text-muted-foreground" />
</div>
)}
</button>
</div> </div>
</PopoverContent> </PopoverContent>
</Popover> </Popover>

View file

@ -1,7 +1,6 @@
"use client"; "use client";
import { useAtomValue } from "jotai"; import { useAtomValue } from "jotai";
import { useTranslations } from "next-intl";
import { useEffect, useRef, useState } from "react"; import { useEffect, useRef, useState } from "react";
import { currentUserAtom } from "@/atoms/user/user-query.atoms"; import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading"; import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
@ -30,7 +29,6 @@ interface ElectricProviderProps {
* 5. Provides client via context - hooks should use useElectricClient() * 5. Provides client via context - hooks should use useElectricClient()
*/ */
export function ElectricProvider({ children }: ElectricProviderProps) { export function ElectricProvider({ children }: ElectricProviderProps) {
const t = useTranslations("common");
const [electricClient, setElectricClient] = useState<ElectricClient | null>(null); const [electricClient, setElectricClient] = useState<ElectricClient | null>(null);
const [error, setError] = useState<Error | null>(null); const [error, setError] = useState<Error | null>(null);
const { const {
@ -117,7 +115,7 @@ export function ElectricProvider({ children }: ElectricProviderProps) {
const shouldShowLoading = hasToken && isUserLoaded && !!user?.id && !electricClient && !error; const shouldShowLoading = hasToken && isUserLoaded && !!user?.id && !electricClient && !error;
// Use global loading hook with ownership tracking - prevents flash during transitions // Use global loading hook with ownership tracking - prevents flash during transitions
useGlobalLoadingEffect(shouldShowLoading, t("initializing"), "default"); useGlobalLoadingEffect(shouldShowLoading);
// For non-authenticated pages (like landing page), render immediately with null context // For non-authenticated pages (like landing page), render immediately with null context
// Also render immediately if user query failed (e.g., token expired) // Also render immediately if user query failed (e.g., token expired)

View file

@ -3,9 +3,7 @@
import { useAtomValue } from "jotai"; import { useAtomValue } from "jotai";
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { createPortal } from "react-dom"; import { createPortal } from "react-dom";
import { AmbientBackground } from "@/app/(home)/login/AmbientBackground";
import { globalLoadingAtom } from "@/atoms/ui/loading.atoms"; import { globalLoadingAtom } from "@/atoms/ui/loading.atoms";
import { Logo } from "@/components/Logo";
import { Spinner } from "@/components/ui/spinner"; import { Spinner } from "@/components/ui/spinner";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
@ -18,7 +16,7 @@ import { cn } from "@/lib/utils";
*/ */
export function GlobalLoadingProvider({ children }: { children: React.ReactNode }) { export function GlobalLoadingProvider({ children }: { children: React.ReactNode }) {
const [mounted, setMounted] = useState(false); const [mounted, setMounted] = useState(false);
const { isLoading, message, variant } = useAtomValue(globalLoadingAtom); const { isLoading } = useAtomValue(globalLoadingAtom);
useEffect(() => { useEffect(() => {
setMounted(true); setMounted(true);
@ -36,35 +34,11 @@ export function GlobalLoadingProvider({ children }: { children: React.ReactNode
)} )}
aria-hidden={!isLoading} aria-hidden={!isLoading}
> >
{variant === "login" ? ( <div className="flex min-h-screen flex-col items-center justify-center bg-background">
<div className="relative w-full h-full overflow-hidden bg-background"> <div className="h-12 w-12 flex items-center justify-center">
<AmbientBackground /> <Spinner size="lg" className="text-muted-foreground" />
<div className="mx-auto flex h-screen max-w-lg flex-col items-center justify-center">
<Logo className="rounded-md" />
<div className="mt-8 flex flex-col items-center space-y-4">
<div className="h-12 w-12 flex items-center justify-center">
{/* Spinner is always mounted, animation never resets */}
<Spinner size="lg" className="text-muted-foreground" />
</div>
<span className="text-muted-foreground text-sm min-h-[1.25rem] text-center max-w-xs">
{message}
</span>
</div>
</div>
</div> </div>
) : ( </div>
<div className="flex min-h-screen flex-col items-center justify-center bg-background">
<div className="flex flex-col items-center space-y-4">
<div className="h-12 w-12 flex items-center justify-center">
{/* Spinner is always mounted, animation never resets */}
<Spinner size="xl" className="text-primary" />
</div>
<span className="text-muted-foreground text-sm min-h-[1.25rem] text-center max-w-md px-4">
{message}
</span>
</div>
</div>
)}
</div> </div>
); );

View file

@ -3,6 +3,7 @@
import { PostHogProvider as PHProvider } from "@posthog/react"; import { PostHogProvider as PHProvider } from "@posthog/react";
import posthog from "posthog-js"; import posthog from "posthog-js";
import type { ReactNode } from "react"; import type { ReactNode } from "react";
import "../../instrumentation-client";
import { PostHogIdentify } from "./PostHogIdentify"; import { PostHogIdentify } from "./PostHogIdentify";
interface PostHogProviderProps { interface PostHogProviderProps {
@ -10,8 +11,8 @@ interface PostHogProviderProps {
} }
export function PostHogProvider({ children }: PostHogProviderProps) { export function PostHogProvider({ children }: PostHogProviderProps) {
// posthog-js is already initialized in instrumentation-client.ts // posthog-js is initialized by importing instrumentation-client.ts above
// We just need to wrap the app with the PostHogProvider for hook access // We wrap the app with the PostHogProvider for hook access
return ( return (
<PHProvider client={posthog}> <PHProvider client={posthog}>
<PostHogIdentify /> <PostHogIdentify />

View file

@ -0,0 +1,71 @@
"use client";
import { Copy, Loader2 } from "lucide-react";
import { useRouter, useSearchParams } from "next/navigation";
import { useCallback, useEffect, useRef, useState } from "react";
import { toast } from "sonner";
import { Button } from "@/components/ui/button";
import { publicChatApiService } from "@/lib/apis/public-chat-api.service";
import { getBearerToken } from "@/lib/auth-utils";
interface PublicChatFooterProps {
shareToken: string;
}
export function PublicChatFooter({ shareToken }: PublicChatFooterProps) {
const router = useRouter();
const searchParams = useSearchParams();
const [isCloning, setIsCloning] = useState(false);
const hasAutoCloned = useRef(false);
const triggerClone = useCallback(async () => {
setIsCloning(true);
try {
const response = await publicChatApiService.clonePublicChat({
share_token: shareToken,
});
// Redirect to the new chat page (content will be loaded there)
router.push(`/dashboard/${response.search_space_id}/new-chat/${response.thread_id}`);
} catch (error) {
const message = error instanceof Error ? error.message : "Failed to copy chat";
toast.error(message);
setIsCloning(false);
}
}, [shareToken, router]);
// Auto-trigger clone if user just logged in with action=clone
useEffect(() => {
const action = searchParams.get("action");
const token = getBearerToken();
// Only auto-clone once, if authenticated and action=clone is present
if (action === "clone" && token && !hasAutoCloned.current && !isCloning) {
hasAutoCloned.current = true;
triggerClone();
}
}, [searchParams, isCloning, triggerClone]);
const handleCopyAndContinue = async () => {
const token = getBearerToken();
if (!token) {
// Include action=clone in the returnUrl so it persists after login
const returnUrl = encodeURIComponent(`/public/${shareToken}?action=clone`);
router.push(`/login?returnUrl=${returnUrl}`);
return;
}
await triggerClone();
};
return (
<div className="mx-auto flex max-w-(--thread-max-width) items-center justify-center px-4 py-4">
<Button size="lg" onClick={handleCopyAndContinue} disabled={isCloning} className="gap-2">
{isCloning ? <Loader2 className="size-4 animate-spin" /> : <Copy className="size-4" />}
Copy and continue this chat
</Button>
</div>
);
}

View file

@ -0,0 +1,64 @@
"use client";
import { AssistantRuntimeProvider } from "@assistant-ui/react";
import { Loader2 } from "lucide-react";
import { Navbar } from "@/components/homepage/navbar";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast";
import { LinkPreviewToolUI } from "@/components/tool-ui/link-preview";
import { ScrapeWebpageToolUI } from "@/components/tool-ui/scrape-webpage";
import { usePublicChat } from "@/hooks/use-public-chat";
import { usePublicChatRuntime } from "@/hooks/use-public-chat-runtime";
import { PublicChatFooter } from "./public-chat-footer";
import { PublicThread } from "./public-thread";
interface PublicChatViewProps {
shareToken: string;
}
export function PublicChatView({ shareToken }: PublicChatViewProps) {
const { data, isLoading, error } = usePublicChat(shareToken);
const runtime = usePublicChatRuntime({ data });
if (isLoading) {
return (
<main className="min-h-screen bg-linear-to-b from-gray-50 to-gray-100 text-gray-900 dark:from-black dark:to-gray-900 dark:text-white overflow-x-hidden">
<Navbar />
<div className="flex h-screen items-center justify-center">
<Loader2 className="size-8 animate-spin text-muted-foreground" />
</div>
</main>
);
}
if (error || !data) {
return (
<main className="min-h-screen bg-linear-to-b from-gray-50 to-gray-100 text-gray-900 dark:from-black dark:to-gray-900 dark:text-white overflow-x-hidden">
<Navbar />
<div className="flex h-screen flex-col items-center justify-center gap-4 px-4 text-center">
<h1 className="text-2xl font-semibold">Chat not found</h1>
<p className="text-muted-foreground">
This chat may have been removed or is no longer public.
</p>
</div>
</main>
);
}
return (
<main className="min-h-screen bg-linear-to-b from-gray-50 to-gray-100 text-gray-900 dark:from-black dark:to-gray-900 dark:text-white overflow-x-hidden">
<Navbar />
<AssistantRuntimeProvider runtime={runtime}>
{/* Tool UIs for rendering tool results */}
<GeneratePodcastToolUI />
<LinkPreviewToolUI />
<DisplayImageToolUI />
<ScrapeWebpageToolUI />
<div className="flex h-screen flex-col pt-16">
<PublicThread footer={<PublicChatFooter shareToken={shareToken} />} />
</div>
</AssistantRuntimeProvider>
</main>
);
}

View file

@ -0,0 +1,173 @@
"use client";
import {
ActionBarPrimitive,
AssistantIf,
MessagePrimitive,
ThreadPrimitive,
useAssistantState,
} from "@assistant-ui/react";
import { CheckIcon, CopyIcon } from "lucide-react";
import { type FC, type ReactNode, useState } from "react";
import { MarkdownText } from "@/components/assistant-ui/markdown-text";
import { ToolFallback } from "@/components/assistant-ui/tool-fallback";
import { TooltipIconButton } from "@/components/assistant-ui/tooltip-icon-button";
interface PublicThreadProps {
footer?: ReactNode;
}
/**
* Read-only thread component for public chat viewing.
* No composer, no edit capabilities - just message display.
*/
export const PublicThread: FC<PublicThreadProps> = ({ footer }) => {
return (
<ThreadPrimitive.Root
className="aui-root aui-thread-root @container flex h-full min-h-0 flex-col bg-background"
style={{
["--thread-max-width" as string]: "44rem",
}}
>
<ThreadPrimitive.Viewport className="aui-thread-viewport relative flex flex-1 min-h-0 flex-col overflow-y-auto px-4 pt-4">
<ThreadPrimitive.Messages
components={{
UserMessage: PublicUserMessage,
AssistantMessage: PublicAssistantMessage,
}}
/>
{/* Spacer to ensure footer doesn't overlap last message */}
<div className="h-24" />
</ThreadPrimitive.Viewport>
{footer && (
<div className="sticky bottom-0 z-20 border-t bg-background/95 backdrop-blur supports-backdrop-filter:bg-background/60">
{footer}
</div>
)}
</ThreadPrimitive.Root>
);
};
/**
* User avatar component with fallback to initials
*/
interface AuthorMetadata {
displayName: string | null;
avatarUrl: string | null;
}
const UserAvatar: FC<AuthorMetadata & { hasError: boolean; onError: () => void }> = ({
displayName,
avatarUrl,
hasError,
onError,
}) => {
const initials = displayName
? displayName
.split(" ")
.map((n) => n[0])
.join("")
.toUpperCase()
.slice(0, 2)
: "U";
if (avatarUrl && !hasError) {
return (
<img
src={avatarUrl}
alt={displayName || "User"}
className="size-8 rounded-full object-cover"
referrerPolicy="no-referrer"
onError={onError}
/>
);
}
return (
<div className="flex size-8 items-center justify-center rounded-full bg-primary/10 text-xs font-medium text-primary">
{initials}
</div>
);
};
/**
 * Read-only user message bubble for the public chat view.
 * Reads author metadata from the message's custom metadata and, when
 * present, renders the author's avatar beside the bubble.
 */
const PublicUserMessage: FC = () => {
	const metadata = useAssistantState(({ message }) => message?.metadata);
	// NOTE(review): `custom.author` is an untyped passthrough; the cast assumes
	// it matches AuthorMetadata — confirm against the public chat API payload.
	const author = metadata?.custom?.author as AuthorMetadata | undefined;
	return (
		<MessagePrimitive.Root
			className="aui-user-message-root fade-in slide-in-from-bottom-1 mx-auto grid w-full max-w-(--thread-max-width) animate-in auto-rows-auto grid-cols-[minmax(72px,1fr)_auto] content-start gap-y-2 px-2 py-3 duration-150 [&:where(>*)]:col-start-2"
			data-role="user"
		>
			<div className="aui-user-message-content-wrapper col-start-2 min-w-0 flex items-end gap-2">
				<div className="flex-1 min-w-0">
					<div className="aui-user-message-content wrap-break-word rounded-2xl bg-muted px-4 py-2.5 text-foreground">
						<MessagePrimitive.Parts />
					</div>
				</div>
				{author && (
					<div className="shrink-0 mb-1.5">
						<UserAvatarWithState displayName={author.displayName} avatarUrl={author.avatarUrl} />
					</div>
				)}
			</div>
		</MessagePrimitive.Root>
	);
};
/**
 * Wraps UserAvatar with local image-error state, so a broken avatar URL
 * permanently falls back to initials for this message.
 */
const UserAvatarWithState: FC<AuthorMetadata> = ({ displayName, avatarUrl }) => {
	const [avatarFailed, setAvatarFailed] = useState(false);
	const handleImageError = () => setAvatarFailed(true);
	return (
		<UserAvatar
			displayName={displayName}
			avatarUrl={avatarUrl}
			hasError={avatarFailed}
			onError={handleImageError}
		/>
	);
};
/**
 * Read-only assistant message for the public chat view.
 * Renders text parts as markdown, tool calls via the generic fallback UI,
 * and a copy-only action bar underneath.
 */
const PublicAssistantMessage: FC = () => {
	return (
		<MessagePrimitive.Root
			className="aui-assistant-message-root group fade-in slide-in-from-bottom-1 relative mx-auto w-full max-w-(--thread-max-width) animate-in py-3 duration-150"
			data-role="assistant"
		>
			<div className="aui-assistant-message-content wrap-break-word px-2 text-foreground leading-relaxed">
				<MessagePrimitive.Parts
					components={{
						Text: MarkdownText,
						tools: { Fallback: ToolFallback },
					}}
				/>
			</div>
			<div className="aui-assistant-message-footer mt-1 mb-5 ml-2 flex">
				<PublicAssistantActionBar />
			</div>
		</MessagePrimitive.Root>
	);
};
/**
 * Copy-only action bar for public assistant messages.
 * Auto-hides on all but the last message; swaps the copy icon for a
 * check icon while `message.isCopied` is set.
 */
const PublicAssistantActionBar: FC = () => {
	return (
		<ActionBarPrimitive.Root
			autohide="not-last"
			autohideFloat="single-branch"
			className="aui-assistant-action-bar-root -ml-1 flex gap-1 text-muted-foreground data-floating:absolute data-floating:rounded-md data-floating:border data-floating:bg-background data-floating:p-1 data-floating:shadow-sm"
		>
			<ActionBarPrimitive.Copy asChild>
				<TooltipIconButton tooltip="Copy">
					<AssistantIf condition={({ message }) => message.isCopied}>
						<CheckIcon />
					</AssistantIf>
					<AssistantIf condition={({ message }) => !message.isCopied}>
						<CopyIcon />
					</AssistantIf>
				</TooltipIconButton>
			</ActionBarPrimitive.Copy>
		</ActionBarPrimitive.Root>
	);
};

View file

@ -149,16 +149,16 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
return ( return (
<div <div
className={cn( className={cn(
"flex items-center gap-4 rounded-xl border border-destructive/20 bg-destructive/5 p-4", "flex items-center gap-3 sm:gap-4 rounded-xl border border-destructive/20 bg-destructive/5 p-3 sm:p-4",
className className
)} )}
> >
<div className="flex size-16 items-center justify-center rounded-lg bg-destructive/10"> <div className="flex size-12 sm:size-16 shrink-0 items-center justify-center rounded-lg bg-destructive/10">
<Volume2Icon className="size-8 text-destructive" /> <Volume2Icon className="size-6 sm:size-8 text-destructive" />
</div> </div>
<div className="flex-1"> <div className="flex-1 min-w-0">
<p className="font-medium text-destructive">{title}</p> <p className="font-medium text-destructive text-sm sm:text-base truncate">{title}</p>
<p className="text-destructive/70 text-sm">{error}</p> <p className="text-destructive/70 text-xs sm:text-sm">{error}</p>
</div> </div>
</div> </div>
); );
@ -168,7 +168,7 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
<div <div
id={id} id={id}
className={cn( className={cn(
"group relative overflow-hidden rounded-xl border bg-gradient-to-br from-background to-muted/30 p-4 shadow-sm transition-all hover:shadow-md", "group relative overflow-hidden rounded-xl border bg-gradient-to-br from-background to-muted/30 p-3 sm:p-4 shadow-sm transition-all hover:shadow-md",
className className
)} )}
> >
@ -177,15 +177,15 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
<track kind="captions" srcLang="en" label="English captions" default /> <track kind="captions" srcLang="en" label="English captions" default />
</audio> </audio>
<div className="flex gap-4"> <div className="flex gap-3 sm:gap-4">
{/* Artwork */} {/* Artwork */}
<div className="relative shrink-0"> <div className="relative shrink-0">
<div className="relative size-20 overflow-hidden rounded-lg bg-gradient-to-br from-primary/20 to-primary/5 shadow-inner"> <div className="relative size-14 sm:size-20 overflow-hidden rounded-lg bg-gradient-to-br from-primary/20 to-primary/5 shadow-inner">
{artwork ? ( {artwork ? (
<Image src={artwork} alt={title} fill className="object-cover" unoptimized /> <Image src={artwork} alt={title} fill className="object-cover" unoptimized />
) : ( ) : (
<div className="flex size-full items-center justify-center"> <div className="flex size-full items-center justify-center">
<Volume2Icon className="size-8 text-primary/50" /> <Volume2Icon className="size-6 sm:size-8 text-primary/50" />
</div> </div>
)} )}
</div> </div>
@ -195,14 +195,16 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
<div className="flex min-w-0 flex-1 flex-col justify-between"> <div className="flex min-w-0 flex-1 flex-col justify-between">
{/* Title and description */} {/* Title and description */}
<div className="min-w-0"> <div className="min-w-0">
<h3 className="truncate font-semibold text-foreground">{title}</h3> <h3 className="truncate font-semibold text-foreground text-sm sm:text-base">{title}</h3>
{description && ( {description && (
<p className="mt-0.5 line-clamp-1 text-muted-foreground text-sm">{description}</p> <p className="mt-0.5 line-clamp-1 text-muted-foreground text-xs sm:text-sm">
{description}
</p>
)} )}
</div> </div>
{/* Progress bar */} {/* Progress bar */}
<div className="mt-2 space-y-1"> <div className="mt-1.5 sm:mt-2 space-y-0.5 sm:space-y-1">
<Slider <Slider
value={[currentTime]} value={[currentTime]}
max={duration || 100} max={duration || 100}
@ -211,7 +213,7 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
className="cursor-pointer" className="cursor-pointer"
disabled={isLoading} disabled={isLoading}
/> />
<div className="flex justify-between text-muted-foreground text-xs"> <div className="flex justify-between text-muted-foreground text-[10px] sm:text-xs">
<span>{formatTime(currentTime)}</span> <span>{formatTime(currentTime)}</span>
<span>{formatTime(duration)}</span> <span>{formatTime(duration)}</span>
</div> </div>
@ -220,33 +222,37 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
</div> </div>
{/* Controls */} {/* Controls */}
<div className="mt-3 flex items-center justify-between border-t pt-3"> <div className="mt-2 sm:mt-3 flex items-center justify-between border-t pt-2 sm:pt-3">
<div className="flex items-center gap-2"> <div className="flex items-center gap-1.5 sm:gap-2">
{/* Play/Pause button */} {/* Play/Pause button */}
<Button <Button
variant="default" variant="default"
size="sm" size="sm"
onClick={togglePlayPause} onClick={togglePlayPause}
disabled={isLoading} disabled={isLoading}
className="gap-2" className="gap-1.5 sm:gap-2 h-7 sm:h-8 px-2.5 sm:px-3 text-xs sm:text-sm"
> >
{isLoading ? ( {isLoading ? (
<div className="size-4 animate-spin rounded-full border-2 border-current border-t-transparent" /> <div className="size-3 sm:size-4 animate-spin rounded-full border-2 border-current border-t-transparent" />
) : isPlaying ? ( ) : isPlaying ? (
<PauseIcon className="size-4" /> <PauseIcon className="size-3 sm:size-4" />
) : ( ) : (
<PlayIcon className="size-4" /> <PlayIcon className="size-3 sm:size-4" />
)} )}
{isPlaying ? "Pause" : "Play"} {isPlaying ? "Pause" : "Play"}
</Button> </Button>
{/* Volume control */} {/* Volume control */}
<div className="flex items-center gap-1.5"> <div className="flex items-center gap-1 sm:gap-1.5">
<Button variant="ghost" size="icon" onClick={toggleMute} className="size-8"> <Button variant="ghost" size="icon" onClick={toggleMute} className="size-7 sm:size-8">
{isMuted ? <VolumeXIcon className="size-4" /> : <Volume2Icon className="size-4" />} {isMuted ? (
<VolumeXIcon className="size-3.5 sm:size-4" />
) : (
<Volume2Icon className="size-3.5 sm:size-4" />
)}
</Button> </Button>
{/* Custom volume bar - visually distinct from progress slider */} {/* Custom volume bar - visually distinct from progress slider */}
<div className="relative flex h-6 w-16 items-center"> <div className="relative flex h-6 w-12 sm:w-16 items-center">
<div className="relative h-1 w-full rounded-full bg-muted-foreground/20"> <div className="relative h-1 w-full rounded-full bg-muted-foreground/20">
<div <div
className="absolute left-0 top-0 h-full rounded-full bg-muted-foreground/60 transition-all" className="absolute left-0 top-0 h-full rounded-full bg-muted-foreground/60 transition-all"
@ -268,8 +274,13 @@ export function Audio({ id, src, title, description, artwork, durationMs, classN
</div> </div>
{/* Download button */} {/* Download button */}
<Button variant="outline" size="sm" onClick={handleDownload} className="gap-2"> <Button
<DownloadIcon className="size-4" /> variant="outline"
size="sm"
onClick={handleDownload}
className="gap-1.5 sm:gap-2 h-7 sm:h-8 px-2.5 sm:px-3 text-xs sm:text-sm"
>
<DownloadIcon className="size-3 sm:size-4" />
Download Download
</Button> </Button>
</div> </div>

View file

@ -20,21 +20,31 @@ const GeneratePodcastArgsSchema = z.object({
}); });
const GeneratePodcastResultSchema = z.object({ const GeneratePodcastResultSchema = z.object({
status: z.enum(["processing", "already_generating", "success", "error"]), // Support both old and new status values for backwards compatibility
task_id: z.string().nullish(), status: z.enum([
"pending",
"generating",
"ready",
"failed",
// Legacy values from old saved chats
"processing",
"already_generating",
"success",
"error",
]),
podcast_id: z.number().nullish(), podcast_id: z.number().nullish(),
task_id: z.string().nullish(), // Legacy field for old saved chats
title: z.string().nullish(), title: z.string().nullish(),
transcript_entries: z.number().nullish(), transcript_entries: z.number().nullish(),
message: z.string().nullish(), message: z.string().nullish(),
error: z.string().nullish(), error: z.string().nullish(),
}); });
const TaskStatusResponseSchema = z.object({ const PodcastStatusResponseSchema = z.object({
status: z.enum(["processing", "success", "error"]), status: z.enum(["pending", "generating", "ready", "failed"]),
podcast_id: z.number().nullish(), id: z.number(),
title: z.string().nullish(), title: z.string(),
transcript_entries: z.number().nullish(), transcript_entries: z.number().nullish(),
state: z.string().nullish(),
error: z.string().nullish(), error: z.string().nullish(),
}); });
@ -52,17 +62,17 @@ const PodcastDetailsSchema = z.object({
*/ */
type GeneratePodcastArgs = z.infer<typeof GeneratePodcastArgsSchema>; type GeneratePodcastArgs = z.infer<typeof GeneratePodcastArgsSchema>;
type GeneratePodcastResult = z.infer<typeof GeneratePodcastResultSchema>; type GeneratePodcastResult = z.infer<typeof GeneratePodcastResultSchema>;
type TaskStatusResponse = z.infer<typeof TaskStatusResponseSchema>; type PodcastStatusResponse = z.infer<typeof PodcastStatusResponseSchema>;
type PodcastTranscriptEntry = z.infer<typeof PodcastTranscriptEntrySchema>; type PodcastTranscriptEntry = z.infer<typeof PodcastTranscriptEntrySchema>;
/** /**
* Parse and validate task status response * Parse and validate podcast status response
*/ */
function parseTaskStatusResponse(data: unknown): TaskStatusResponse { function parsePodcastStatusResponse(data: unknown): PodcastStatusResponse | null {
const result = TaskStatusResponseSchema.safeParse(data); const result = PodcastStatusResponseSchema.safeParse(data);
if (!result.success) { if (!result.success) {
console.warn("Invalid task status response:", result.error.issues); console.warn("Invalid podcast status response:", result.error.issues);
return { status: "error", error: "Invalid response from server" }; return null;
} }
return result.data; return result.data;
} }
@ -86,23 +96,27 @@ function parsePodcastDetails(data: unknown): { podcast_transcript?: PodcastTrans
*/ */
function PodcastGeneratingState({ title }: { title: string }) { function PodcastGeneratingState({ title }: { title: string }) {
return ( return (
<div className="my-4 overflow-hidden rounded-xl border border-primary/20 bg-gradient-to-br from-primary/5 to-primary/10 p-6"> <div className="my-4 overflow-hidden rounded-xl border border-primary/20 bg-gradient-to-br from-primary/5 to-primary/10 p-4 sm:p-6">
<div className="flex items-center gap-4"> <div className="flex items-center gap-3 sm:gap-4">
<div className="relative"> <div className="relative shrink-0">
<div className="flex size-16 items-center justify-center rounded-full bg-primary/20"> <div className="flex size-12 sm:size-16 items-center justify-center rounded-full bg-primary/20">
<MicIcon className="size-8 text-primary" /> <MicIcon className="size-6 sm:size-8 text-primary" />
</div> </div>
{/* Animated rings */} {/* Animated rings */}
<div className="absolute inset-1 animate-ping rounded-full bg-primary/20" /> <div className="absolute inset-1 animate-ping rounded-full bg-primary/20" />
</div> </div>
<div className="flex-1"> <div className="flex-1 min-w-0">
<h3 className="font-semibold text-foreground text-lg">{title}</h3> <h3 className="font-semibold text-foreground text-sm sm:text-lg leading-tight">
<div className="mt-2 flex items-center gap-2 text-muted-foreground"> {title}
<Spinner size="sm" /> </h3>
<span className="text-sm">Generating podcast. This may take a few minutes.</span> <div className="mt-1.5 sm:mt-2 flex items-center gap-1.5 sm:gap-2 text-muted-foreground">
<Spinner size="sm" className="size-3 sm:size-4" />
<span className="text-xs sm:text-sm">
Generating podcast. This may take a few minutes.
</span>
</div> </div>
<div className="mt-3"> <div className="mt-2 sm:mt-3">
<div className="h-1.5 w-full overflow-hidden rounded-full bg-primary/10"> <div className="h-1 sm:h-1.5 w-full overflow-hidden rounded-full bg-primary/10">
<div className="h-full w-1/3 animate-pulse rounded-full bg-primary" /> <div className="h-full w-1/3 animate-pulse rounded-full bg-primary" />
</div> </div>
</div> </div>
@ -117,15 +131,17 @@ function PodcastGeneratingState({ title }: { title: string }) {
*/ */
function PodcastErrorState({ title, error }: { title: string; error: string }) { function PodcastErrorState({ title, error }: { title: string; error: string }) {
return ( return (
<div className="my-4 overflow-hidden rounded-xl border border-destructive/20 bg-destructive/5 p-6"> <div className="my-4 overflow-hidden rounded-xl border border-destructive/20 bg-destructive/5 p-4 sm:p-6">
<div className="flex items-center gap-4"> <div className="flex items-center gap-3 sm:gap-4">
<div className="flex size-16 shrink-0 items-center justify-center rounded-full bg-destructive/10"> <div className="flex size-12 sm:size-16 shrink-0 items-center justify-center rounded-full bg-destructive/10">
<AlertCircleIcon className="size-8 text-destructive" /> <AlertCircleIcon className="size-6 sm:size-8 text-destructive" />
</div> </div>
<div className="flex-1"> <div className="flex-1 min-w-0">
<h3 className="font-semibold text-foreground">{title}</h3> <h3 className="font-semibold text-foreground text-sm sm:text-base leading-tight">
<p className="mt-1 text-destructive text-sm">Failed to generate podcast</p> {title}
<p className="mt-2 text-muted-foreground text-sm">{error}</p> </h3>
<p className="mt-1 text-destructive text-xs sm:text-sm">Failed to generate podcast</p>
<p className="mt-1.5 sm:mt-2 text-muted-foreground text-xs sm:text-sm">{error}</p>
</div> </div>
</div> </div>
</div> </div>
@ -137,16 +153,18 @@ function PodcastErrorState({ title, error }: { title: string; error: string }) {
*/ */
function AudioLoadingState({ title }: { title: string }) { function AudioLoadingState({ title }: { title: string }) {
return ( return (
<div className="my-4 overflow-hidden rounded-xl border bg-muted/30 p-6"> <div className="my-4 overflow-hidden rounded-xl border bg-muted/30 p-4 sm:p-6">
<div className="flex items-center gap-4"> <div className="flex items-center gap-3 sm:gap-4">
<div className="flex size-16 items-center justify-center rounded-full bg-primary/10"> <div className="flex size-12 sm:size-16 shrink-0 items-center justify-center rounded-full bg-primary/10">
<MicIcon className="size-8 text-primary/50" /> <MicIcon className="size-6 sm:size-8 text-primary/50" />
</div> </div>
<div className="flex-1"> <div className="flex-1 min-w-0">
<h3 className="font-semibold text-foreground">{title}</h3> <h3 className="font-semibold text-foreground text-sm sm:text-base leading-tight">
<div className="mt-2 flex items-center gap-2 text-muted-foreground"> {title}
<Spinner size="sm" /> </h3>
<span className="text-sm">Loading audio...</span> <div className="mt-1.5 sm:mt-2 flex items-center gap-1.5 sm:gap-2 text-muted-foreground">
<Spinner size="sm" className="size-3 sm:size-4" />
<span className="text-xs sm:text-sm">Loading audio...</span>
</div> </div>
</div> </div>
</div> </div>
@ -264,13 +282,13 @@ function PodcastPlayer({
/> />
{/* Transcript section */} {/* Transcript section */}
{transcript && transcript.length > 0 && ( {transcript && transcript.length > 0 && (
<details className="mt-3 rounded-lg border bg-muted/30 p-3"> <details className="mt-2 sm:mt-3 rounded-lg border bg-muted/30 p-2.5 sm:p-3">
<summary className="cursor-pointer font-medium text-muted-foreground text-sm hover:text-foreground"> <summary className="cursor-pointer font-medium text-muted-foreground text-xs sm:text-sm hover:text-foreground">
View transcript ({transcript.length} entries) View transcript ({transcript.length} entries)
</summary> </summary>
<div className="mt-3 space-y-3 max-h-96 overflow-y-auto"> <div className="mt-2 sm:mt-3 space-y-2 sm:space-y-3 max-h-64 sm:max-h-96 overflow-y-auto">
{transcript.map((entry, idx) => ( {transcript.map((entry, idx) => (
<div key={`${idx}-${entry.speaker_id}`} className="text-sm"> <div key={`${idx}-${entry.speaker_id}`} className="text-xs sm:text-sm">
<span className="font-medium text-primary">Speaker {entry.speaker_id + 1}:</span>{" "} <span className="font-medium text-primary">Speaker {entry.speaker_id + 1}:</span>{" "}
<span className="text-muted-foreground">{entry.dialog}</span> <span className="text-muted-foreground">{entry.dialog}</span>
</div> </div>
@ -283,44 +301,42 @@ function PodcastPlayer({
} }
/** /**
* Polling component that checks task status and shows player when complete * Polling component that checks podcast status and shows player when ready
*/ */
function PodcastTaskPoller({ taskId, title }: { taskId: string; title: string }) { function PodcastStatusPoller({ podcastId, title }: { podcastId: number; title: string }) {
const [taskStatus, setTaskStatus] = useState<TaskStatusResponse>({ status: "processing" }); const [podcastStatus, setPodcastStatus] = useState<PodcastStatusResponse | null>(null);
const pollingRef = useRef<NodeJS.Timeout | null>(null); const pollingRef = useRef<NodeJS.Timeout | null>(null);
// Set active podcast state when this component mounts // Set active podcast state when this component mounts
useEffect(() => { useEffect(() => {
setActivePodcastTaskId(taskId); setActivePodcastTaskId(String(podcastId));
// Clear when component unmounts // Clear when component unmounts
return () => { return () => {
// Only clear if this task is still the active one
clearActivePodcastTaskId(); clearActivePodcastTaskId();
}; };
}, [taskId]); }, [podcastId]);
// Poll for task status // Poll for podcast status
useEffect(() => { useEffect(() => {
const pollStatus = async () => { const pollStatus = async () => {
try { try {
const rawResponse = await baseApiService.get<unknown>( const rawResponse = await baseApiService.get<unknown>(`/api/v1/podcasts/${podcastId}`);
`/api/v1/podcasts/task/${taskId}/status` const response = parsePodcastStatusResponse(rawResponse);
); if (response) {
const response = parseTaskStatusResponse(rawResponse); setPodcastStatus(response);
setTaskStatus(response);
// Stop polling if task is complete or errored // Stop polling if podcast is ready or failed
if (response.status !== "processing") { if (response.status === "ready" || response.status === "failed") {
if (pollingRef.current) { if (pollingRef.current) {
clearInterval(pollingRef.current); clearInterval(pollingRef.current);
pollingRef.current = null; pollingRef.current = null;
}
clearActivePodcastTaskId();
} }
// Clear the active podcast state when task completes
clearActivePodcastTaskId();
} }
} catch (err) { } catch (err) {
console.error("Error polling task status:", err); console.error("Error polling podcast status:", err);
// Don't stop polling on network errors, continue polling // Don't stop polling on network errors, continue polling
} }
}; };
@ -336,27 +352,31 @@ function PodcastTaskPoller({ taskId, title }: { taskId: string; title: string })
clearInterval(pollingRef.current); clearInterval(pollingRef.current);
} }
}; };
}, [taskId]); }, [podcastId]);
// Show loading state while processing // Show loading state while pending or generating
if (taskStatus.status === "processing") { if (
!podcastStatus ||
podcastStatus.status === "pending" ||
podcastStatus.status === "generating"
) {
return <PodcastGeneratingState title={title} />; return <PodcastGeneratingState title={title} />;
} }
// Show error state // Show error state
if (taskStatus.status === "error") { if (podcastStatus.status === "failed") {
return <PodcastErrorState title={title} error={taskStatus.error || "Generation failed"} />; return <PodcastErrorState title={title} error={podcastStatus.error || "Generation failed"} />;
} }
// Show player when complete // Show player when ready
if (taskStatus.status === "success" && taskStatus.podcast_id) { if (podcastStatus.status === "ready") {
return ( return (
<PodcastPlayer <PodcastPlayer
podcastId={taskStatus.podcast_id} podcastId={podcastStatus.id}
title={taskStatus.title || title} title={podcastStatus.title || title}
description={ description={
taskStatus.transcript_entries podcastStatus.transcript_entries
? `${taskStatus.transcript_entries} dialogue entries` ? `${podcastStatus.transcript_entries} dialogue entries`
: "SurfSense AI-generated podcast" : "SurfSense AI-generated podcast"
} }
/> />
@ -392,9 +412,9 @@ export const GeneratePodcastToolUI = makeAssistantToolUI<
if (status.type === "incomplete") { if (status.type === "incomplete") {
if (status.reason === "cancelled") { if (status.reason === "cancelled") {
return ( return (
<div className="my-4 rounded-xl border border-muted p-4 text-muted-foreground"> <div className="my-4 rounded-xl border border-muted p-3 sm:p-4 text-muted-foreground">
<p className="flex items-center gap-2"> <p className="flex items-center gap-1.5 sm:gap-2 text-xs sm:text-sm">
<MicIcon className="size-4" /> <MicIcon className="size-3.5 sm:size-4" />
<span className="line-through">Podcast generation cancelled</span> <span className="line-through">Podcast generation cancelled</span>
</p> </p>
</div> </div>
@ -415,25 +435,26 @@ export const GeneratePodcastToolUI = makeAssistantToolUI<
return <PodcastGeneratingState title={title} />; return <PodcastGeneratingState title={title} />;
} }
// Error result // Failed result (new: "failed", legacy: "error")
if (result.status === "error") { if (result.status === "failed" || result.status === "error") {
return <PodcastErrorState title={title} error={result.error || "Unknown error"} />; return <PodcastErrorState title={title} error={result.error || "Generation failed"} />;
} }
// Already generating - show simple warning, don't create another poller // Already generating - show simple warning, don't create another poller
// The FIRST tool call will display the podcast when ready // The FIRST tool call will display the podcast when ready
if (result.status === "already_generating") { // (new: "generating", legacy: "already_generating")
if (result.status === "generating" || result.status === "already_generating") {
return ( return (
<div className="my-4 overflow-hidden rounded-xl border border-amber-500/20 bg-amber-500/5 p-4"> <div className="my-4 overflow-hidden rounded-xl border border-amber-500/20 bg-amber-500/5 p-3 sm:p-4">
<div className="flex items-center gap-3"> <div className="flex items-center gap-2.5 sm:gap-3">
<div className="flex size-10 shrink-0 items-center justify-center rounded-full bg-amber-500/20"> <div className="flex size-8 sm:size-10 shrink-0 items-center justify-center rounded-full bg-amber-500/20">
<MicIcon className="size-5 text-amber-500" /> <MicIcon className="size-4 sm:size-5 text-amber-500" />
</div> </div>
<div> <div className="min-w-0">
<p className="text-amber-600 dark:text-amber-400 text-sm font-medium"> <p className="text-amber-600 dark:text-amber-400 text-xs sm:text-sm font-medium">
Podcast already in progress Podcast already in progress
</p> </p>
<p className="text-muted-foreground text-xs mt-0.5"> <p className="text-muted-foreground text-[10px] sm:text-xs mt-0.5">
Please wait for the current podcast to complete. Please wait for the current podcast to complete.
</p> </p>
</div> </div>
@ -442,13 +463,13 @@ export const GeneratePodcastToolUI = makeAssistantToolUI<
); );
} }
// Processing - poll for completion // Pending - poll for completion (new: "pending" with podcast_id)
if (result.status === "processing" && result.task_id) { if (result.status === "pending" && result.podcast_id) {
return <PodcastTaskPoller taskId={result.task_id} title={result.title || title} />; return <PodcastStatusPoller podcastId={result.podcast_id} title={result.title || title} />;
} }
// Success with podcast_id (direct result, not via polling) // Ready with podcast_id (new: "ready", legacy: "success")
if (result.status === "success" && result.podcast_id) { if ((result.status === "ready" || result.status === "success") && result.podcast_id) {
return ( return (
<PodcastPlayer <PodcastPlayer
podcastId={result.podcast_id} podcastId={result.podcast_id}
@ -462,7 +483,29 @@ export const GeneratePodcastToolUI = makeAssistantToolUI<
); );
} }
// Legacy: old chats with Celery task_id (status: "processing" or "success" without podcast_id)
// These can't be recovered since the old task polling endpoint no longer exists
if (result.task_id && !result.podcast_id) {
return (
<div className="my-4 overflow-hidden rounded-xl border border-muted p-4">
<div className="flex items-center gap-3">
<div className="flex size-10 shrink-0 items-center justify-center rounded-full bg-muted">
<MicIcon className="size-5 text-muted-foreground" />
</div>
<div>
<p className="text-muted-foreground text-sm">
This podcast was generated with an older version and cannot be displayed.
</p>
<p className="text-muted-foreground text-xs mt-0.5">
Please generate a new podcast to listen.
</p>
</div>
</div>
</div>
);
}
// Fallback - missing required data // Fallback - missing required data
return <PodcastErrorState title={title} error="Missing task ID or podcast ID" />; return <PodcastErrorState title={title} error="Missing podcast ID" />;
}, },
}); });

View file

@ -66,6 +66,29 @@ Click **Save** to apply the capabilities.
--- ---
## Limitations & Unsupported Content
Notion's API cannot retrieve certain block types. SurfSense automatically skips these unsupported blocks and continues syncing all other content.
### Unsupported Block Types
The following Notion features are **not accessible via the Notion API** and will be skipped during sync:
- **Transcription blocks** - Audio/video transcriptions from Notion AI
- **AI blocks** - AI-generated content blocks
### Learn More
The Notion API only supports specific block types for retrieval. The official list of **supported block types** is documented in Notion's Block reference:
- **[Block Object Reference](https://developers.notion.com/reference/block)** - Official documentation listing all supported block types. Any block type not listed here (such as `transcription` and `ai_block`) is not accessible via the Notion API.
For additional information:
- [Working with Page Content](https://developers.notion.com/docs/working-with-page-content) - Guide on how the Notion API handles page content
- [Notion API Reference](https://developers.notion.com/reference) - Complete API documentation
---
## Running SurfSense with Notion Connector ## Running SurfSense with Notion Connector
Add the Notion environment variables to your Docker run command: Add the Notion environment variables to your Docker run command:

View file

@ -0,0 +1,19 @@
import { z } from "zod";

/**
 * Toggle public share
 *
 * Request/response contracts for enabling or disabling tokenized public
 * sharing of a chat thread.
 */
export const togglePublicShareRequest = z.object({
	thread_id: z.number(),
	enabled: z.boolean(),
});

export const togglePublicShareResponse = z.object({
	// Share state after the toggle has been applied.
	enabled: z.boolean(),
	// Public share URL when available; nullable (presumably null while sharing
	// is disabled — verify against the backend response).
	public_url: z.string().nullable(),
	// Opaque share token backing the public URL; nullable like public_url.
	share_token: z.string().nullable(),
});

// Type exports
export type TogglePublicShareRequest = z.infer<typeof togglePublicShareRequest>;
export type TogglePublicShareResponse = z.infer<typeof togglePublicShareResponse>;

View file

@ -0,0 +1,75 @@
import { z } from "zod";

/**
 * Author info for public chat
 */
export const publicAuthor = z.object({
	display_name: z.string().nullable(),
	avatar_url: z.string().nullable(),
});

/**
 * Message in a public chat
 */
export const publicChatMessage = z.object({
	role: z.string(),
	// Message content is passed through untyped — assistant-ui message parts
	// have an arbitrary structure that is validated downstream, not here.
	content: z.unknown(),
	author: publicAuthor.nullable(),
	created_at: z.string(),
});

/**
 * Thread info for public chat
 */
export const publicChatThread = z.object({
	title: z.string(),
	created_at: z.string(),
});

/**
 * Get public chat
 * Fetch a publicly shared thread and its messages by share token.
 */
export const getPublicChatRequest = z.object({
	share_token: z.string(),
});

export const getPublicChatResponse = z.object({
	thread: publicChatThread,
	messages: z.array(publicChatMessage),
});

/**
 * Clone public chat (init)
 * First phase of the two-phase clone: creates the target thread.
 */
export const clonePublicChatRequest = z.object({
	share_token: z.string(),
});

export const clonePublicChatResponse = z.object({
	// Newly created (cloned) thread and the search space it was placed in.
	thread_id: z.number(),
	search_space_id: z.number(),
	share_token: z.string(),
});

/**
 * Complete clone
 * Second phase: finalizes the clone after history has been bootstrapped.
 */
export const completeCloneRequest = z.object({
	thread_id: z.number(),
});

export const completeCloneResponse = z.object({
	status: z.string(),
	message_count: z.number(),
});

// Type exports
export type PublicAuthor = z.infer<typeof publicAuthor>;
export type PublicChatMessage = z.infer<typeof publicChatMessage>;
export type PublicChatThread = z.infer<typeof publicChatThread>;
export type GetPublicChatRequest = z.infer<typeof getPublicChatRequest>;
export type GetPublicChatResponse = z.infer<typeof getPublicChatResponse>;
export type ClonePublicChatRequest = z.infer<typeof clonePublicChatRequest>;
export type ClonePublicChatResponse = z.infer<typeof clonePublicChatResponse>;
export type CompleteCloneRequest = z.infer<typeof completeCloneRequest>;
export type CompleteCloneResponse = z.infer<typeof completeCloneResponse>;

View file

@ -25,6 +25,10 @@ export const useGithubStars = () => {
setStars(data?.stargazers_count); setStars(data?.stargazers_count);
} catch (err) { } catch (err) {
// Ignore abort errors (expected on unmount)
if (err instanceof Error && err.name === "AbortError") {
return;
}
if (err instanceof Error) { if (err instanceof Error) {
console.error("Error fetching stars:", err); console.error("Error fetching stars:", err);
setError(err.message); setError(err.message);
@ -37,7 +41,7 @@ export const useGithubStars = () => {
getStars(); getStars();
return () => { return () => {
abortController.abort(); abortController.abort("Component unmounted");
}; };
}, []); }, []);

View file

@ -20,21 +20,18 @@ let pendingHideTimeout: ReturnType<typeof setTimeout> | null = null;
export function useGlobalLoading() { export function useGlobalLoading() {
const [loading, setLoading] = useAtom(globalLoadingAtom); const [loading, setLoading] = useAtom(globalLoadingAtom);
const show = useCallback( const show = useCallback(() => {
(message?: string, variant: "login" | "default" = "default") => { // Cancel any pending hide - new loading request takes over
// Cancel any pending hide - new loading request takes over if (pendingHideTimeout) {
if (pendingHideTimeout) { clearTimeout(pendingHideTimeout);
clearTimeout(pendingHideTimeout); pendingHideTimeout = null;
pendingHideTimeout = null; }
}
const id = ++loadingIdCounter; const id = ++loadingIdCounter;
currentLoadingId = id; currentLoadingId = id;
setLoading({ isLoading: true, message, variant }); setLoading({ isLoading: true });
return id; return id;
}, }, [setLoading]);
[setLoading]
);
const hide = useCallback( const hide = useCallback(
(id?: number) => { (id?: number) => {
@ -50,7 +47,7 @@ export function useGlobalLoading() {
// Double-check we're still the current loading after the delay // Double-check we're still the current loading after the delay
if (id === undefined || id === currentLoadingId) { if (id === undefined || id === currentLoadingId) {
currentLoadingId = null; currentLoadingId = null;
setLoading({ isLoading: false, message: undefined, variant: "default" }); setLoading({ isLoading: false });
} }
pendingHideTimeout = null; pendingHideTimeout = null;
}, 50); // Small delay to allow next component to mount and show loading }, 50); // Small delay to allow next component to mount and show loading
@ -70,27 +67,21 @@ export function useGlobalLoading() {
* transition loading states (e.g., layout page). * transition loading states (e.g., layout page).
* *
* @param shouldShow - Whether the loading screen should be visible * @param shouldShow - Whether the loading screen should be visible
* @param message - Optional message to display
* @param variant - Visual style variant ("login" or "default")
*/ */
export function useGlobalLoadingEffect( export function useGlobalLoadingEffect(shouldShow: boolean) {
shouldShow: boolean,
message?: string,
variant: "login" | "default" = "default"
) {
const { show, hide } = useGlobalLoading(); const { show, hide } = useGlobalLoading();
const loadingIdRef = useRef<number | null>(null); const loadingIdRef = useRef<number | null>(null);
useEffect(() => { useEffect(() => {
if (shouldShow) { if (shouldShow) {
// Show loading and store the ID // Show loading and store the ID
loadingIdRef.current = show(message, variant); loadingIdRef.current = show();
} else if (loadingIdRef.current !== null) { } else if (loadingIdRef.current !== null) {
// Only hide if we were the ones showing loading // Only hide if we were the ones showing loading
hide(loadingIdRef.current); hide(loadingIdRef.current);
loadingIdRef.current = null; loadingIdRef.current = null;
} }
}, [shouldShow, message, variant, show, hide]); }, [shouldShow, show, hide]);
// Cleanup on unmount - only hide if we're still the active loading // Cleanup on unmount - only hide if we're still the active loading
useEffect(() => { useEffect(() => {

View file

@ -318,9 +318,13 @@ export function useInbox(
try { try {
// STEP 1: Fetch server counts (total and recent) - guaranteed accurate // STEP 1: Fetch server counts (total and recent) - guaranteed accurate
console.log("[useInbox] Fetching unread count from server"); console.log(
"[useInbox] Fetching unread count from server",
typeFilter ? `for type: ${typeFilter}` : "for all types"
);
const serverCounts = await notificationsApiService.getUnreadCount( const serverCounts = await notificationsApiService.getUnreadCount(
searchSpaceId ?? undefined searchSpaceId ?? undefined,
typeFilter ?? undefined
); );
if (mounted) { if (mounted) {

View file

@ -0,0 +1,51 @@
"use client";
import { type AppendMessage, useExternalStoreRuntime } from "@assistant-ui/react";
import { useCallback, useMemo } from "react";
import type { GetPublicChatResponse, PublicChatMessage } from "@/contracts/types/public-chat.types";
import { convertToThreadMessage } from "@/lib/chat/message-utils";
import type { MessageRecord } from "@/lib/chat/thread-persistence";
/** Options for usePublicChatRuntime. */
interface UsePublicChatRuntimeOptions {
  // Public chat payload; undefined while the fetch is still in flight.
  data: GetPublicChatResponse | undefined;
}
/**
 * Adapt a PublicChatMessage into the MessageRecord shape so that
 * convertToThreadMessage can be reused for read-only public chats.
 *
 * The record gets a synthetic id (the message index) and a placeholder
 * thread_id of 0, since public messages are not tied to a local thread.
 */
function toMessageRecord(msg: PublicChatMessage, idx: number): MessageRecord {
  const author = msg.author ?? null;
  const record: MessageRecord = {
    id: idx,
    thread_id: 0,
    role: msg.role as "user" | "assistant" | "system",
    content: msg.content,
    created_at: msg.created_at,
    // Public authors carry no stable user id; "public" just marks presence.
    author_id: author ? "public" : null,
    author_display_name: author ? (author.display_name ?? null) : null,
    author_avatar_url: author ? (author.avatar_url ?? null) : null,
  };
  return record;
}
/**
 * Build an assistant-ui runtime for viewing a publicly shared chat.
 *
 * The runtime is strictly read-only: appending a new message is a no-op
 * and isRunning is always false, so the UI never enters a streaming state.
 */
export function usePublicChatRuntime({ data }: UsePublicChatRuntimeOptions) {
  // Keep a stable array identity so the external store doesn't churn on re-render.
  const threadMessages = useMemo(() => data?.messages ?? [], [data?.messages]);

  // Public chats accept no input; silently swallow append requests.
  const handleNew = useCallback(async (_message: AppendMessage) => {}, []);

  const mapMessage = useCallback(
    (msg: PublicChatMessage, idx: number) => convertToThreadMessage(toMessageRecord(msg, idx)),
    []
  );

  return useExternalStoreRuntime({
    isRunning: false,
    messages: threadMessages,
    onNew: handleNew,
    convertMessage: mapMessage,
  });
}

View file

@ -0,0 +1,14 @@
import { useQuery } from "@tanstack/react-query";
import type { GetPublicChatResponse } from "@/contracts/types/public-chat.types";
import { publicChatApiService } from "@/lib/apis/public-chat-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
/**
 * Fetch a publicly shared chat by its share token.
 *
 * The query stays disabled while the token is empty; results are treated as
 * fresh for 30 seconds, and failures are not retried because an invalid or
 * revoked token will never start succeeding.
 */
export function usePublicChat(shareToken: string) {
  const hasToken = shareToken.length > 0;
  return useQuery<GetPublicChatResponse, Error>({
    queryKey: cacheKeys.publicChat.byToken(shareToken),
    queryFn: () => publicChatApiService.getPublicChat({ share_token: shareToken }),
    enabled: hasToken,
    staleTime: 30_000,
    retry: false,
  });
}

View file

@ -12,5 +12,17 @@ if (process.env.NEXT_PUBLIC_POSTHOG_KEY) {
capture_pageview: "history_change", capture_pageview: "history_change",
// Enable session recording // Enable session recording
capture_pageleave: true, capture_pageleave: true,
loaded: (posthog) => {
// Expose PostHog to window for console access and toolbar
if (typeof window !== "undefined") {
window.posthog = posthog;
}
},
}); });
} }
// Always expose posthog to window for debugging/toolbar access
// This allows testing feature flags even without POSTHOG_KEY configured
if (typeof window !== "undefined") {
window.posthog = posthog;
}

View file

@ -23,7 +23,10 @@ export type RequestOptions = {
class BaseApiService { class BaseApiService {
baseUrl: string; baseUrl: string;
noAuthEndpoints: string[] = ["/auth/jwt/login", "/auth/register", "/auth/refresh"]; // Add more endpoints as needed noAuthEndpoints: string[] = ["/auth/jwt/login", "/auth/register", "/auth/refresh"];
// Prefixes that don't require auth (checked with startsWith)
noAuthPrefixes: string[] = ["/api/v1/public/", "/api/v1/podcasts/"];
// Use a getter to always read fresh token from localStorage // Use a getter to always read fresh token from localStorage
// This ensures the token is always up-to-date after login/logout // This ensures the token is always up-to-date after login/logout
@ -84,7 +87,10 @@ class BaseApiService {
} }
// Validate the bearer token // Validate the bearer token
if (!this.bearerToken && !this.noAuthEndpoints.includes(url)) { const isNoAuthEndpoint =
this.noAuthEndpoints.includes(url) ||
this.noAuthPrefixes.some((prefix) => url.startsWith(prefix));
if (!this.bearerToken && !isNoAuthEndpoint) {
throw new AuthenticationError("You are not authenticated. Please login again."); throw new AuthenticationError("You are not authenticated. Please login again.");
} }

View file

@ -0,0 +1,33 @@
import {
type TogglePublicShareRequest,
type TogglePublicShareResponse,
togglePublicShareRequest,
togglePublicShareResponse,
} from "@/contracts/types/chat-threads.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";
class ChatThreadsApiService {
/**
* Toggle public sharing for a thread.
* Requires authentication.
*/
togglePublicShare = async (
request: TogglePublicShareRequest
): Promise<TogglePublicShareResponse> => {
const parsed = togglePublicShareRequest.safeParse(request);
if (!parsed.success) {
const errorMessage = parsed.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.patch(
`/api/v1/threads/${parsed.data.thread_id}/public-share`,
togglePublicShareResponse,
{ body: { enabled: parsed.data.enabled } }
);
};
}
export const chatThreadsApiService = new ChatThreadsApiService();

View file

@ -5,6 +5,7 @@ import {
getNotificationsRequest, getNotificationsRequest,
getNotificationsResponse, getNotificationsResponse,
getUnreadCountResponse, getUnreadCountResponse,
type InboxItemTypeEnum,
type MarkAllNotificationsReadResponse, type MarkAllNotificationsReadResponse,
type MarkNotificationReadRequest, type MarkNotificationReadRequest,
type MarkNotificationReadResponse, type MarkNotificationReadResponse,
@ -92,12 +93,20 @@ class NotificationsApiService {
* Get unread notification count with split between total and recent * Get unread notification count with split between total and recent
* - total_unread: All unread notifications * - total_unread: All unread notifications
* - recent_unread: Unread within sync window (last 14 days) * - recent_unread: Unread within sync window (last 14 days)
* @param searchSpaceId - Optional search space ID to filter by
* @param type - Optional notification type to filter by (type-safe enum)
*/ */
getUnreadCount = async (searchSpaceId?: number): Promise<GetUnreadCountResponse> => { getUnreadCount = async (
searchSpaceId?: number,
type?: InboxItemTypeEnum
): Promise<GetUnreadCountResponse> => {
const params = new URLSearchParams(); const params = new URLSearchParams();
if (searchSpaceId !== undefined) { if (searchSpaceId !== undefined) {
params.append("search_space_id", String(searchSpaceId)); params.append("search_space_id", String(searchSpaceId));
} }
if (type) {
params.append("type", type);
}
const queryString = params.toString(); const queryString = params.toString();
return baseApiService.get( return baseApiService.get(

View file

@ -0,0 +1,73 @@
import {
type ClonePublicChatRequest,
type ClonePublicChatResponse,
type CompleteCloneRequest,
type CompleteCloneResponse,
clonePublicChatRequest,
clonePublicChatResponse,
completeCloneRequest,
completeCloneResponse,
type GetPublicChatRequest,
type GetPublicChatResponse,
getPublicChatRequest,
getPublicChatResponse,
} from "@/contracts/types/public-chat.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";
class PublicChatApiService {
/**
* Get a public chat by share token.
* No authentication required.
*/
getPublicChat = async (request: GetPublicChatRequest): Promise<GetPublicChatResponse> => {
const parsed = getPublicChatRequest.safeParse(request);
if (!parsed.success) {
const errorMessage = parsed.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.get(`/api/v1/public/${parsed.data.share_token}`, getPublicChatResponse);
};
/**
* Clone a public chat to the user's account.
* Creates an empty thread and returns thread_id for redirect.
* Requires authentication.
*/
clonePublicChat = async (request: ClonePublicChatRequest): Promise<ClonePublicChatResponse> => {
const parsed = clonePublicChatRequest.safeParse(request);
if (!parsed.success) {
const errorMessage = parsed.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.post(
`/api/v1/public/${parsed.data.share_token}/clone`,
clonePublicChatResponse
);
};
/**
* Complete the clone by copying messages and podcasts.
* Called from the chat page after redirect.
* Requires authentication.
*/
completeClone = async (request: CompleteCloneRequest): Promise<CompleteCloneResponse> => {
const parsed = completeCloneRequest.safeParse(request);
if (!parsed.success) {
const errorMessage = parsed.error.issues.map((issue) => issue.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.post(
`/api/v1/threads/${parsed.data.thread_id}/complete-clone`,
completeCloneResponse
);
};
}
export const publicChatApiService = new PublicChatApiService();

View file

@ -0,0 +1,109 @@
import type { ThreadMessageLike } from "@assistant-ui/react";
import { z } from "zod";
import type { MessageRecord } from "./thread-persistence";
/**
 * Zod schema for persisted attachment info, as stored inside a message's
 * content parts when a user message carried attachments.
 */
const PersistedAttachmentSchema = z.object({
  id: z.string(),
  name: z.string(),
  type: z.string(),
  contentType: z.string().optional(),
  // Inline data URL for image attachments, when available.
  imageDataUrl: z.string().optional(),
  // Text extracted from document attachments, when available.
  extractedContent: z.string().optional(),
});
/** Schema for the custom "attachments" content part that wraps persisted attachments. */
const AttachmentsPartSchema = z.object({
  type: z.literal("attachments"),
  items: z.array(PersistedAttachmentSchema),
});
type PersistedAttachment = z.infer<typeof PersistedAttachmentSchema>;
/**
 * Pull persisted attachments out of structured message content.
 *
 * Scans the content parts for the first one matching AttachmentsPartSchema
 * and returns its items; yields an empty array when nothing matches or the
 * content is not an array (e.g. plain string content).
 */
function extractPersistedAttachments(content: unknown): PersistedAttachment[] {
  const parts = Array.isArray(content) ? content : [];
  for (const candidate of parts) {
    const parsed = AttachmentsPartSchema.safeParse(candidate);
    if (parsed.success) return parsed.data.items;
  }
  return [];
}
/**
* Convert backend message to assistant-ui ThreadMessageLike format
* Filters out 'thinking-steps' part as it's handled separately via messageThinkingSteps
* Restores attachments for user messages from persisted data
*/
export function convertToThreadMessage(msg: MessageRecord): ThreadMessageLike {
let content: ThreadMessageLike["content"];
if (typeof msg.content === "string") {
content = [{ type: "text", text: msg.content }];
} else if (Array.isArray(msg.content)) {
// Filter out custom metadata parts - they're handled separately
const filteredContent = msg.content.filter((part: unknown) => {
if (typeof part !== "object" || part === null || !("type" in part)) return true;
const partType = (part as { type: string }).type;
// Filter out thinking-steps, mentioned-documents, and attachments
return (
partType !== "thinking-steps" &&
partType !== "mentioned-documents" &&
partType !== "attachments"
);
});
content =
filteredContent.length > 0
? (filteredContent as ThreadMessageLike["content"])
: [{ type: "text", text: "" }];
} else {
content = [{ type: "text", text: String(msg.content) }];
}
// Restore attachments for user messages
let attachments: ThreadMessageLike["attachments"];
if (msg.role === "user") {
const persistedAttachments = extractPersistedAttachments(msg.content);
if (persistedAttachments.length > 0) {
attachments = persistedAttachments.map((att) => ({
id: att.id,
name: att.name,
type: att.type as "document" | "image" | "file",
contentType: att.contentType || "application/octet-stream",
status: { type: "complete" as const },
content: [],
// Custom fields for our ChatAttachment interface
imageDataUrl: att.imageDataUrl,
extractedContent: att.extractedContent,
}));
}
}
// Build metadata.custom for author display in shared chats
const metadata = msg.author_id
? {
custom: {
author: {
displayName: msg.author_display_name ?? null,
avatarUrl: msg.author_avatar_url ?? null,
},
},
}
: undefined;
return {
id: `msg-${msg.id}`,
role: msg.role,
content,
createdAt: new Date(msg.created_at),
attachments,
metadata,
};
}

View file

@ -24,6 +24,9 @@ export interface ThreadRecord {
created_at: string; created_at: string;
updated_at: string; updated_at: string;
has_comments?: boolean; has_comments?: boolean;
public_share_enabled?: boolean;
public_share_token?: string | null;
clone_pending?: boolean;
} }
export interface MessageRecord { export interface MessageRecord {

View file

@ -79,4 +79,7 @@ export const cacheKeys = {
comments: { comments: {
byMessage: (messageId: number) => ["comments", "message", messageId] as const, byMessage: (messageId: number) => ["comments", "message", messageId] as const,
}, },
publicChat: {
byToken: (shareToken: string) => ["public-chat", shareToken] as const,
},
}; };

View file

@ -2,8 +2,6 @@
"common": { "common": {
"app_name": "SurfSense", "app_name": "SurfSense",
"welcome": "Welcome", "welcome": "Welcome",
"loading": "Loading",
"initializing": "Initializing",
"save": "Save", "save": "Save",
"cancel": "Cancel", "cancel": "Cancel",
"delete": "Delete", "delete": "Delete",
@ -80,8 +78,7 @@
"passwords_no_match_desc": "The passwords you entered do not match", "passwords_no_match_desc": "The passwords you entered do not match",
"creating_account": "Creating your account", "creating_account": "Creating your account",
"creating_account_btn": "Creating account", "creating_account_btn": "Creating account",
"redirecting_login": "Redirecting to login page", "redirecting_login": "Redirecting to login page"
"processing_authentication": "Processing authentication"
}, },
"searchSpace": { "searchSpace": {
"create_title": "Create Search Space", "create_title": "Create Search Space",
@ -146,10 +143,7 @@
"api_keys": "API Keys", "api_keys": "API Keys",
"profile": "Profile", "profile": "Profile",
"loading_dashboard": "Loading Dashboard", "loading_dashboard": "Loading Dashboard",
"checking_auth": "Checking authentication",
"loading_config": "Loading Configuration", "loading_config": "Loading Configuration",
"checking_llm_prefs": "Checking your LLM preferences",
"setting_up_ai": "Setting up AI",
"config_error": "Configuration Error", "config_error": "Configuration Error",
"failed_load_llm_config": "Failed to load your LLM configuration", "failed_load_llm_config": "Failed to load your LLM configuration",
"error_loading_chats": "Error loading chats", "error_loading_chats": "Error loading chats",
@ -171,7 +165,6 @@
"create_search_space": "Create Search Space", "create_search_space": "Create Search Space",
"add_new_search_space": "Add New Search Space", "add_new_search_space": "Add New Search Space",
"loading": "Loading", "loading": "Loading",
"fetching_spaces": "Fetching your search spaces",
"may_take_moment": "This may take a moment", "may_take_moment": "This may take a moment",
"error": "Error", "error": "Error",
"something_wrong": "Something went wrong", "something_wrong": "Something went wrong",
@ -715,7 +708,8 @@
"all": "All", "all": "All",
"unread": "Unread", "unread": "Unread",
"connectors": "Connectors", "connectors": "Connectors",
"all_connectors": "All connectors" "all_connectors": "All connectors",
"close": "Close"
}, },
"errors": { "errors": {
"something_went_wrong": "Something went wrong", "something_went_wrong": "Something went wrong",

View file

@ -2,8 +2,6 @@
"common": { "common": {
"app_name": "SurfSense", "app_name": "SurfSense",
"welcome": "欢迎", "welcome": "欢迎",
"loading": "加载中...",
"initializing": "正在初始化",
"save": "保存", "save": "保存",
"cancel": "取消", "cancel": "取消",
"delete": "删除", "delete": "删除",
@ -80,8 +78,7 @@
"passwords_no_match_desc": "您输入的密码不一致", "passwords_no_match_desc": "您输入的密码不一致",
"creating_account": "正在创建您的账户", "creating_account": "正在创建您的账户",
"creating_account_btn": "创建中", "creating_account_btn": "创建中",
"redirecting_login": "正在跳转到登录页面", "redirecting_login": "正在跳转到登录页面"
"processing_authentication": "正在处理身份验证"
}, },
"searchSpace": { "searchSpace": {
"create_title": "创建搜索空间", "create_title": "创建搜索空间",
@ -131,10 +128,7 @@
"api_keys": "API 密钥", "api_keys": "API 密钥",
"profile": "个人资料", "profile": "个人资料",
"loading_dashboard": "正在加载仪表盘", "loading_dashboard": "正在加载仪表盘",
"checking_auth": "正在检查身份验证",
"loading_config": "正在加载配置", "loading_config": "正在加载配置",
"checking_llm_prefs": "正在检查您的 LLM 偏好设置",
"setting_up_ai": "正在设置 AI",
"config_error": "配置错误", "config_error": "配置错误",
"failed_load_llm_config": "无法加载您的 LLM 配置", "failed_load_llm_config": "无法加载您的 LLM 配置",
"error_loading_chats": "加载对话失败", "error_loading_chats": "加载对话失败",
@ -156,7 +150,6 @@
"create_search_space": "创建搜索空间", "create_search_space": "创建搜索空间",
"add_new_search_space": "添加新的搜索空间", "add_new_search_space": "添加新的搜索空间",
"loading": "加载中", "loading": "加载中",
"fetching_spaces": "正在获取您的搜索空间",
"may_take_moment": "这可能需要一些时间", "may_take_moment": "这可能需要一些时间",
"error": "错误", "error": "错误",
"something_wrong": "出现错误", "something_wrong": "出现错误",
@ -686,7 +679,7 @@
"system": "系统", "system": "系统",
"logout": "退出登录", "logout": "退出登录",
"inbox": "收件箱", "inbox": "收件箱",
"search_inbox": "搜索收件箱...", "search_inbox": "搜索收件箱",
"mark_all_read": "全部标记为已读", "mark_all_read": "全部标记为已读",
"mark_as_read": "标记为已读", "mark_as_read": "标记为已读",
"mentions": "提及", "mentions": "提及",
@ -700,7 +693,8 @@
"all": "全部", "all": "全部",
"unread": "未读", "unread": "未读",
"connectors": "连接器", "connectors": "连接器",
"all_connectors": "所有连接器" "all_connectors": "所有连接器",
"close": "关闭"
}, },
"errors": { "errors": {
"something_went_wrong": "出错了", "something_went_wrong": "出错了",

Some files were not shown because too many files have changed in this diff Show more