Merge remote-tracking branch 'upstream/dev' into feat/web-search

This commit is contained in:
Anish Sarkar 2026-03-16 19:34:38 +05:30
commit 60d12b0a70
45 changed files with 377 additions and 198 deletions

View file

@ -14,6 +14,7 @@ SurfSense 现已支持以下国产 LLM
- ✅ **阿里通义千问 (Alibaba Qwen)** - 阿里云通义千问大模型
- ✅ **月之暗面 Kimi (Moonshot)** - 月之暗面 Kimi 大模型
- ✅ **智谱 AI GLM (Zhipu)** - 智谱 AI GLM 系列模型
- ✅ **MiniMax** - MiniMax 大模型 (M2.5 系列,204K 上下文)
---
@ -197,6 +198,52 @@ API Base URL: https://open.bigmodel.cn/api/paas/v4
---
## 5⃣ MiniMax 配置 | MiniMax Configuration
### 获取 API Key
1. 访问 [MiniMax 开放平台](https://platform.minimaxi.com/)
2. 注册并登录账号
3. 进入 **API Keys** 页面
4. 创建新的 API Key
5. 复制 API Key
### 在 SurfSense 中配置
| 字段 | 值 | 说明 |
|------|-----|------|
| **Configuration Name** | `MiniMax M2.5` | 配置名称(自定义) |
| **Provider** | `MINIMAX` | 选择 MiniMax |
| **Model Name** | `MiniMax-M2.5` | 推荐模型<br>其他选项: `MiniMax-M2.5-highspeed` |
| **API Key** | `eyJ...` | 你的 MiniMax API Key |
| **API Base URL** | `https://api.minimax.io/v1` | MiniMax API 地址 |
| **Parameters** | `{"temperature": 1.0}` | 注意:temperature 必须在 (0.0, 1.0] 范围内,不能为 0 |
### 示例配置
```
Configuration Name: MiniMax M2.5
Provider: MINIMAX
Model Name: MiniMax-M2.5
API Key: eyJxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
API Base URL: https://api.minimax.io/v1
```
### 可用模型
- **MiniMax-M2.5**: 高性能通用模型,204K 上下文窗口(推荐)
- **MiniMax-M2.5-highspeed**: 高速推理版本,204K 上下文窗口
### 注意事项
- **temperature 参数**: MiniMax 要求 temperature 必须在 (0.0, 1.0] 范围内,不能设置为 0。建议使用 1.0。
- 两个模型都支持 204K 超长上下文窗口,适合处理长文本任务。
### 定价
- 请访问 [MiniMax 定价页面](https://platform.minimaxi.com/document/Price) 查看最新价格
---
## ⚙️ 高级配置 | Advanced Configuration
### 自定义参数 | Custom Parameters
@ -268,8 +315,8 @@ docker compose logs backend | grep -i "error"
|---------|---------|------|
| **文档摘要** | Qwen-Plus, GLM-4 | 平衡性能和成本 |
| **代码分析** | DeepSeek-Coder | 代码专用 |
| **长文本处理** | Kimi 128K | 超长上下文 |
| **快速响应** | Qwen-Turbo, GLM-4-Flash | 速度优先 |
| **长文本处理** | Kimi 128K, MiniMax-M2.5 (204K) | 超长上下文 |
| **快速响应** | Qwen-Turbo, GLM-4-Flash, MiniMax-M2.5-highspeed | 速度优先 |
### 2. 成本优化
@ -294,6 +341,7 @@ docker compose logs backend | grep -i "error"
- [阿里云百炼文档](https://help.aliyun.com/zh/model-studio/)
- [Moonshot AI 文档](https://platform.moonshot.cn/docs)
- [智谱 AI 文档](https://open.bigmodel.cn/dev/api)
- [MiniMax 文档](https://platform.minimaxi.com/document/Guides)
### SurfSense 文档

View file

@ -0,0 +1,23 @@
"""Add MINIMAX to LiteLLMProvider enum
Revision ID: 106
Revises: 105
"""
from collections.abc import Sequence
from alembic import op
revision: str = "106"
down_revision: str | None = "105"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
op.execute("COMMIT")
op.execute("ALTER TYPE litellmprovider ADD VALUE IF NOT EXISTS 'MINIMAX'")
def downgrade() -> None:
pass

View file

@ -59,6 +59,7 @@ PROVIDER_MAP = {
"DATABRICKS": "databricks",
"COMETAPI": "cometapi",
"HUGGINGFACE": "huggingface",
"MINIMAX": "openai",
"CUSTOM": "custom",
}

View file

@ -183,6 +183,23 @@ global_llm_configs:
use_default_system_instructions: true
citations_enabled: true
# Example: MiniMax M2.5 - High-performance with 204K context window
- id: -8
name: "Global MiniMax M2.5"
description: "MiniMax M2.5 with 204K context window and competitive pricing"
provider: "MINIMAX"
model_name: "MiniMax-M2.5"
api_key: "your-minimax-api-key-here"
api_base: "https://api.minimax.io/v1"
rpm: 60
tpm: 100000
litellm_params:
temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0], cannot be 0
max_tokens: 4000
system_instructions: ""
use_default_system_instructions: true
citations_enabled: true
# =============================================================================
# Image Generation Configuration
# =============================================================================

View file

@ -463,7 +463,7 @@ async def _process_gmail_messages_phase2(
"connector_id": connector_id,
"source": "composio",
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -477,7 +477,7 @@ async def index_composio_google_calendar(
"connector_id": connector_id,
"source": "composio",
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -1112,7 +1112,7 @@ async def _index_composio_drive_delta_sync(
"connector_id": connector_id,
"source": "composio",
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()
@ -1520,7 +1520,7 @@ async def _index_composio_drive_full_scan(
"connector_id": connector_id,
"source": "composio",
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -215,6 +215,7 @@ class LiteLLMProvider(StrEnum):
COMETAPI = "COMETAPI"
HUGGINGFACE = "HUGGINGFACE"
GITHUB_MODELS = "GITHUB_MODELS"
MINIMAX = "MINIMAX"
CUSTOM = "CUSTOM"

View file

@ -1,3 +1,4 @@
import asyncio
import time
from datetime import datetime
@ -49,7 +50,7 @@ class ChucksHybridSearchRetriever:
# Get embedding for the query
embedding_model = config.embedding_model_instance
t_embed = time.perf_counter()
query_embedding = embedding_model.embed(query_text)
query_embedding = await asyncio.to_thread(embedding_model.embed, query_text)
perf.debug(
"[chunk_search] vector_search embedding in %.3fs",
time.perf_counter() - t_embed,
@ -195,7 +196,7 @@ class ChucksHybridSearchRetriever:
if query_embedding is None:
embedding_model = config.embedding_model_instance
t_embed = time.perf_counter()
query_embedding = embedding_model.embed(query_text)
query_embedding = await asyncio.to_thread(embedding_model.embed, query_text)
perf.debug(
"[chunk_search] hybrid_search embedding in %.3fs",
time.perf_counter() - t_embed,
@ -427,4 +428,4 @@ class ChucksHybridSearchRetriever:
search_space_id,
document_type,
)
return final_docs
return final_docs

View file

@ -1,11 +1,10 @@
import logging
from datetime import datetime
from sqlalchemy import delete
from sqlalchemy.ext.asyncio import AsyncSession
from app.connectors.linear_connector import LinearConnector
from app.db import Chunk, Document
from app.db import Document
from app.services.llm_service import get_user_long_context_llm
from app.utils.document_converters import (
create_document_chunks,
@ -105,10 +104,6 @@ class LinearKBSyncService:
)
summary_embedding = embed_text(summary_content)
await self.db_session.execute(
delete(Chunk).where(Chunk.document_id == document.id)
)
chunks = await create_document_chunks(issue_content)
document.title = f"{issue_identifier}: {issue_title}"
@ -131,7 +126,7 @@ class LinearKBSyncService:
"connector_id": connector_id,
}
flag_modified(document, "document_metadata")
safe_set_chunks(document, chunks)
await safe_set_chunks(self.db_session, document, chunks)
document.updated_at = get_current_timestamp()
await self.db_session.commit()

View file

@ -85,6 +85,7 @@ PROVIDER_MAP = {
"ZHIPU": "openai",
"GITHUB_MODELS": "github",
"HUGGINGFACE": "huggingface",
"MINIMAX": "openai",
"CUSTOM": "custom",
}

View file

@ -127,6 +127,7 @@ async def validate_llm_config(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai", # GLM needs special handling
"MINIMAX": "openai",
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(provider, provider.lower())
@ -277,6 +278,7 @@ async def get_search_space_llm_instance(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"MINIMAX": "openai",
}
provider_prefix = provider_map.get(
global_config["provider"], global_config["provider"].lower()
@ -350,6 +352,7 @@ async def get_search_space_llm_instance(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"MINIMAX": "openai",
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(

View file

@ -1,10 +1,9 @@
import logging
from datetime import datetime
from sqlalchemy import delete
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import Chunk, Document
from app.db import Document
from app.services.llm_service import get_user_long_context_llm
from app.utils.document_converters import (
create_document_chunks,
@ -130,11 +129,6 @@ class NotionKBSyncService:
summary_content = f"Notion Page: {document.document_metadata.get('page_title')}\n\n{full_content}"
summary_embedding = embed_text(summary_content)
logger.debug(f"Deleting old chunks for document {document_id}")
await self.db_session.execute(
delete(Chunk).where(Chunk.document_id == document.id)
)
logger.debug("Creating new chunks")
chunks = await create_document_chunks(full_content)
logger.debug(f"Created {len(chunks)} chunks")
@ -147,7 +141,7 @@ class NotionKBSyncService:
**document.document_metadata,
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
safe_set_chunks(document, chunks)
await safe_set_chunks(self.db_session, document, chunks)
document.updated_at = get_current_timestamp()
logger.debug("Committing changes to database")

View file

@ -432,7 +432,7 @@ async def index_airtable_records(
"table_name": item["table_name"],
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -28,45 +28,37 @@ def get_current_timestamp() -> datetime:
return datetime.now(UTC)
async def safe_set_chunks(
    session: "AsyncSession", document: Document, chunks: list
) -> None:
    """
    Delete old chunks and assign new ones to a document.

    This replaces direct ``document.chunks = chunks``, which triggers lazy
    loading (and MissingGreenlet errors in async contexts). It also
    explicitly deletes pre-existing chunks so they don't accumulate across
    repeated re-indexes, because ``set_committed_value`` bypasses
    SQLAlchemy's delete-orphan cascade.

    Args:
        session: The current async database session.
        document: The Document object to update.
        chunks: List of Chunk objects to assign.
    """
    from sqlalchemy import delete
    from sqlalchemy.orm.attributes import set_committed_value

    from app.db import Chunk

    if document.id is not None:
        # Explicit delete: set_committed_value (below) skips the ORM's
        # orphan handling, so stale chunk rows must be removed by hand.
        await session.execute(
            delete(Chunk).where(Chunk.document_id == document.id)
        )
    # NOTE(review): when document.id is None the new chunks get a NULL
    # document_id; callers appear to pass already-persisted documents —
    # confirm before relying on this for unflushed documents.
    for chunk in chunks:
        chunk.document_id = document.id
    # Set the relationship without loading the old collection.
    set_committed_value(document, "chunks", chunks)
    session.add_all(chunks)
def parse_date_flexible(date_str: str) -> datetime:

View file

@ -430,7 +430,7 @@ async def index_bookstack_pages(
document.content_hash = item["content_hash"]
document.embedding = summary_embedding
document.document_metadata = doc_metadata
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -439,7 +439,7 @@ async def index_clickup_tasks(
"connector_id": connector_id,
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -413,7 +413,7 @@ async def index_confluence_pages(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -690,7 +690,7 @@ async def index_discord_messages(
"indexed_at": datetime.now(UTC).strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -386,7 +386,7 @@ async def index_elasticsearch_documents(
document.content_hash = item["content_hash"]
document.unique_identifier_hash = item["unique_identifier_hash"]
document.document_metadata = metadata
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -415,7 +415,7 @@ async def index_github_repos(
document.content_hash = item["content_hash"]
document.embedding = summary_embedding
document.document_metadata = doc_metadata
safe_set_chunks(document, chunks_data)
await safe_set_chunks(session, document, chunks_data)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -528,7 +528,7 @@ async def index_google_calendar_events(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -451,7 +451,7 @@ async def index_google_gmail_messages(
"date": item["date_str"],
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -393,7 +393,7 @@ async def index_jira_issues(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -431,7 +431,7 @@ async def index_linear_issues(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -488,7 +488,7 @@ async def index_luma_events(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -479,7 +479,7 @@ async def index_notion_pages(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -571,7 +571,7 @@ async def index_obsidian_vault(
document.content_hash = content_hash
document.embedding = embedding
document.document_metadata = document_metadata
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -564,7 +564,7 @@ async def index_slack_messages(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -603,7 +603,7 @@ async def index_teams_messages(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.updated_at = get_current_timestamp()
document.status = DocumentStatus.ready()

View file

@ -410,7 +410,7 @@ async def index_crawled_urls(
"indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"connector_id": connector_id,
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.status = DocumentStatus.ready() # READY status
document.updated_at = get_current_timestamp()

View file

@ -14,45 +14,37 @@ from app.db import Document
md = MarkdownifyTransformer()
async def safe_set_chunks(
    session: "AsyncSession", document: Document, chunks: list
) -> None:
    """
    Delete old chunks and assign new ones to a document.

    This replaces direct ``document.chunks = chunks``, which triggers lazy
    loading (and MissingGreenlet errors in async contexts). It also
    explicitly deletes pre-existing chunks so they don't accumulate across
    repeated re-indexes, because ``set_committed_value`` bypasses
    SQLAlchemy's delete-orphan cascade.

    Args:
        session: The current async database session.
        document: The Document object to update.
        chunks: List of Chunk objects to assign.
    """
    from sqlalchemy import delete
    from sqlalchemy.orm.attributes import set_committed_value

    from app.db import Chunk

    if document.id is not None:
        # Explicit delete: set_committed_value (below) skips the ORM's
        # orphan handling, so stale chunk rows must be removed by hand.
        await session.execute(
            delete(Chunk).where(Chunk.document_id == document.id)
        )
    # NOTE(review): when document.id is None the new chunks get a NULL
    # document_id; callers appear to pass already-persisted documents —
    # confirm before relying on this for unflushed documents.
    for chunk in chunks:
        chunk.document_id = document.id
    # Set the relationship without loading the old collection.
    set_committed_value(document, "chunks", chunks)
    session.add_all(chunks)
def get_current_timestamp() -> datetime:

View file

@ -227,7 +227,7 @@ async def add_circleback_meeting_document(
if summary_embedding is not None:
document.embedding = summary_embedding
document.document_metadata = document_metadata
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.source_markdown = markdown_content
document.content_needs_reindexing = False
document.updated_at = get_current_timestamp()

View file

@ -21,6 +21,7 @@ from app.utils.document_converters import (
from .base import (
check_document_by_unique_identifier,
get_current_timestamp,
safe_set_chunks,
)
@ -154,7 +155,7 @@ async def add_extension_received_document(
existing_document.content_hash = content_hash
existing_document.embedding = summary_embedding
existing_document.document_metadata = content.metadata.model_dump()
existing_document.chunks = chunks
await safe_set_chunks(session, existing_document, chunks)
existing_document.source_markdown = combined_document_string
existing_document.updated_at = get_current_timestamp()

View file

@ -35,6 +35,7 @@ from .base import (
check_document_by_unique_identifier,
check_duplicate_document,
get_current_timestamp,
safe_set_chunks,
)
from .markdown_processor import add_received_markdown_file_document
@ -488,7 +489,7 @@ async def add_received_file_document_using_unstructured(
"FILE_NAME": file_name,
"ETL_SERVICE": "UNSTRUCTURED",
}
existing_document.chunks = chunks
await safe_set_chunks(session, existing_document, chunks)
existing_document.source_markdown = file_in_markdown
existing_document.content_needs_reindexing = False
existing_document.updated_at = get_current_timestamp()
@ -622,7 +623,7 @@ async def add_received_file_document_using_llamacloud(
"FILE_NAME": file_name,
"ETL_SERVICE": "LLAMACLOUD",
}
existing_document.chunks = chunks
await safe_set_chunks(session, existing_document, chunks)
existing_document.source_markdown = file_in_markdown
existing_document.content_needs_reindexing = False
existing_document.updated_at = get_current_timestamp()
@ -777,7 +778,7 @@ async def add_received_file_document_using_docling(
"FILE_NAME": file_name,
"ETL_SERVICE": "DOCLING",
}
existing_document.chunks = chunks
await safe_set_chunks(session, existing_document, chunks)
existing_document.source_markdown = file_in_markdown
existing_document.content_needs_reindexing = False
existing_document.updated_at = get_current_timestamp()

View file

@ -21,6 +21,7 @@ from .base import (
check_document_by_unique_identifier,
check_duplicate_document,
get_current_timestamp,
safe_set_chunks,
)
@ -258,7 +259,7 @@ async def add_received_markdown_file_document(
existing_document.document_metadata = {
"FILE_NAME": file_name,
}
existing_document.chunks = chunks
await safe_set_chunks(session, existing_document, chunks)
existing_document.source_markdown = file_in_markdown
existing_document.updated_at = get_current_timestamp()
existing_document.status = DocumentStatus.ready() # Mark as ready

View file

@ -419,7 +419,7 @@ async def add_youtube_video_document(
"author": video_data.get("author_name", "Unknown"),
"thumbnail": video_data.get("thumbnail_url", ""),
}
safe_set_chunks(document, chunks)
await safe_set_chunks(session, document, chunks)
document.source_markdown = combined_document_string
document.status = DocumentStatus.ready() # READY status - fully processed
document.updated_at = get_current_timestamp()

View file

@ -13,12 +13,32 @@ from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from sqlalchemy import delete as sa_delete
from sqlalchemy.orm.attributes import set_committed_value
from app.config import config
from app.db import SurfsenseDocsChunk, SurfsenseDocsDocument, async_session_maker
from app.utils.document_converters import embed_text
logger = logging.getLogger(__name__)
async def _safe_set_docs_chunks(
    session: AsyncSession, document: SurfsenseDocsDocument, chunks: list
) -> None:
    """safe_set_chunks variant for the SurfsenseDocsDocument/Chunk models.

    Deletes any chunk rows already attached to ``document``, points the new
    chunks at it, and sets the relationship via ``set_committed_value`` so
    SQLAlchemy does not lazy-load the old collection (which raises
    MissingGreenlet under the async driver).

    Args:
        session: Active async database session.
        document: The docs document being (re)indexed.
        chunks: New chunk objects to attach.
    """
    if document.id is not None:
        # Explicit delete: set_committed_value below bypasses the
        # delete-orphan cascade, so old rows would otherwise accumulate.
        await session.execute(
            sa_delete(SurfsenseDocsChunk).where(
                SurfsenseDocsChunk.document_id == document.id
            )
        )
    # NOTE(review): if document.id is None the chunks are added with a NULL
    # document_id — presumably callers only pass flushed documents; confirm.
    for chunk in chunks:
        chunk.document_id = document.id
    set_committed_value(document, "chunks", chunks)
    session.add_all(chunks)
# Path to docs relative to project root
DOCS_DIR = (
Path(__file__).resolve().parent.parent.parent.parent
@ -156,7 +176,7 @@ async def index_surfsense_docs(session: AsyncSession) -> tuple[int, int, int, in
existing_doc.content = content
existing_doc.content_hash = content_hash
existing_doc.embedding = embed_text(content)
existing_doc.chunks = chunks
await _safe_set_docs_chunks(session, existing_doc, chunks)
existing_doc.updated_at = datetime.now(UTC)
updated += 1

View file

@ -19,10 +19,12 @@ import {
ChevronRightIcon,
CopyIcon,
DownloadIcon,
Plus,
RefreshCwIcon,
Settings2,
SquareIcon,
Unplug,
Wrench,
Upload,
X,
} from "lucide-react";
import { useParams } from "next/navigation";
@ -53,6 +55,7 @@ import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { AssistantMessage } from "@/components/assistant-ui/assistant-message";
import { ChatSessionStatus } from "@/components/assistant-ui/chat-session-status";
import { ConnectorIndicator } from "@/components/assistant-ui/connector-popup";
import { useDocumentUploadDialog } from "@/components/assistant-ui/document-upload-popup";
import {
InlineMentionEditor,
type InlineMentionEditorRef,
@ -73,6 +76,13 @@ import {
import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { Avatar, AvatarFallback, AvatarGroup } from "@/components/ui/avatar";
import { Button } from "@/components/ui/button";
import { Drawer, DrawerContent, DrawerHandle, DrawerTitle } from "@/components/ui/drawer";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import { Switch } from "@/components/ui/switch";
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip";
@ -278,21 +288,14 @@ const ConnectToolsBanner: FC = () => {
</Avatar>
))}
</AvatarGroup>
<span
role="button"
tabIndex={0}
<button
type="button"
onClick={handleDismiss}
onKeyDown={(e) => {
if (e.key === "Enter" || e.key === " ") {
e.preventDefault();
handleDismiss(e as unknown as React.MouseEvent);
}
}}
className="shrink-0 ml-0.5 p-0.5 text-muted-foreground/40 hover:text-foreground transition-colors"
aria-label="Dismiss"
>
<X className="size-3.5" />
</span>
</button>
</button>
</div>
);
@ -564,6 +567,7 @@ const ComposerAction: FC<ComposerActionProps> = ({ isBlockedByOtherUser = false
const setConnectorDialogOpen = useSetAtom(connectorDialogOpenAtom);
const [toolsPopoverOpen, setToolsPopoverOpen] = useState(false);
const isDesktop = useMediaQuery("(min-width: 640px)");
const { openDialog: openUploadDialog } = useDocumentUploadDialog();
const [toolsScrollPos, setToolsScrollPos] = useState<"top" | "middle" | "bottom">("top");
const handleToolsScroll = useCallback((e: React.UIEvent<HTMLDivElement>) => {
const el = e.currentTarget;
@ -607,87 +611,144 @@ const ComposerAction: FC<ComposerActionProps> = ({ isBlockedByOtherUser = false
return (
<div className="aui-composer-action-wrapper relative mx-3 mb-2 flex items-center justify-between">
<div className="flex items-center gap-1">
<Popover open={toolsPopoverOpen} onOpenChange={setToolsPopoverOpen}>
<PopoverTrigger asChild>
<TooltipIconButton
tooltip="Manage tools"
side="bottom"
{!isDesktop ? (
<>
<DropdownMenu>
<DropdownMenuTrigger asChild>
<Button
variant="ghost"
size="icon"
className="size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
aria-label="More actions"
data-joyride="connector-icon"
>
<Plus className="size-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent side="top" align="start" sideOffset={8}>
<DropdownMenuItem onSelect={() => setToolsPopoverOpen(true)}>
<Settings2 className="size-4" />
Manage Tools
</DropdownMenuItem>
<DropdownMenuItem onSelect={() => openUploadDialog()}>
<Upload className="size-4" />
Upload Files
</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>
<Drawer open={toolsPopoverOpen} onOpenChange={setToolsPopoverOpen}>
<DrawerContent className="max-h-[60dvh]">
<DrawerHandle />
<div className="flex items-center justify-between px-4 py-2">
<DrawerTitle className="text-sm font-medium">Agent Tools</DrawerTitle>
<span className="text-xs text-muted-foreground">
{enabledCount}/{agentTools?.length ?? 0} enabled
</span>
</div>
<div className="overflow-y-auto pb-6" onScroll={handleToolsScroll}>
{agentTools?.map((tool) => {
const isDisabled = disabledTools.includes(tool.name);
return (
<div
key={tool.name}
className="flex w-full items-center gap-3 px-4 py-2 hover:bg-muted-foreground/10 transition-colors"
>
<span className="flex-1 min-w-0 text-sm font-medium truncate">
{formatToolName(tool.name)}
</span>
<Switch
checked={!isDisabled}
onCheckedChange={() => toggleTool(tool.name)}
className="shrink-0"
/>
</div>
);
})}
{!agentTools?.length && (
<div className="px-4 py-6 text-center text-sm text-muted-foreground">
Loading tools...
</div>
)}
</div>
</DrawerContent>
</Drawer>
<Button
variant="ghost"
size="icon"
className="size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
aria-label="Manage tools"
data-joyride="connector-icon"
aria-label="Manage connectors"
onClick={() => setConnectorDialogOpen(true)}
>
<Wrench className="size-4" />
</TooltipIconButton>
</PopoverTrigger>
<PopoverContent
side="bottom"
align="start"
sideOffset={12}
className="w-[calc(100vw-2rem)] max-w-56 sm:max-w-72 sm:w-72 p-0 select-none"
onOpenAutoFocus={(e) => e.preventDefault()}
>
<div className="flex items-center justify-between px-2.5 py-2 sm:px-3 sm:py-2.5 border-b">
<span className="text-xs sm:text-sm font-medium">Agent Tools</span>
<span className="text-[10px] sm:text-xs text-muted-foreground">
{enabledCount}/{agentTools?.length ?? 0} enabled
</span>
</div>
<div
className="max-h-48 sm:max-h-64 overflow-y-auto py-0.5 sm:py-1"
onScroll={handleToolsScroll}
style={{
maskImage: `linear-gradient(to bottom, ${toolsScrollPos === "top" ? "black" : "transparent"}, black 16px, black calc(100% - 16px), ${toolsScrollPos === "bottom" ? "black" : "transparent"})`,
WebkitMaskImage: `linear-gradient(to bottom, ${toolsScrollPos === "top" ? "black" : "transparent"}, black 16px, black calc(100% - 16px), ${toolsScrollPos === "bottom" ? "black" : "transparent"})`,
}}
<Unplug className="size-4" />
</Button>
</>
) : (
<Popover open={toolsPopoverOpen} onOpenChange={setToolsPopoverOpen}>
<PopoverTrigger asChild>
<TooltipIconButton
tooltip="Manage tools"
side="bottom"
variant="ghost"
size="icon"
className="size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
aria-label="Manage tools"
data-joyride="connector-icon"
>
<Settings2 className="size-4" />
</TooltipIconButton>
</PopoverTrigger>
<PopoverContent
side="bottom"
align="start"
sideOffset={12}
className="w-[calc(100vw-2rem)] max-w-56 sm:max-w-72 sm:w-72 p-0 select-none"
onOpenAutoFocus={(e) => e.preventDefault()}
>
{agentTools?.map((tool) => {
const isDisabled = disabledTools.includes(tool.name);
const row = (
<label className="flex items-center gap-2 sm:gap-3 px-2.5 sm:px-3 py-1 sm:py-1.5 cursor-pointer hover:bg-muted-foreground/10 transition-colors">
<span className="flex-1 min-w-0 text-xs sm:text-sm font-medium truncate">
{formatToolName(tool.name)}
</span>
<Switch
checked={!isDisabled}
onCheckedChange={() => toggleTool(tool.name)}
className="shrink-0 scale-[0.6] sm:scale-75"
/>
</label>
);
if (!isDesktop) {
return <div key={tool.name}>{row}</div>;
}
return (
<Tooltip key={tool.name}>
<TooltipTrigger asChild>{row}</TooltipTrigger>
<TooltipContent side="right" className="max-w-64 text-xs">
{tool.description}
</TooltipContent>
</Tooltip>
);
})}
{!agentTools?.length && (
<div className="px-3 py-4 text-center text-xs text-muted-foreground">
Loading tools...
</div>
)}
</div>
</PopoverContent>
</Popover>
{!isDesktop && (
<TooltipIconButton
tooltip="Manage connectors"
side="bottom"
variant="ghost"
size="icon"
className="size-[34px] rounded-full p-1 font-semibold text-xs hover:bg-muted-foreground/15 dark:border-muted-foreground/15 dark:hover:bg-muted-foreground/30"
aria-label="Manage connectors"
onClick={() => setConnectorDialogOpen(true)}
>
<Unplug className="size-4" />
</TooltipIconButton>
<div className="flex items-center justify-between px-2.5 py-2 sm:px-3 sm:py-2.5 border-b">
<span className="text-xs sm:text-sm font-medium">Agent Tools</span>
<span className="text-[10px] sm:text-xs text-muted-foreground">
{enabledCount}/{agentTools?.length ?? 0} enabled
</span>
</div>
<div
className="max-h-48 sm:max-h-64 overflow-y-auto py-0.5 sm:py-1"
onScroll={handleToolsScroll}
style={{
maskImage: `linear-gradient(to bottom, ${toolsScrollPos === "top" ? "black" : "transparent"}, black 16px, black calc(100% - 16px), ${toolsScrollPos === "bottom" ? "black" : "transparent"})`,
WebkitMaskImage: `linear-gradient(to bottom, ${toolsScrollPos === "top" ? "black" : "transparent"}, black 16px, black calc(100% - 16px), ${toolsScrollPos === "bottom" ? "black" : "transparent"})`,
}}
>
{agentTools?.map((tool) => {
const isDisabled = disabledTools.includes(tool.name);
const row = (
<div className="flex w-full items-center gap-2 sm:gap-3 px-2.5 sm:px-3 py-1 sm:py-1.5 hover:bg-muted-foreground/10 transition-colors">
<span className="flex-1 min-w-0 text-xs sm:text-sm font-medium truncate">
{formatToolName(tool.name)}
</span>
<Switch
checked={!isDisabled}
onCheckedChange={() => toggleTool(tool.name)}
className="shrink-0 scale-[0.6] sm:scale-75"
/>
</div>
);
return (
<Tooltip key={tool.name}>
<TooltipTrigger asChild>{row}</TooltipTrigger>
<TooltipContent side="right" className="max-w-64 text-xs">
{tool.description}
</TooltipContent>
</Tooltip>
);
})}
{!agentTools?.length && (
<div className="px-3 py-4 text-center text-xs text-muted-foreground">
Loading tools...
</div>
)}
</div>
</PopoverContent>
</Popover>
)}
{sidebarDocs.length > 0 && (
<button

View file

@ -12,6 +12,7 @@ export { default as FireworksAiIcon } from "./fireworksai.svg";
export { default as GeminiIcon } from "./gemini.svg";
export { default as GroqIcon } from "./groq.svg";
export { default as HuggingFaceIcon } from "./huggingface.svg";
export { default as MiniMaxIcon } from "./minimax.svg";
export { default as MistralIcon } from "./mistral.svg";
export { default as MoonshotIcon } from "./moonshot.svg";
export { default as NscaleIcon } from "./nscale.svg";

View file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path fill="currentColor" d="M21.6 4.8h-2.4l-4.2 7.2-3-5.16h-.01L9 12 4.8 4.8H2.4L9 16.14V20.4h2.4v-4.2h1.2v4.2h2.4v-4.26z"/></svg>

After

Width:  |  Height:  |  Size: 192 B

View file

@ -1525,6 +1525,20 @@ export const LLM_MODELS: LLMModel[] = [
provider: "GITHUB_MODELS",
contextWindow: "64K",
},
// MiniMax
{
value: "MiniMax-M2.5",
label: "MiniMax M2.5",
provider: "MINIMAX",
contextWindow: "204K",
},
{
value: "MiniMax-M2.5-highspeed",
label: "MiniMax M2.5 Highspeed",
provider: "MINIMAX",
contextWindow: "204K",
},
];
// Helper function to get models by provider

View file

@ -181,6 +181,13 @@ export const LLM_PROVIDERS: LLMProvider[] = [
description: "AI models from GitHub Marketplace",
apiBase: "https://models.github.ai/inference",
},
{
value: "MINIMAX",
label: "MiniMax",
example: "MiniMax-M2.5, MiniMax-M2.5-highspeed",
description: "High-performance models with 204K context",
apiBase: "https://api.minimax.io/v1",
},
{
value: "CUSTOM",
label: "Custom Provider",

View file

@ -34,6 +34,7 @@ export const liteLLMProviderEnum = z.enum([
"COMETAPI",
"HUGGINGFACE",
"GITHUB_MODELS",
"MINIMAX",
"CUSTOM",
]);

View file

@ -15,6 +15,7 @@ import {
GeminiIcon,
GroqIcon,
HuggingFaceIcon,
MiniMaxIcon,
MistralIcon,
MoonshotIcon,
NscaleIcon,
@ -85,6 +86,8 @@ export function getProviderIcon(
return <GroqIcon className={cn(className)} />;
case "HUGGINGFACE":
return <HuggingFaceIcon className={cn(className)} />;
case "MINIMAX":
return <MiniMaxIcon className={cn(className)} />;
case "MISTRAL":
return <MistralIcon className={cn(className)} />;
case "MOONSHOT":