diff --git a/docs/chinese-llm-setup.md b/docs/chinese-llm-setup.md
index 2a184608f..37042aa2f 100644
--- a/docs/chinese-llm-setup.md
+++ b/docs/chinese-llm-setup.md
@@ -14,6 +14,7 @@ SurfSense 现已支持以下国产 LLM:
- ✅ **阿里通义千问 (Alibaba Qwen)** - 阿里云通义千问大模型
- ✅ **月之暗面 Kimi (Moonshot)** - 月之暗面 Kimi 大模型
- ✅ **智谱 AI GLM (Zhipu)** - 智谱 AI GLM 系列模型
+- ✅ **MiniMax** - MiniMax 大模型 (M2.5 系列,204K 上下文)
---
@@ -197,6 +198,52 @@ API Base URL: https://open.bigmodel.cn/api/paas/v4
---
+## 5️⃣ MiniMax 配置 | MiniMax Configuration
+
+### 获取 API Key
+
+1. 访问 [MiniMax 开放平台](https://platform.minimaxi.com/)
+2. 注册并登录账号
+3. 进入 **API Keys** 页面
+4. 创建新的 API Key
+5. 复制 API Key
+
+### 在 SurfSense 中配置
+
+| 字段 | 值 | 说明 |
+|------|-----|------|
+| **Configuration Name** | `MiniMax M2.5` | 配置名称(自定义) |
+| **Provider** | `MINIMAX` | 选择 MiniMax |
+| **Model Name** | `MiniMax-M2.5` | 推荐模型<br>其他选项: `MiniMax-M2.5-highspeed` |
+| **API Key** | `eyJ...` | 你的 MiniMax API Key |
+| **API Base URL** | `https://api.minimax.io/v1` | MiniMax API 地址 |
+| **Parameters** | `{"temperature": 1.0}` | 注意:temperature 必须在 (0.0, 1.0] 范围内,不能为 0 |
+
+### 示例配置
+
+```
+Configuration Name: MiniMax M2.5
+Provider: MINIMAX
+Model Name: MiniMax-M2.5
+API Key: eyJxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+API Base URL: https://api.minimax.io/v1
+```
+
+### 可用模型
+
+- **MiniMax-M2.5**: 高性能通用模型,204K 上下文窗口(推荐)
+- **MiniMax-M2.5-highspeed**: 高速推理版本,204K 上下文窗口
+
+### 注意事项
+
+- **temperature 参数**: MiniMax 要求 temperature 必须在 (0.0, 1.0] 范围内,不能设置为 0。建议使用 1.0。
+- 两个模型都支持 204K 超长上下文窗口,适合处理长文本任务。
+
+### 定价
+- 请访问 [MiniMax 定价页面](https://platform.minimaxi.com/document/Price) 查看最新价格
+
+---
+
## ⚙️ 高级配置 | Advanced Configuration
### 自定义参数 | Custom Parameters
@@ -268,8 +315,8 @@ docker compose logs backend | grep -i "error"
|---------|---------|------|
| **文档摘要** | Qwen-Plus, GLM-4 | 平衡性能和成本 |
| **代码分析** | DeepSeek-Coder | 代码专用 |
-| **长文本处理** | Kimi 128K | 超长上下文 |
-| **快速响应** | Qwen-Turbo, GLM-4-Flash | 速度优先 |
+| **长文本处理** | Kimi 128K, MiniMax-M2.5 (204K) | 超长上下文 |
+| **快速响应** | Qwen-Turbo, GLM-4-Flash, MiniMax-M2.5-highspeed | 速度优先 |
### 2. 成本优化
@@ -294,6 +341,7 @@ docker compose logs backend | grep -i "error"
- [阿里云百炼文档](https://help.aliyun.com/zh/model-studio/)
- [Moonshot AI 文档](https://platform.moonshot.cn/docs)
- [智谱 AI 文档](https://open.bigmodel.cn/dev/api)
+- [MiniMax 文档](https://platform.minimaxi.com/document/Guides)
### SurfSense 文档
diff --git a/surfsense_backend/alembic/versions/106_add_minimax_to_litellmprovider_enum.py b/surfsense_backend/alembic/versions/106_add_minimax_to_litellmprovider_enum.py
new file mode 100644
index 000000000..fed3bc7c3
--- /dev/null
+++ b/surfsense_backend/alembic/versions/106_add_minimax_to_litellmprovider_enum.py
@@ -0,0 +1,23 @@
+"""Add MINIMAX to LiteLLMProvider enum
+
+Revision ID: 106
+Revises: 105
+"""
+
+from collections.abc import Sequence
+
+from alembic import op
+
+revision: str = "106"
+down_revision: str | None = "105"
+branch_labels: str | Sequence[str] | None = None
+depends_on: str | Sequence[str] | None = None
+
+
+def upgrade() -> None:
+ op.execute("COMMIT")
+ op.execute("ALTER TYPE litellmprovider ADD VALUE IF NOT EXISTS 'MINIMAX'")
+
+
+def downgrade() -> None:
+ pass
diff --git a/surfsense_backend/app/agents/new_chat/llm_config.py b/surfsense_backend/app/agents/new_chat/llm_config.py
index 4ddb47330..60cd2a452 100644
--- a/surfsense_backend/app/agents/new_chat/llm_config.py
+++ b/surfsense_backend/app/agents/new_chat/llm_config.py
@@ -59,6 +59,7 @@ PROVIDER_MAP = {
"DATABRICKS": "databricks",
"COMETAPI": "cometapi",
"HUGGINGFACE": "huggingface",
+ "MINIMAX": "openai",
"CUSTOM": "custom",
}
diff --git a/surfsense_backend/app/config/global_llm_config.example.yaml b/surfsense_backend/app/config/global_llm_config.example.yaml
index 0bb00c398..6ca3e95e3 100644
--- a/surfsense_backend/app/config/global_llm_config.example.yaml
+++ b/surfsense_backend/app/config/global_llm_config.example.yaml
@@ -183,6 +183,23 @@ global_llm_configs:
use_default_system_instructions: true
citations_enabled: true
+ # Example: MiniMax M2.5 - High-performance with 204K context window
+ - id: -8
+ name: "Global MiniMax M2.5"
+ description: "MiniMax M2.5 with 204K context window and competitive pricing"
+ provider: "MINIMAX"
+ model_name: "MiniMax-M2.5"
+ api_key: "your-minimax-api-key-here"
+ api_base: "https://api.minimax.io/v1"
+ rpm: 60
+ tpm: 100000
+ litellm_params:
+ temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0], cannot be 0
+ max_tokens: 4000
+ system_instructions: ""
+ use_default_system_instructions: true
+ citations_enabled: true
+
# =============================================================================
# Image Generation Configuration
# =============================================================================
diff --git a/surfsense_backend/app/db.py b/surfsense_backend/app/db.py
index 062b11b3a..95ae8e728 100644
--- a/surfsense_backend/app/db.py
+++ b/surfsense_backend/app/db.py
@@ -215,6 +215,7 @@ class LiteLLMProvider(StrEnum):
COMETAPI = "COMETAPI"
HUGGINGFACE = "HUGGINGFACE"
GITHUB_MODELS = "GITHUB_MODELS"
+ MINIMAX = "MINIMAX"
CUSTOM = "CUSTOM"
diff --git a/surfsense_backend/app/services/llm_router_service.py b/surfsense_backend/app/services/llm_router_service.py
index 7a0b6e55b..63d8d10b9 100644
--- a/surfsense_backend/app/services/llm_router_service.py
+++ b/surfsense_backend/app/services/llm_router_service.py
@@ -85,6 +85,7 @@ PROVIDER_MAP = {
"ZHIPU": "openai",
"GITHUB_MODELS": "github",
"HUGGINGFACE": "huggingface",
+ "MINIMAX": "openai",
"CUSTOM": "custom",
}
diff --git a/surfsense_backend/app/services/llm_service.py b/surfsense_backend/app/services/llm_service.py
index fc28f477f..e11abd886 100644
--- a/surfsense_backend/app/services/llm_service.py
+++ b/surfsense_backend/app/services/llm_service.py
@@ -127,6 +127,7 @@ async def validate_llm_config(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai", # GLM needs special handling
+ "MINIMAX": "openai",
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(provider, provider.lower())
@@ -277,6 +278,7 @@ async def get_search_space_llm_instance(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
+ "MINIMAX": "openai",
}
provider_prefix = provider_map.get(
global_config["provider"], global_config["provider"].lower()
@@ -350,6 +352,7 @@ async def get_search_space_llm_instance(
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
+ "MINIMAX": "openai",
"GITHUB_MODELS": "github",
}
provider_prefix = provider_map.get(
diff --git a/surfsense_web/components/icons/providers/index.ts b/surfsense_web/components/icons/providers/index.ts
index 0a9bdcc66..2afed7fa5 100644
--- a/surfsense_web/components/icons/providers/index.ts
+++ b/surfsense_web/components/icons/providers/index.ts
@@ -12,6 +12,7 @@ export { default as FireworksAiIcon } from "./fireworksai.svg";
export { default as GeminiIcon } from "./gemini.svg";
export { default as GroqIcon } from "./groq.svg";
export { default as HuggingFaceIcon } from "./huggingface.svg";
+export { default as MiniMaxIcon } from "./minimax.svg";
export { default as MistralIcon } from "./mistral.svg";
export { default as MoonshotIcon } from "./moonshot.svg";
export { default as NscaleIcon } from "./nscale.svg";
diff --git a/surfsense_web/components/icons/providers/minimax.svg b/surfsense_web/components/icons/providers/minimax.svg
new file mode 100644
index 000000000..85ad3962b
--- /dev/null
+++ b/surfsense_web/components/icons/providers/minimax.svg
@@ -0,0 +1 @@
+
diff --git a/surfsense_web/contracts/enums/llm-models.ts b/surfsense_web/contracts/enums/llm-models.ts
index 91eb0cbd8..31097ca6e 100644
--- a/surfsense_web/contracts/enums/llm-models.ts
+++ b/surfsense_web/contracts/enums/llm-models.ts
@@ -1525,6 +1525,20 @@ export const LLM_MODELS: LLMModel[] = [
provider: "GITHUB_MODELS",
contextWindow: "64K",
},
+
+ // MiniMax
+ {
+ value: "MiniMax-M2.5",
+ label: "MiniMax M2.5",
+ provider: "MINIMAX",
+ contextWindow: "204K",
+ },
+ {
+ value: "MiniMax-M2.5-highspeed",
+ label: "MiniMax M2.5 Highspeed",
+ provider: "MINIMAX",
+ contextWindow: "204K",
+ },
];
// Helper function to get models by provider
diff --git a/surfsense_web/contracts/enums/llm-providers.ts b/surfsense_web/contracts/enums/llm-providers.ts
index ef03ca80c..ce2b6afe9 100644
--- a/surfsense_web/contracts/enums/llm-providers.ts
+++ b/surfsense_web/contracts/enums/llm-providers.ts
@@ -181,6 +181,13 @@ export const LLM_PROVIDERS: LLMProvider[] = [
description: "AI models from GitHub Marketplace",
apiBase: "https://models.github.ai/inference",
},
+ {
+ value: "MINIMAX",
+ label: "MiniMax",
+ example: "MiniMax-M2.5, MiniMax-M2.5-highspeed",
+ description: "High-performance models with 204K context",
+ apiBase: "https://api.minimax.io/v1",
+ },
{
value: "CUSTOM",
label: "Custom Provider",
diff --git a/surfsense_web/contracts/types/new-llm-config.types.ts b/surfsense_web/contracts/types/new-llm-config.types.ts
index 2814f2f25..3bb43680a 100644
--- a/surfsense_web/contracts/types/new-llm-config.types.ts
+++ b/surfsense_web/contracts/types/new-llm-config.types.ts
@@ -34,6 +34,7 @@ export const liteLLMProviderEnum = z.enum([
"COMETAPI",
"HUGGINGFACE",
"GITHUB_MODELS",
+ "MINIMAX",
"CUSTOM",
]);
diff --git a/surfsense_web/lib/provider-icons.tsx b/surfsense_web/lib/provider-icons.tsx
index e405b7766..d017d9aa2 100644
--- a/surfsense_web/lib/provider-icons.tsx
+++ b/surfsense_web/lib/provider-icons.tsx
@@ -15,6 +15,7 @@ import {
GeminiIcon,
GroqIcon,
HuggingFaceIcon,
+ MiniMaxIcon,
MistralIcon,
MoonshotIcon,
NscaleIcon,
@@ -85,6 +86,8 @@ export function getProviderIcon(
 			return <GroqIcon className={className} />;
 		case "HUGGINGFACE":
 			return <HuggingFaceIcon className={className} />;
+		case "MINIMAX":
+			return <MiniMaxIcon className={className} />;
 		case "MISTRAL":
 			return <MistralIcon className={className} />;
 		case "MOONSHOT":