diff --git a/docs/chinese-llm-setup.md b/docs/chinese-llm-setup.md
index 1fb0ce2a..6638dbba 100644
--- a/docs/chinese-llm-setup.md
+++ b/docs/chinese-llm-setup.md
@@ -24,7 +24,7 @@ SurfSense 现已支持以下国产 LLM:
1. 登录 SurfSense Dashboard
2. 进入 **Settings** → **API Keys** (或 **LLM Configurations**)
-3. 点击 **Add LLM Model**
+3. 点击 **Add Model**
4. 从 **Provider** 下拉菜单中选择你的国产 LLM 提供商
5. 填写必填字段(见下方各提供商详细配置)
6. 点击 **Save**
diff --git a/surfsense_backend/alembic/versions/51_add_new_llm_config_table.py b/surfsense_backend/alembic/versions/51_add_new_llm_config_table.py
index 89a5c124..7d90f4b1 100644
--- a/surfsense_backend/alembic/versions/51_add_new_llm_config_table.py
+++ b/surfsense_backend/alembic/versions/51_add_new_llm_config_table.py
@@ -17,10 +17,10 @@ depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
"""
- Add the new_llm_configs table that combines LLM model settings with prompt configuration.
+ Add the new_llm_configs table that combines model settings with prompt configuration.
This table includes:
- - LLM model configuration (provider, model_name, api_key, etc.)
+ - Model configuration (provider, model_name, api_key, etc.)
- Configurable system instructions
- Citation toggle
"""
@@ -41,7 +41,7 @@ def upgrade() -> None:
name VARCHAR(100) NOT NULL,
description VARCHAR(500),
- -- LLM Model Configuration (same as llm_configs, excluding language)
+ -- Model Configuration (same as llm_configs, excluding language)
provider litellmprovider NOT NULL,
custom_provider VARCHAR(100),
model_name VARCHAR(100) NOT NULL,
diff --git a/surfsense_backend/app/config/global_llm_config.example.yaml b/surfsense_backend/app/config/global_llm_config.example.yaml
index 6ca3e95e..49a8d029 100644
--- a/surfsense_backend/app/config/global_llm_config.example.yaml
+++ b/surfsense_backend/app/config/global_llm_config.example.yaml
@@ -17,7 +17,7 @@
# - Configure router_settings below to customize the load balancing behavior
#
# Structure matches NewLLMConfig:
-# - LLM model configuration (provider, model_name, api_key, etc.)
+# - Model configuration (provider, model_name, api_key, etc.)
# - Prompt configuration (system_instructions, citations_enabled)
# Router Settings for Auto Mode
diff --git a/surfsense_backend/app/indexing_pipeline/exceptions.py b/surfsense_backend/app/indexing_pipeline/exceptions.py
index 9155e9ba..666fa4b9 100644
--- a/surfsense_backend/app/indexing_pipeline/exceptions.py
+++ b/surfsense_backend/app/indexing_pipeline/exceptions.py
@@ -59,7 +59,7 @@ class PipelineMessages:
LLM_AUTH = "LLM authentication failed. Check your API key."
LLM_PERMISSION = "LLM request denied. Check your account permissions."
- LLM_NOT_FOUND = "LLM model not found. Check your model configuration."
+ LLM_NOT_FOUND = "Model not found. Check your model configuration."
LLM_BAD_REQUEST = "LLM rejected the request. Document content may be invalid."
LLM_UNPROCESSABLE = (
"Document exceeds the LLM context window even after optimization."
@@ -67,7 +67,7 @@ class PipelineMessages:
LLM_RESPONSE = "LLM returned an invalid response."
LLM_AUTH = "LLM authentication failed. Check your API key."
LLM_PERMISSION = "LLM request denied. Check your account permissions."
- LLM_NOT_FOUND = "LLM model not found. Check your model configuration."
+ LLM_NOT_FOUND = "Model not found. Check your model configuration."
LLM_BAD_REQUEST = "LLM rejected the request. Document content may be invalid."
LLM_UNPROCESSABLE = (
"Document exceeds the LLM context window even after optimization."
diff --git a/surfsense_backend/app/routes/__init__.py b/surfsense_backend/app/routes/__init__.py
index 1937f11c..efa0ff2f 100644
--- a/surfsense_backend/app/routes/__init__.py
+++ b/surfsense_backend/app/routes/__init__.py
@@ -84,7 +84,7 @@ router.include_router(confluence_add_connector_router)
router.include_router(clickup_add_connector_router)
router.include_router(dropbox_add_connector_router)
router.include_router(new_llm_config_router) # LLM configs with prompt configuration
-router.include_router(model_list_router) # Dynamic LLM model catalogue from OpenRouter
+router.include_router(model_list_router) # Dynamic model catalogue from OpenRouter
router.include_router(logs_router)
router.include_router(circleback_webhook_router) # Circleback meeting webhooks
router.include_router(surfsense_docs_router) # Surfsense documentation for citations
diff --git a/surfsense_backend/app/routes/model_list_routes.py b/surfsense_backend/app/routes/model_list_routes.py
index ef6e3051..79ae7221 100644
--- a/surfsense_backend/app/routes/model_list_routes.py
+++ b/surfsense_backend/app/routes/model_list_routes.py
@@ -1,5 +1,5 @@
"""
-API route for fetching the available LLM models catalogue.
+API route for fetching the available models catalogue.
Serves a dynamically-updated list sourced from the OpenRouter public API,
with a local JSON fallback when the API is unreachable.
@@ -30,7 +30,7 @@ async def list_available_models(
user: User = Depends(current_active_user),
):
"""
- Return all available LLM models grouped by provider.
+ Return all available models grouped by provider.
The list is sourced from the OpenRouter public API and cached for 1 hour.
If the API is unreachable, a local fallback file is used instead.
diff --git a/surfsense_backend/app/routes/new_llm_config_routes.py b/surfsense_backend/app/routes/new_llm_config_routes.py
index f784bd27..78907c71 100644
--- a/surfsense_backend/app/routes/new_llm_config_routes.py
+++ b/surfsense_backend/app/routes/new_llm_config_routes.py
@@ -1,7 +1,7 @@
"""
API routes for NewLLMConfig CRUD operations.
-NewLLMConfig combines LLM model settings with prompt configuration:
+NewLLMConfig combines model settings with prompt configuration:
- LLM provider, model, API key, etc.
- Configurable system instructions
- Citation toggle
diff --git a/surfsense_backend/app/schemas/new_llm_config.py b/surfsense_backend/app/schemas/new_llm_config.py
index 9863665b..15ed4ce6 100644
--- a/surfsense_backend/app/schemas/new_llm_config.py
+++ b/surfsense_backend/app/schemas/new_llm_config.py
@@ -1,7 +1,7 @@
"""
Pydantic schemas for the NewLLMConfig API.
-NewLLMConfig combines LLM model settings with prompt configuration:
+NewLLMConfig combines model settings with prompt configuration:
- LLM provider, model, API key, etc.
- Configurable system instructions
- Citation toggle
@@ -26,7 +26,7 @@ class NewLLMConfigBase(BaseModel):
None, max_length=500, description="Optional description"
)
- # LLM Model Configuration
+ # Model Configuration
provider: LiteLLMProvider = Field(..., description="LiteLLM provider type")
custom_provider: str | None = Field(
None, max_length=100, description="Custom provider name when provider is CUSTOM"
@@ -71,7 +71,7 @@ class NewLLMConfigUpdate(BaseModel):
name: str | None = Field(None, max_length=100)
description: str | None = Field(None, max_length=500)
- # LLM Model Configuration
+ # Model Configuration
provider: LiteLLMProvider | None = None
custom_provider: str | None = Field(None, max_length=100)
model_name: str | None = Field(None, max_length=100)
@@ -106,7 +106,7 @@ class NewLLMConfigPublic(BaseModel):
name: str
description: str | None = None
- # LLM Model Configuration (no api_key)
+ # Model Configuration (no api_key)
provider: LiteLLMProvider
custom_provider: str | None = None
model_name: str
@@ -149,7 +149,7 @@ class GlobalNewLLMConfigRead(BaseModel):
name: str
description: str | None = None
- # LLM Model Configuration (no api_key)
+ # Model Configuration (no api_key)
provider: str # String because YAML doesn't enforce enum, "AUTO" for Auto mode
custom_provider: str | None = None
model_name: str
diff --git a/surfsense_backend/app/services/model_list_service.py b/surfsense_backend/app/services/model_list_service.py
index ebc0e0d7..2a81c2d5 100644
--- a/surfsense_backend/app/services/model_list_service.py
+++ b/surfsense_backend/app/services/model_list_service.py
@@ -1,5 +1,5 @@
"""
-Service for fetching and caching the available LLM model list.
+Service for fetching and caching the available model list.
Uses the OpenRouter public API as the primary source, with a local
fallback JSON file when the API is unreachable.
diff --git a/surfsense_web/atoms/new-llm-config/new-llm-config-mutation.atoms.ts b/surfsense_web/atoms/new-llm-config/new-llm-config-mutation.atoms.ts
index d6d3aa82..2a048ca3 100644
--- a/surfsense_web/atoms/new-llm-config/new-llm-config-mutation.atoms.ts
+++ b/surfsense_web/atoms/new-llm-config/new-llm-config-mutation.atoms.ts
@@ -34,7 +34,7 @@ export const createNewLLMConfigMutationAtom = atomWithMutation((get) => {
});
},
onError: (error: Error) => {
- toast.error(error.message || "Failed to create LLM model");
+ toast.error(error.message || "Failed to create model");
},
};
});
diff --git a/surfsense_web/atoms/new-llm-config/new-llm-config-query.atoms.ts b/surfsense_web/atoms/new-llm-config/new-llm-config-query.atoms.ts
index e4c8bcff..32250d39 100644
--- a/surfsense_web/atoms/new-llm-config/new-llm-config-query.atoms.ts
+++ b/surfsense_web/atoms/new-llm-config/new-llm-config-query.atoms.ts
@@ -66,7 +66,7 @@ export const defaultSystemInstructionsAtom = atomWithQuery(() => {
});
/**
- * Query atom for the dynamic LLM model catalogue.
+ * Query atom for the dynamic model catalogue.
* Fetched from the backend (which proxies OpenRouter's public API).
* Falls back to the static hardcoded list on error.
*/
diff --git a/surfsense_web/components/new-chat/model-selector.tsx b/surfsense_web/components/new-chat/model-selector.tsx
index 7a2a471b..b207d82b 100644
--- a/surfsense_web/components/new-chat/model-selector.tsx
+++ b/surfsense_web/components/new-chat/model-selector.tsx
@@ -498,7 +498,7 @@ export function ModelSelector({
}}
>
{canCreate
-? "Add your first LLM model to power document summarization, chat, and other agent capabilities"
-: "No LLM models have been added to this space yet. Contact a space owner to add one"}
+? "Add your first model to power document summarization, chat, and other agent capabilities"
+: "No models have been added to this space yet. Contact a space owner to add one"}