feat(llm): expand LLM provider options and improve model selection UI

- Added new LLM providers (xAI, Fireworks AI, Cerebras, SambaNova, Cloudflare Workers AI, Databricks, Bedrock, and a Custom OpenAI-compatible option) to the backend.
- Updated the model selection UI to dynamically display available models based on the selected provider.
- Reset the model selection whenever the provider changes.
- Added contextual model information (suggestions and context windows) to the selection UI.
DESKTOP-RTLN3BA\$punk 2025-11-13 02:41:30 -08:00
parent c1e8753567
commit 38dffaffa3
10 changed files with 2197 additions and 126 deletions

@@ -0,0 +1,199 @@
"""Update LiteLLMProvider enum with comprehensive provider support
This migration adds support for all major LiteLLM providers including:
- Fast inference platforms (XAI, Cerebras, SambaNova, Fireworks AI)
- Cloud platforms (Cloudflare, Databricks)
- Renames AWS_BEDROCK to BEDROCK for consistency
- Adds CUSTOM for custom OpenAI-compatible endpoints
Revision ID: 35
Revises: 34
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "35"
down_revision: str | None = "34"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    """
    Add new LLM providers to the LiteLLMProvider enum.

    New providers added:
    - XAI: xAI's Grok models
    - FIREWORKS_AI: Fireworks AI platform
    - CEREBRAS: Cerebras inference platform
    - SAMBANOVA: SambaNova inference platform
    - CLOUDFLARE: Cloudflare Workers AI
    - DATABRICKS: Databricks Model Serving
    - BEDROCK: AWS Bedrock (standardized name; AWS_BEDROCK remains valid)
    - CUSTOM: Custom OpenAI-compatible endpoints
    """
    # Each value is added through an idempotent DO block: ALTER TYPE only runs
    # if the label is not already present, so the migration can be re-applied
    # safely against a database in any intermediate state.
    new_providers = [
        "XAI",  # xAI Grok models
        "FIREWORKS_AI",  # Fireworks AI platform
        "CEREBRAS",  # Cerebras inference platform
        "SAMBANOVA",  # SambaNova inference platform
        "CLOUDFLARE",  # Cloudflare Workers AI
        "DATABRICKS",  # Databricks Model Serving
        "BEDROCK",  # new standardized name (AWS_BEDROCK stays for legacy configs)
        "CUSTOM",  # custom OpenAI-compatible endpoints
    ]
    for label in new_providers:
        op.execute(
            f"""
            DO $$
            BEGIN
                IF NOT EXISTS (
                    SELECT 1 FROM pg_enum
                    WHERE enumtypid = 'litellmprovider'::regtype
                    AND enumlabel = '{label}'
                ) THEN
                    ALTER TYPE litellmprovider ADD VALUE '{label}';
                END IF;
            END$$;
            """
        )
    # Note: Both AWS_BEDROCK and BEDROCK coexist in the enum for backward
    # compatibility:
    # - New configurations will use BEDROCK (via the frontend)
    # - Existing AWS_BEDROCK configurations will continue to work
    # - The backend service maps both values to the same LiteLLM prefix
    #
    # Legacy enum values (PALM, NLPCLOUD, ALEPH_ALPHA, PETALS, etc.) also remain:
    # PostgreSQL cannot remove enum values without recreating the entire type.
    #
    # Data migration from AWS_BEDROCK -> BEDROCK is NOT performed here because:
    # 1. PostgreSQL does not allow a newly added enum value to be used in the
    #    same transaction that added it, and this migration runs in one.
    # 2. Both values work correctly with the backend service.
    # 3. Users can manually update old configs if desired (not required).


def downgrade() -> None:
    """
    Downgrade migration (intentionally a no-op).

    PostgreSQL does not support removing enum values directly; doing so would
    require recreating the entire enum type and updating every dependent
    object. For safety and data preservation, this downgrade does nothing.

    To downgrade manually:
    1. Ensure no llm_configs rows use the new providers.
    2. Recreate the enum without the new values (a complex operation).
    This is not automated to prevent accidental data loss.

    After downgrading the application code, the new enum values (XAI,
    FIREWORKS_AI, CEREBRAS, BEDROCK, etc.) remain in the database but are no
    longer selectable in the application.
    """
    # PostgreSQL doesn't support removing enum values directly;
    # leaving this as a no-op is the safe choice.
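The notes above leave existing AWS_BEDROCK rows untouched. For teams that later want to consolidate on BEDROCK, the safe pattern is a separate follow-up revision: the new label only becomes usable once the transaction that added it has committed, so the follow-up must run in its own transaction (for example with transaction_per_migration=True in env.py, or by upgrading in two steps). A minimal sketch of such a revision (hypothetical: the revision id "36" and the llm_configs table and provider column names are assumed from this codebase's naming, not taken from this commit):

"""Hypothetical follow-up: consolidate AWS_BEDROCK rows onto BEDROCK

Revision ID: 36
Revises: 35
"""

from collections.abc import Sequence

from alembic import op

revision: str = "36"
down_revision: str | None = "35"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None


def upgrade() -> None:
    # Safe here: 'BEDROCK' was committed when revision 35 finished, so this
    # transaction may reference it.
    op.execute("UPDATE llm_configs SET provider = 'BEDROCK' WHERE provider = 'AWS_BEDROCK'")


def downgrade() -> None:
    # Reverse the consolidation; AWS_BEDROCK still exists in the enum.
    op.execute("UPDATE llm_configs SET provider = 'AWS_BEDROCK' WHERE provider = 'BEDROCK'")

Either way, the enum's final state can be inspected with: SELECT enumlabel FROM pg_enum WHERE enumtypid = 'litellmprovider'::regtype;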

@@ -86,32 +86,33 @@ class LiteLLMProvider(str, Enum):
OPENAI = "OPENAI"
ANTHROPIC = "ANTHROPIC"
GOOGLE = "GOOGLE"
AZURE_OPENAI = "AZURE_OPENAI"
BEDROCK = "BEDROCK"
VERTEX_AI = "VERTEX_AI"
GROQ = "GROQ"
COHERE = "COHERE"
HUGGINGFACE = "HUGGINGFACE"
AZURE_OPENAI = "AZURE_OPENAI"
GOOGLE = "GOOGLE"
AWS_BEDROCK = "AWS_BEDROCK"
OLLAMA = "OLLAMA"
MISTRAL = "MISTRAL"
TOGETHER_AI = "TOGETHER_AI"
OPENROUTER = "OPENROUTER"
REPLICATE = "REPLICATE"
PALM = "PALM"
VERTEX_AI = "VERTEX_AI"
ANYSCALE = "ANYSCALE"
PERPLEXITY = "PERPLEXITY"
DEEPINFRA = "DEEPINFRA"
AI21 = "AI21"
NLPCLOUD = "NLPCLOUD"
ALEPH_ALPHA = "ALEPH_ALPHA"
PETALS = "PETALS"
COMETAPI = "COMETAPI"
# Chinese LLM Providers (OpenAI-compatible)
DEEPSEEK = "DEEPSEEK"
XAI = "XAI"
OPENROUTER = "OPENROUTER"
TOGETHER_AI = "TOGETHER_AI"
FIREWORKS_AI = "FIREWORKS_AI"
REPLICATE = "REPLICATE"
PERPLEXITY = "PERPLEXITY"
OLLAMA = "OLLAMA"
ALIBABA_QWEN = "ALIBABA_QWEN"
MOONSHOT = "MOONSHOT"
ZHIPU = "ZHIPU"
ANYSCALE = "ANYSCALE"
DEEPINFRA = "DEEPINFRA"
CEREBRAS = "CEREBRAS"
SAMBANOVA = "SAMBANOVA"
AI21 = "AI21"
CLOUDFLARE = "CLOUDFLARE"
DATABRICKS = "DATABRICKS"
COMETAPI = "COMETAPI"
HUGGINGFACE = "HUGGINGFACE"
CUSTOM = "CUSTOM"

@@ -61,11 +61,26 @@ async def validate_llm_config(
"AZURE_OPENAI": "azure",
"OPENROUTER": "openrouter",
"COMETAPI": "cometapi",
# Chinese LLM providers (OpenAI-compatible)
"XAI": "xai",
"BEDROCK": "bedrock",
"AWS_BEDROCK": "bedrock", # Legacy support (backward compatibility)
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
"REPLICATE": "replicate",
"PERPLEXITY": "perplexity",
"ANYSCALE": "anyscale",
"DEEPINFRA": "deepinfra",
"CEREBRAS": "cerebras",
"SAMBANOVA": "sambanova",
"AI21": "ai21",
"CLOUDFLARE": "cloudflare",
"DATABRICKS": "databricks",
# Chinese LLM providers
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
"ZHIPU": "openai", # GLM needs special handling
}
provider_prefix = provider_map.get(provider, provider.lower())
model_string = f"{provider_prefix}/{model_name}"
@@ -187,12 +202,26 @@ async def get_user_llm_instance(
"AZURE_OPENAI": "azure",
"OPENROUTER": "openrouter",
"COMETAPI": "cometapi",
# Chinese LLM providers (OpenAI-compatible)
"DEEPSEEK": "openai", # DeepSeek uses OpenAI-compatible API
"ALIBABA_QWEN": "openai", # Qwen uses OpenAI-compatible API
"MOONSHOT": "openai", # Moonshot (Kimi) uses OpenAI-compatible API
"ZHIPU": "openai", # Zhipu (GLM) uses OpenAI-compatible API
# Add more mappings as needed
"XAI": "xai",
"BEDROCK": "bedrock",
"AWS_BEDROCK": "bedrock", # Legacy support (backward compatibility)
"VERTEX_AI": "vertex_ai",
"TOGETHER_AI": "together_ai",
"FIREWORKS_AI": "fireworks_ai",
"REPLICATE": "replicate",
"PERPLEXITY": "perplexity",
"ANYSCALE": "anyscale",
"DEEPINFRA": "deepinfra",
"CEREBRAS": "cerebras",
"SAMBANOVA": "sambanova",
"AI21": "ai21",
"CLOUDFLARE": "cloudflare",
"DATABRICKS": "databricks",
# Chinese LLM providers
"DEEPSEEK": "openai",
"ALIBABA_QWEN": "openai",
"MOONSHOT": "openai",
"ZHIPU": "openai",
}
provider_prefix = provider_map.get(
llm_config.provider.value, llm_config.provider.value.lower()

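Taken together, the two maps define a single routing rule: an enum value resolves to a LiteLLM route prefix, falling back to the lower-cased enum value when unmapped, and the string handed to LiteLLM is "{prefix}/{model_name}". A standalone sketch of that behavior, using a trimmed copy of the map above (illustration only, not code from this commit):

# Trimmed provider map; the fallback covers everything not listed.
provider_map = {
    "XAI": "xai",
    "BEDROCK": "bedrock",
    "AWS_BEDROCK": "bedrock",  # legacy value routes to the same prefix
    "DEEPSEEK": "openai",  # OpenAI-compatible API
}


def model_string(provider: str, model_name: str) -> str:
    # Unmapped providers fall back to their lower-cased enum value.
    prefix = provider_map.get(provider, provider.lower())
    return f"{prefix}/{model_name}"


assert model_string("XAI", "grok-3") == "xai/grok-3"
assert model_string("AWS_BEDROCK", "anthropic.claude-v2") == "bedrock/anthropic.claude-v2"
assert model_string("DEEPSEEK", "deepseek-chat") == "openai/deepseek-chat"
# MISTRAL is not in the map, so the fallback produces the "mistral/" prefix.
assert model_string("MISTRAL", "mistral-large-latest") == "mistral/mistral-large-latest"

This is also why AWS_BEDROCK and BEDROCK can coexist: they differ only as dictionary keys, not in the route prefix they produce.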
@@ -1,6 +1,6 @@
"use client";
import { AlertCircle, Bot, Plus, Trash2 } from "lucide-react";
import { AlertCircle, Bot, Check, ChevronsUpDown, Plus, Trash2 } from "lucide-react";
import { motion } from "motion/react";
import { useTranslations } from "next-intl";
import { useState } from "react";
@@ -9,8 +9,17 @@ import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import {
Command,
CommandEmpty,
CommandGroup,
CommandInput,
CommandItem,
CommandList,
} from "@/components/ui/command";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Popover, PopoverContent, PopoverTrigger } from "@/components/ui/popover";
import {
Select,
SelectContent,
@@ -19,8 +28,10 @@ import {
SelectValue,
} from "@/components/ui/select";
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider, LLM_MODELS } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import { type CreateLLMConfig, useLLMConfigs } from "@/hooks/use-llm-configs";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
@@ -50,6 +61,7 @@ export function AddProviderStep({
search_space_id: searchSpaceId,
});
const [isSubmitting, setIsSubmitting] = useState(false);
const [modelComboboxOpen, setModelComboboxOpen] = useState(false);
const handleInputChange = (field: keyof CreateLLMConfig, value: string) => {
setFormData((prev) => ({ ...prev, [field]: value }));
@@ -85,11 +97,18 @@
};
const selectedProvider = LLM_PROVIDERS.find((p) => p.value === formData.provider);
const availableModels = formData.provider ? getModelsByProvider(formData.provider) : [];
const handleParamsChange = (newParams: Record<string, number | string>) => {
setFormData((prev) => ({ ...prev, litellm_params: newParams }));
};
// Reset model when provider changes
const handleProviderChange = (value: string) => {
handleInputChange("provider", value);
setFormData((prev) => ({ ...prev, model_name: "" }));
};
return (
<div className="space-y-6">
{/* Info Alert */}
@@ -182,14 +201,11 @@
<div className="space-y-2">
<Label htmlFor="provider">{t("provider_required")}</Label>
<Select
value={formData.provider}
onValueChange={(value) => handleInputChange("provider", value)}
>
<Select value={formData.provider} onValueChange={handleProviderChange}>
<SelectTrigger>
<SelectValue placeholder={t("provider_placeholder")} />
</SelectTrigger>
<SelectContent>
<SelectContent className="max-h-[300px]">
{LLM_PROVIDERS.map((provider) => (
<SelectItem key={provider.value} value={provider.value}>
{provider.label}
@@ -235,18 +251,95 @@
<div className="space-y-2">
<Label htmlFor="model_name">{t("model_name_required")}</Label>
<Input
id="model_name"
placeholder={selectedProvider?.example || t("model_name_placeholder")}
value={formData.model_name}
onChange={(e) => handleInputChange("model_name", e.target.value)}
required
/>
{selectedProvider && (
<p className="text-xs text-muted-foreground">
{t("examples")}: {selectedProvider.example}
</p>
)}
<Popover open={modelComboboxOpen} onOpenChange={setModelComboboxOpen}>
<PopoverTrigger asChild>
<Button
variant="outline"
role="combobox"
aria-expanded={modelComboboxOpen}
className="w-full justify-between font-normal"
>
<span className={cn(!formData.model_name && "text-muted-foreground")}>
{formData.model_name || t("model_name_placeholder")}
</span>
<ChevronsUpDown className="ml-2 h-4 w-4 shrink-0 opacity-50" />
</Button>
</PopoverTrigger>
<PopoverContent className="w-full p-0" align="start" side="bottom">
<Command shouldFilter={false}>
<CommandInput
placeholder={
selectedProvider?.example ||
t("model_name_placeholder") ||
"Type model name..."
}
value={formData.model_name}
onValueChange={(value) => handleInputChange("model_name", value)}
/>
<CommandList>
<CommandEmpty>
<div className="py-2 text-center text-sm text-muted-foreground">
{formData.model_name
? `Using custom model: "${formData.model_name}"`
: "Type your model name above"}
</div>
</CommandEmpty>
{availableModels.length > 0 && (
<CommandGroup heading="Suggested Models">
{availableModels
.filter(
(model) =>
!formData.model_name ||
model.value
.toLowerCase()
.includes(formData.model_name.toLowerCase()) ||
model.label
.toLowerCase()
.includes(formData.model_name.toLowerCase())
)
.map((model) => (
<CommandItem
key={model.value}
value={model.value}
onSelect={(currentValue) => {
handleInputChange("model_name", currentValue);
setModelComboboxOpen(false);
}}
className="flex flex-col items-start py-3"
>
<div className="flex w-full items-center">
<Check
className={cn(
"mr-2 h-4 w-4 shrink-0",
formData.model_name === model.value
? "opacity-100"
: "opacity-0"
)}
/>
<div className="flex-1">
<div className="font-medium">{model.label}</div>
{model.contextWindow && (
<div className="text-xs text-muted-foreground">
Context: {model.contextWindow}
</div>
)}
</div>
</div>
</CommandItem>
))}
</CommandGroup>
)}
</CommandList>
</Command>
</PopoverContent>
</Popover>
<p className="text-xs text-muted-foreground">
{availableModels.length > 0
? `Type freely or select from ${availableModels.length} model suggestions`
: selectedProvider?.example
? `${t("examples")}: ${selectedProvider.example}`
: "Type your model name freely"}
</p>
</div>
<div className="space-y-2">

@@ -38,6 +38,7 @@ import {
SelectValue,
} from "@/components/ui/select";
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import { type CreateLLMConfig, type LLMConfig, useLLMConfigs } from "@/hooks/use-llm-configs";
import InferenceParamsEditor from "../inference-params-editor";
@@ -93,12 +94,13 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
setFormData((prev) => ({ ...prev, [field]: value }));
};
// Handle provider change with auto-fill API Base URL
// Handle provider change with auto-fill API Base URL and reset model
const handleProviderChange = (providerValue: string) => {
const provider = LLM_PROVIDERS.find((p) => p.value === providerValue);
setFormData((prev) => ({
...prev,
provider: providerValue,
model_name: "", // Reset model when provider changes
// Auto-fill API Base URL if the provider has a default
api_base: provider?.apiBase || prev.api_base,
}));
@@ -157,6 +159,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
};
const selectedProvider = LLM_PROVIDERS.find((p) => p.value === formData.provider);
const availableModels = formData.provider ? getModelsByProvider(formData.provider) : [];
const getProviderInfo = (providerValue: string) => {
return LLM_PROVIDERS.find((p) => p.value === providerValue);
@@ -530,17 +533,50 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<div className="space-y-2">
<Label htmlFor="model_name">Model Name *</Label>
<Input
id="model_name"
placeholder={selectedProvider?.example || "e.g., gpt-4"}
value={formData.model_name}
onChange={(e) => handleInputChange("model_name", e.target.value)}
required
/>
{selectedProvider && (
<p className="text-xs text-muted-foreground">
Examples: {selectedProvider.example}
</p>
{availableModels.length > 0 ? (
<>
<Select
value={formData.model_name}
onValueChange={(value) => handleInputChange("model_name", value)}
>
<SelectTrigger>
<SelectValue placeholder="Select a model" />
</SelectTrigger>
<SelectContent className="max-h-[400px]">
{availableModels.map((model) => (
<SelectItem key={model.value} value={model.value}>
<div className="flex flex-col py-1">
<span className="font-medium">{model.label}</span>
{model.contextWindow && (
<span className="text-xs text-muted-foreground">
Context: {model.contextWindow}
</span>
)}
</div>
</SelectItem>
))}
</SelectContent>
</Select>
<p className="text-xs text-muted-foreground">
{availableModels.length} model{availableModels.length !== 1 ? "s" : ""}{" "}
available
</p>
</>
) : (
<>
<Input
id="model_name"
placeholder={selectedProvider?.example || "e.g., gpt-4"}
value={formData.model_name}
onChange={(e) => handleInputChange("model_name", e.target.value)}
required
/>
{selectedProvider && (
<p className="text-xs text-muted-foreground">
Examples: {selectedProvider.example}
</p>
)}
</>
)}
</div>

@@ -0,0 +1,160 @@
"use client";
import { Command as CommandPrimitive } from "cmdk";
import { SearchIcon } from "lucide-react";
import type * as React from "react";
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import { cn } from "@/lib/utils";
function Command({ className, ...props }: React.ComponentProps<typeof CommandPrimitive>) {
return (
<CommandPrimitive
data-slot="command"
className={cn(
"bg-popover text-popover-foreground flex h-full w-full flex-col overflow-hidden rounded-md",
className
)}
{...props}
/>
);
}
function CommandDialog({
title = "Command Palette",
description = "Search for a command to run...",
children,
className,
showCloseButton = true,
...props
}: React.ComponentProps<typeof Dialog> & {
title?: string;
description?: string;
className?: string;
showCloseButton?: boolean;
}) {
return (
<Dialog {...props}>
<DialogHeader className="sr-only">
<DialogTitle>{title}</DialogTitle>
<DialogDescription>{description}</DialogDescription>
</DialogHeader>
<DialogContent
className={cn("overflow-hidden p-0", className)}
showCloseButton={showCloseButton}
>
<Command className="[&_[cmdk-group-heading]]:text-muted-foreground **:data-[slot=command-input-wrapper]:h-12 [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:font-medium [&_[cmdk-group]]:px-2 [&_[cmdk-group]:not([hidden])_~[cmdk-group]]:pt-0 [&_[cmdk-input-wrapper]_svg]:h-5 [&_[cmdk-input-wrapper]_svg]:w-5 [&_[cmdk-input]]:h-12 [&_[cmdk-item]]:px-2 [&_[cmdk-item]]:py-3 [&_[cmdk-item]_svg]:h-5 [&_[cmdk-item]_svg]:w-5">
{children}
</Command>
</DialogContent>
</Dialog>
);
}
function CommandInput({
className,
...props
}: React.ComponentProps<typeof CommandPrimitive.Input>) {
return (
<div data-slot="command-input-wrapper" className="flex h-9 items-center gap-2 border-b px-3">
<SearchIcon className="size-4 shrink-0 opacity-50" />
<CommandPrimitive.Input
data-slot="command-input"
className={cn(
"placeholder:text-muted-foreground flex h-10 w-full rounded-md bg-transparent py-3 text-sm outline-hidden disabled:cursor-not-allowed disabled:opacity-50",
className
)}
{...props}
/>
</div>
);
}
function CommandList({ className, ...props }: React.ComponentProps<typeof CommandPrimitive.List>) {
return (
<CommandPrimitive.List
data-slot="command-list"
className={cn("max-h-[300px] scroll-py-1 overflow-x-hidden overflow-y-auto", className)}
{...props}
/>
);
}
function CommandEmpty({ ...props }: React.ComponentProps<typeof CommandPrimitive.Empty>) {
return (
<CommandPrimitive.Empty
data-slot="command-empty"
className="py-6 text-center text-sm"
{...props}
/>
);
}
function CommandGroup({
className,
...props
}: React.ComponentProps<typeof CommandPrimitive.Group>) {
return (
<CommandPrimitive.Group
data-slot="command-group"
className={cn(
"text-foreground [&_[cmdk-group-heading]]:text-muted-foreground overflow-hidden p-1 [&_[cmdk-group-heading]]:px-2 [&_[cmdk-group-heading]]:py-1.5 [&_[cmdk-group-heading]]:text-xs [&_[cmdk-group-heading]]:font-medium",
className
)}
{...props}
/>
);
}
function CommandSeparator({
className,
...props
}: React.ComponentProps<typeof CommandPrimitive.Separator>) {
return (
<CommandPrimitive.Separator
data-slot="command-separator"
className={cn("bg-border -mx-1 h-px", className)}
{...props}
/>
);
}
function CommandItem({ className, ...props }: React.ComponentProps<typeof CommandPrimitive.Item>) {
return (
<CommandPrimitive.Item
data-slot="command-item"
className={cn(
"data-[selected=true]:bg-accent data-[selected=true]:text-accent-foreground [&_svg:not([class*='text-'])]:text-muted-foreground relative flex cursor-default items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-[disabled=true]:pointer-events-none data-[disabled=true]:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className
)}
{...props}
/>
);
}
function CommandShortcut({ className, ...props }: React.ComponentProps<"span">) {
return (
<span
data-slot="command-shortcut"
className={cn("text-muted-foreground ml-auto text-xs tracking-widest", className)}
{...props}
/>
);
}
export {
Command,
CommandDialog,
CommandInput,
CommandList,
CommandEmpty,
CommandGroup,
CommandItem,
CommandShortcut,
CommandSeparator,
};

File diff suppressed because it is too large

@@ -3,127 +3,180 @@ export interface LLMProvider {
label: string;
example: string;
description: string;
apiBase?: string; // Default API Base URL for the provider
apiBase?: string;
}
export const LLM_PROVIDERS: LLMProvider[] = [
{
value: "OPENAI",
label: "OpenAI",
example: "gpt-4o, gpt-4, gpt-3.5-turbo",
description: "Industry-leading GPT models with broad capabilities",
example: "gpt-4o, gpt-4o-mini, o1, o3-mini",
description: "Industry-leading GPT models",
},
{
value: "ANTHROPIC",
label: "Anthropic",
example: "claude-3-5-sonnet-20241022, claude-3-opus-20240229",
description: "Claude models with strong reasoning and long context windows",
example: "claude-3-5-sonnet, claude-3-opus, claude-4-sonnet",
description: "Claude models with strong reasoning",
},
{
value: "GROQ",
label: "Groq",
example: "llama3-70b-8192, mixtral-8x7b-32768",
description: "Lightning-fast inference with custom LPU hardware",
},
{
value: "COHERE",
label: "Cohere",
example: "command-r-plus, command-r",
description: "Enterprise NLP models optimized for business applications",
},
{
value: "HUGGINGFACE",
label: "HuggingFace",
example: "microsoft/DialoGPT-medium",
description: "Access thousands of open-source models",
value: "GOOGLE",
label: "Google (Gemini)",
example: "gemini-2.5-flash, gemini-2.5-pro, gemini-1.5-pro",
description: "Gemini models with multimodal capabilities",
},
{
value: "AZURE_OPENAI",
label: "Azure OpenAI",
example: "gpt-4, gpt-35-turbo",
description: "OpenAI models with Microsoft Azure enterprise features",
example: "azure/gpt-4o, azure/gpt-4o-mini",
description: "OpenAI models on Azure",
},
{
value: "GOOGLE",
label: "Google",
example: "gemini-pro, gemini-pro-vision",
description: "Gemini models with multimodal capabilities",
},
{
value: "AWS_BEDROCK",
value: "BEDROCK",
label: "AWS Bedrock",
example: "anthropic.claude-v2",
description: "Fully managed foundation models on AWS infrastructure",
example: "anthropic.claude-3-5-sonnet, meta.llama3-70b",
description: "Foundation models on AWS",
},
{
value: "OLLAMA",
label: "Ollama",
example: "llama2, codellama",
description: "Run open-source models locally on your machine",
value: "VERTEX_AI",
label: "Google Vertex AI",
example: "vertex_ai/claude-3-5-sonnet, vertex_ai/gemini-2.5-pro",
description: "Models on Google Cloud Vertex AI",
},
{
value: "GROQ",
label: "Groq",
example: "groq/llama-3.3-70b-versatile, groq/mixtral-8x7b",
description: "Ultra-fast inference",
},
{
value: "COHERE",
label: "Cohere",
example: "command-a-03-2025, command-r-plus",
description: "Enterprise NLP models",
},
{
value: "MISTRAL",
label: "Mistral",
example: "mistral-large-latest, mistral-medium",
description: "High-performance open-source models from Europe",
label: "Mistral AI",
example: "mistral-large-latest, mistral-medium-latest",
description: "European open-source models",
},
{
value: "TOGETHER_AI",
label: "Together AI",
example: "togethercomputer/llama-2-70b-chat",
description: "Scalable cloud platform for open-source models",
value: "DEEPSEEK",
label: "DeepSeek",
example: "deepseek-chat, deepseek-reasoner",
description: "High-performance reasoning models",
apiBase: "https://api.deepseek.com",
},
{
value: "REPLICATE",
label: "Replicate",
example: "meta/llama-2-70b-chat",
description: "Cloud API for running machine learning models",
value: "XAI",
label: "xAI (Grok)",
example: "grok-4, grok-3, grok-3-mini",
description: "Grok models from xAI",
},
{
value: "OPENROUTER",
label: "OpenRouter",
example: "anthropic/claude-opus-4.1, openai/gpt-5",
description: "Unified API gateway for multiple LLM providers",
example: "openrouter/anthropic/claude-4-opus",
description: "Unified API for multiple providers",
},
{
value: "COMETAPI",
label: "CometAPI",
example: "gpt-5-mini, claude-sonnet-4-5",
description: "Access 500+ AI models through one unified API",
value: "TOGETHER_AI",
label: "Together AI",
example: "together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo",
description: "Fast open-source models",
},
// Chinese LLM Providers
{
value: "DEEPSEEK",
label: "DeepSeek",
example: "deepseek-chat, deepseek-coder",
description: "Chinese high-performance AI models",
apiBase: "https://api.deepseek.com",
value: "FIREWORKS_AI",
label: "Fireworks AI",
example: "fireworks_ai/accounts/fireworks/models/llama-v3p3-70b-instruct",
description: "Scalable inference platform",
},
{
value: "REPLICATE",
label: "Replicate",
example: "replicate/meta/llama-3-70b-instruct",
description: "ML model hosting platform",
},
{
value: "PERPLEXITY",
label: "Perplexity",
example: "perplexity/sonar-pro, perplexity/sonar-reasoning",
description: "Search-augmented models",
},
{
value: "OLLAMA",
label: "Ollama",
example: "ollama/llama3.1, ollama/mistral",
description: "Run models locally",
},
{
value: "ALIBABA_QWEN",
label: "Qwen",
example: "qwen-max, qwen-plus, qwen-turbo",
description: "Alibaba Cloud Qwen LLM",
label: "Alibaba Qwen",
example: "dashscope/qwen-plus, dashscope/qwen-turbo",
description: "Qwen series models",
apiBase: "https://dashscope.aliyuncs.com/compatible-mode/v1",
},
{
value: "MOONSHOT",
label: "Kimi",
example: "moonshot-v1-8k, moonshot-v1-32k, moonshot-v1-128k",
description: "Moonshot AI Kimi models",
label: "Moonshot (Kimi)",
example: "moonshot/kimi-latest, moonshot/kimi-k2-thinking",
description: "Kimi AI models",
apiBase: "https://api.moonshot.cn/v1",
},
{
value: "ZHIPU",
label: "GLM",
example: "glm-4, glm-4-flash, glm-3-turbo",
description: "Zhipu AI GLM series models",
label: "Zhipu (GLM)",
example: "openrouter/z-ai/glm-4.6",
description: "GLM series models",
apiBase: "https://open.bigmodel.cn/api/paas/v4",
},
{
value: "ANYSCALE",
label: "Anyscale",
example: "anyscale/meta-llama/Meta-Llama-3-70B-Instruct",
description: "Ray-based inference platform",
},
{
value: "DEEPINFRA",
label: "DeepInfra",
example: "deepinfra/meta-llama/Meta-Llama-3.3-70B-Instruct",
description: "Serverless GPU inference",
},
{
value: "CEREBRAS",
label: "Cerebras",
example: "cerebras/llama-3.3-70b, cerebras/qwen-3-32b",
description: "Fastest inference with Wafer-Scale Engine",
},
{
value: "SAMBANOVA",
label: "SambaNova",
example: "sambanova/Meta-Llama-3.3-70B-Instruct",
description: "AI inference platform",
},
{
value: "AI21",
label: "AI21 Labs",
example: "jamba-1.5-large, jamba-1.5-mini",
description: "Jamba series models",
},
{
value: "CLOUDFLARE",
label: "Cloudflare Workers AI",
example: "cloudflare/@cf/meta/llama-2-7b-chat",
description: "AI on Cloudflare edge network",
},
{
value: "DATABRICKS",
label: "Databricks",
example: "databricks/databricks-meta-llama-3-3-70b-instruct",
description: "Databricks Model Serving",
},
{
value: "CUSTOM",
label: "Custom Provider",
example: "your-custom-model",
description: "Connect to your own custom model endpoint",
description: "Custom OpenAI-compatible endpoint",
},
];

@@ -56,6 +56,7 @@
"canvas-confetti": "^1.9.3",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"cmdk": "^1.1.1",
"date-fns": "^4.1.0",
"dotenv": "^17.2.3",
"drizzle-orm": "^0.44.5",

@@ -113,6 +113,9 @@ importers:
clsx:
specifier: ^2.1.1
version: 2.1.1
cmdk:
specifier: ^1.1.1
version: 1.1.1(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
date-fns:
specifier: ^4.1.0
version: 4.1.0
@@ -2783,6 +2786,12 @@ packages:
react: ^18.0.0
react-dom: ^18.0.0
cmdk@1.1.1:
resolution: {integrity: sha512-Vsv7kFaXm+ptHDMZ7izaRsP70GgrW9NBNGswt9OZaVBLlE0SNpDq8eu/VGXyF9r7M0azK3Wy7OlYXsuyYLFzHg==}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
react-dom: ^18 || ^19 || ^19.0.0-rc
codemirror@6.0.2:
resolution: {integrity: sha512-VhydHotNW5w1UGK0Qj96BwSk/Zqbp9WbnyK2W/eVMv4QyF41INRGpjUhFJY7/uDNuudSc33a/PKr4iDqRduvHw==}
@@ -8493,6 +8502,18 @@ snapshots:
transitivePeerDependencies:
- '@types/react'
cmdk@1.1.1(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
dependencies:
'@radix-ui/react-compose-refs': 1.1.2(@types/react@19.1.8)(react@19.1.0)
'@radix-ui/react-dialog': 1.1.14(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
'@radix-ui/react-id': 1.1.1(@types/react@19.1.8)(react@19.1.0)
'@radix-ui/react-primitive': 2.1.3(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react: 19.1.0
react-dom: 19.1.0(react@19.1.0)
transitivePeerDependencies:
- '@types/react'
- '@types/react-dom'
codemirror@6.0.2:
dependencies:
'@codemirror/autocomplete': 6.18.6