Merge pull request #582 from MODSetter/dev

feat: shifted use-llm-configs hook to jotai and tanstack
This commit is contained in:
Rohan Verma 2025-12-14 22:37:24 -08:00 committed by GitHub
commit 384ddfda71
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 803 additions and 468 deletions

View file

@ -6,8 +6,9 @@ import { AnimatePresence, motion } from "motion/react";
import { useParams, usePathname, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import type React from "react";
import { useEffect, useMemo, useState } from "react";
import { useCallback, useEffect, useMemo, useState } from "react";
import { activeChathatUIAtom, activeChatIdAtom } from "@/atoms/chats/ui.atoms";
import { llmPreferencesAtom } from "@/atoms/llm-config/llm-config-query.atoms";
import { activeSearchSpaceIdAtom } from "@/atoms/seach-spaces/seach-space-queries.atom";
import { ChatPanelContainer } from "@/components/chat/ChatPanel/ChatPanelContainer";
import { DashboardBreadcrumb } from "@/components/dashboard-breadcrumb";
@ -17,7 +18,6 @@ import { ThemeTogglerComponent } from "@/components/theme/theme-toggle";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Separator } from "@/components/ui/separator";
import { SidebarInset, SidebarProvider, SidebarTrigger } from "@/components/ui/sidebar";
import { useLLMPreferences } from "@/hooks/use-llm-configs";
import { useUserAccess } from "@/hooks/use-rbac";
import { cn } from "@/lib/utils";
@ -60,7 +60,16 @@ export function DashboardClientLayout({
}
}, [activeChatId, isChatPannelOpen]);
const { loading, error, isOnboardingComplete } = useLLMPreferences(searchSpaceIdNum);
const { data: preferences = {}, isFetching: loading, error } = useAtomValue(llmPreferencesAtom);
const isOnboardingComplete = useCallback(() => {
return !!(
preferences.long_context_llm_id &&
preferences.fast_llm_id &&
preferences.strategic_llm_id
);
}, [preferences]);
const { access, loading: accessLoading } = useUserAccess(searchSpaceIdNum);
const [hasCheckedOnboarding, setHasCheckedOnboarding] = useState(false);
@ -182,7 +191,9 @@ export function DashboardClientLayout({
<CardDescription>{t("failed_load_llm_config")}</CardDescription>
</CardHeader>
<CardContent>
<p className="text-sm text-muted-foreground">{error}</p>
<p className="text-sm text-muted-foreground">
{error instanceof Error ? error.message : String(error)}
</p>
</CardContent>
</Card>
</div>

View file

@ -1,18 +1,24 @@
"use client";
import { useAtomValue } from "jotai";
import { FileText, MessageSquare, UserPlus, Users } from "lucide-react";
import { motion } from "motion/react";
import { useParams, useRouter } from "next/navigation";
import { useTranslations } from "next-intl";
import { useCallback, useEffect, useRef, useState } from "react";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { toast } from "sonner";
import { updateLLMPreferencesMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { OnboardActionCard } from "@/components/onboard/onboard-action-card";
import { OnboardAdvancedSettings } from "@/components/onboard/onboard-advanced-settings";
import { OnboardHeader } from "@/components/onboard/onboard-header";
import { OnboardLLMSetup } from "@/components/onboard/onboard-llm-setup";
import { OnboardLoading } from "@/components/onboard/onboard-loading";
import { OnboardStats } from "@/components/onboard/onboard-stats";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { getBearerToken, redirectToLogin } from "@/lib/auth-utils";
const OnboardPage = () => {
@ -21,21 +27,38 @@ const OnboardPage = () => {
const params = useParams();
const searchSpaceId = Number(params.search_space_id);
const { llmConfigs, loading: configsLoading, refreshConfigs } = useLLMConfigs(searchSpaceId);
const { globalConfigs, loading: globalConfigsLoading } = useGlobalLLMConfigs();
const {
preferences,
loading: preferencesLoading,
isOnboardingComplete,
updatePreferences,
refreshPreferences,
} = useLLMPreferences(searchSpaceId);
data: llmConfigs = [],
isFetching: configsLoading,
refetch: refreshConfigs,
} = useAtomValue(llmConfigsAtom);
const { data: globalConfigs = [], isFetching: globalConfigsLoading } =
useAtomValue(globalLLMConfigsAtom);
const {
data: preferences = {},
isFetching: preferencesLoading,
refetch: refreshPreferences,
} = useAtomValue(llmPreferencesAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
// Compute isOnboardingComplete
const isOnboardingComplete = useMemo(() => {
return !!(
preferences.long_context_llm_id &&
preferences.fast_llm_id &&
preferences.strategic_llm_id
);
}, [preferences]);
const [isAutoConfiguring, setIsAutoConfiguring] = useState(false);
const [autoConfigComplete, setAutoConfigComplete] = useState(false);
const [showAdvancedSettings, setShowAdvancedSettings] = useState(false);
const [showPromptSettings, setShowPromptSettings] = useState(false);
const handleRefreshPreferences = useCallback(async () => {
await refreshPreferences();
}, []);
// Track if we've already attempted auto-configuration
const hasAttemptedAutoConfig = useRef(false);
@ -61,7 +84,7 @@ const OnboardPage = () => {
!configsLoading &&
!globalConfigsLoading
) {
wasCompleteOnMount.current = isOnboardingComplete();
wasCompleteOnMount.current = isOnboardingComplete;
hasCheckedInitialState.current = true;
}
}, [preferencesLoading, configsLoading, globalConfigsLoading, isOnboardingComplete]);
@ -85,7 +108,7 @@ const OnboardPage = () => {
const autoConfigureLLMs = useCallback(async () => {
if (hasAttemptedAutoConfig.current) return;
if (globalConfigs.length === 0) return;
if (isOnboardingComplete()) {
if (isOnboardingComplete) {
setAutoConfigComplete(true);
return;
}
@ -110,15 +133,15 @@ const OnboardPage = () => {
strategic_llm_id: defaultConfigId,
};
const success = await updatePreferences(newPreferences);
if (success) {
await refreshPreferences();
setAutoConfigComplete(true);
toast.success("AI models configured automatically!", {
description: "You can customize these in advanced settings.",
});
}
await updatePreferences({
search_space_id: searchSpaceId,
data: newPreferences,
});
await refreshPreferences();
setAutoConfigComplete(true);
toast.success("AI models configured automatically!", {
description: "You can customize these in advanced settings.",
});
} catch (error) {
console.error("Auto-configuration failed:", error);
} finally {
@ -134,7 +157,7 @@ const OnboardPage = () => {
}, [configsLoading, globalConfigsLoading, preferencesLoading, autoConfigureLLMs]);
const allConfigs = [...globalConfigs, ...llmConfigs];
const isReady = autoConfigComplete || isOnboardingComplete();
const isReady = autoConfigComplete || isOnboardingComplete;
// Loading state
if (configsLoading || preferencesLoading || globalConfigsLoading || isAutoConfiguring) {
@ -152,7 +175,7 @@ const OnboardPage = () => {
// Show LLM setup if no configs available OR if roles are not assigned yet
// This forces users to complete role assignment before seeing the final screen
if (allConfigs.length === 0 || !isOnboardingComplete()) {
if (allConfigs.length === 0 || !isOnboardingComplete) {
return (
<OnboardLLMSetup
searchSpaceId={searchSpaceId}
@ -165,9 +188,9 @@ const OnboardPage = () => {
? t("configure_providers_and_assign_roles")
: t("complete_role_assignment")
}
onConfigCreated={refreshConfigs}
onConfigDeleted={refreshConfigs}
onPreferencesUpdated={refreshPreferences}
onConfigCreated={() => refreshConfigs()}
onConfigDeleted={() => refreshConfigs()}
onPreferencesUpdated={handleRefreshPreferences}
/>
);
}
@ -257,9 +280,9 @@ const OnboardPage = () => {
setShowLLMSettings={setShowAdvancedSettings}
showPromptSettings={showPromptSettings}
setShowPromptSettings={setShowPromptSettings}
onConfigCreated={refreshConfigs}
onConfigDeleted={refreshConfigs}
onPreferencesUpdated={refreshPreferences}
onConfigCreated={() => refreshConfigs()}
onConfigDeleted={() => refreshConfigs()}
onPreferencesUpdated={handleRefreshPreferences}
/>
{/* Footer */}

View file

@ -0,0 +1,110 @@
import { atomWithMutation } from "jotai-tanstack-query";
import { toast } from "sonner";
import { activeSearchSpaceIdAtom } from "@/atoms/seach-spaces/seach-space-queries.atom";
import type {
CreateLLMConfigRequest,
DeleteLLMConfigRequest,
GetLLMConfigsResponse,
UpdateLLMConfigRequest,
UpdateLLMConfigResponse,
UpdateLLMPreferencesRequest,
} from "@/contracts/types/llm-config.types";
import { llmConfigApiService } from "@/lib/apis/llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { queryClient } from "@/lib/query-client/client";
/**
 * Mutation atom for creating an LLM configuration.
 *
 * On success: shows a toast, then invalidates the per-search-space config
 * list and the global config list so dependent queries refetch.
 */
export const createLLMConfigMutationAtom = atomWithMutation((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	return {
		mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
		// NOTE(review): the original `enabled: !!searchSpaceId` was removed —
		// TanStack Query mutations have no `enabled` option, so it was
		// silently ignored. Callers must ensure a search space is active.
		mutationFn: async (request: CreateLLMConfigRequest) => {
			return llmConfigApiService.createLLMConfig(request);
		},
		onSuccess: () => {
			toast.success("LLM configuration created successfully");
			// Guard instead of asserting non-null: if no search space is
			// active, skip the scoped invalidation rather than invalidating
			// a bogus key.
			if (searchSpaceId != null) {
				queryClient.invalidateQueries({
					queryKey: cacheKeys.llmConfigs.all(searchSpaceId),
				});
			}
			queryClient.invalidateQueries({
				queryKey: cacheKeys.llmConfigs.global(),
			});
		},
	};
});
/**
 * Mutation atom for updating an existing LLM configuration.
 *
 * On success: shows a toast, then invalidates the scoped config list, the
 * individual config entry (by id), and the global config list.
 */
export const updateLLMConfigMutationAtom = atomWithMutation((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	return {
		mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
		// NOTE(review): removed `enabled: !!searchSpaceId` — mutations have
		// no `enabled` option in TanStack Query; it was silently ignored.
		mutationFn: async (request: UpdateLLMConfigRequest) => {
			return llmConfigApiService.updateLLMConfig(request);
		},
		onSuccess: (_: UpdateLLMConfigResponse, request: UpdateLLMConfigRequest) => {
			toast.success("LLM configuration updated successfully");
			// Guard instead of asserting non-null on the search space id.
			if (searchSpaceId != null) {
				queryClient.invalidateQueries({
					queryKey: cacheKeys.llmConfigs.all(searchSpaceId),
				});
			}
			queryClient.invalidateQueries({
				queryKey: cacheKeys.llmConfigs.byId(String(request.id)),
			});
			queryClient.invalidateQueries({
				queryKey: cacheKeys.llmConfigs.global(),
			});
		},
	};
});
/**
 * Mutation atom for deleting an LLM configuration.
 *
 * On success: shows a toast, optimistically removes the deleted entry from
 * the cached scoped list via `setQueryData`, then invalidates the
 * per-config and global caches.
 */
export const deleteLLMConfigMutationAtom = atomWithMutation((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	// NOTE(review): the original read `localStorage` here, at atom-getter
	// evaluation time — that throws during SSR/prerender in Next.js where
	// `localStorage` is undefined. The token only fed an `enabled` flag,
	// which mutations don't support in TanStack Query anyway (it was
	// silently ignored), so both have been removed. Auth is handled by
	// `llmConfigApiService` at request time.
	return {
		mutationKey: cacheKeys.llmConfigs.all(searchSpaceId!),
		mutationFn: async (request: DeleteLLMConfigRequest) => {
			return llmConfigApiService.deleteLLMConfig(request);
		},
		onSuccess: (_, request: DeleteLLMConfigRequest) => {
			toast.success("LLM configuration deleted successfully");
			// Optimistically drop the deleted config from the cached list so
			// the UI updates without waiting for a refetch.
			if (searchSpaceId != null) {
				queryClient.setQueryData(
					cacheKeys.llmConfigs.all(searchSpaceId),
					(oldData: GetLLMConfigsResponse | undefined) => {
						if (!oldData) return oldData;
						return oldData.filter((config) => config.id !== request.id);
					}
				);
			}
			queryClient.invalidateQueries({
				queryKey: cacheKeys.llmConfigs.byId(String(request.id)),
			});
			queryClient.invalidateQueries({
				queryKey: cacheKeys.llmConfigs.global(),
			});
		},
	};
});
/**
 * Mutation atom for updating LLM role preferences (long-context / fast /
 * strategic model assignments) for the active search space.
 *
 * On success: shows a toast and invalidates the preferences query so
 * `llmPreferencesAtom` refetches. The invalidation key uses the numeric
 * search-space id — the preferences query atom must build its key the
 * same way or the invalidation will never match.
 */
export const updateLLMPreferencesMutationAtom = atomWithMutation((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	return {
		mutationKey: cacheKeys.llmConfigs.preferences(searchSpaceId!),
		// NOTE(review): removed `enabled: !!searchSpaceId` — mutations have
		// no `enabled` option in TanStack Query; it was silently ignored.
		mutationFn: async (request: UpdateLLMPreferencesRequest) => {
			return llmConfigApiService.updateLLMPreferences(request);
		},
		onSuccess: () => {
			toast.success("LLM preferences updated successfully");
			if (searchSpaceId != null) {
				queryClient.invalidateQueries({
					queryKey: cacheKeys.llmConfigs.preferences(searchSpaceId),
				});
			}
		},
	};
});

View file

@ -0,0 +1,46 @@
import { atomWithQuery } from "jotai-tanstack-query";
import { activeSearchSpaceIdAtom } from "@/atoms/seach-spaces/seach-space-queries.atom";
import { llmConfigApiService } from "@/lib/apis/llm-config-api.service";
import { cacheKeys } from "@/lib/query-client/cache-keys";
/**
 * Query atom: LLM configurations belonging to the active search space.
 * Disabled until a search space id is available; results are considered
 * fresh for five minutes.
 */
export const llmConfigsAtom = atomWithQuery((get) => {
	const spaceId = get(activeSearchSpaceIdAtom);
	const fetchConfigs = async () =>
		llmConfigApiService.getLLMConfigs({
			queryParams: { search_space_id: spaceId! },
		});
	return {
		queryKey: cacheKeys.llmConfigs.all(spaceId!),
		enabled: !!spaceId,
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: fetchConfigs,
	};
});
/**
 * Query atom: globally-available LLM configurations (not scoped to any
 * search space). Always enabled; results are fresh for ten minutes.
 */
export const globalLLMConfigsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.llmConfigs.global(),
	staleTime: 10 * 60 * 1000, // 10 minutes
	queryFn: async () => llmConfigApiService.getGlobalLLMConfigs(),
}));
/**
 * Query atom: LLM role preferences for the active search space.
 * Disabled until a search space id is available; results are considered
 * fresh for five minutes.
 */
export const llmPreferencesAtom = atomWithQuery((get) => {
	const searchSpaceId = get(activeSearchSpaceIdAtom);
	return {
		// BUG FIX: this key was built with `String(searchSpaceId)` while
		// `updateLLMPreferencesMutationAtom` invalidates
		// `preferences(searchSpaceId)` with the numeric id. Query-key
		// matching is by deep equality ("42" !== 42), so saving preferences
		// never refetched this query. Use the numeric id consistently, the
		// same way `llmConfigsAtom` builds its key.
		queryKey: cacheKeys.llmConfigs.preferences(searchSpaceId!),
		enabled: !!searchSpaceId,
		staleTime: 5 * 60 * 1000, // 5 minutes
		queryFn: async () => {
			return llmConfigApiService.getLLMPreferences({
				search_space_id: searchSpaceId!,
			});
		},
	};
});

View file

@ -1,11 +1,17 @@
"use client";
import { ChatInput } from "@llamaindex/chat-ui";
import { useAtom } from "jotai";
import { useAtom, useAtomValue } from "jotai";
import { Brain, Check, FolderOpen, Minus, Plus, PlusCircle, Zap } from "lucide-react";
import { useParams, useRouter } from "next/navigation";
import React, { Suspense, useCallback, useMemo, useState } from "react";
import { documentTypeCountsAtom } from "@/atoms/documents/document-query.atoms";
import { updateLLMPreferencesMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { DocumentsDataTable } from "@/components/chat/DocumentsDataTable";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
@ -28,7 +34,6 @@ import {
import { Tooltip, TooltipContent, TooltipProvider, TooltipTrigger } from "@/components/ui/tooltip";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import type { Document } from "@/contracts/types/document.types";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useSearchSourceConnectors } from "@/hooks/use-search-source-connectors";
const DocumentSelector = React.memo(
@ -539,17 +544,21 @@ const LLMSelector = React.memo(() => {
const { search_space_id } = useParams();
const searchSpaceId = Number(search_space_id);
const { llmConfigs, loading: llmLoading, error } = useLLMConfigs(searchSpaceId);
const {
globalConfigs,
loading: globalConfigsLoading,
error: globalConfigsError,
} = useGlobalLLMConfigs();
data: llmConfigs = [],
isFetching: llmLoading,
isError: error,
} = useAtomValue(llmConfigsAtom);
const {
preferences,
updatePreferences,
loading: preferencesLoading,
} = useLLMPreferences(searchSpaceId);
data: globalConfigs = [],
isFetching: globalConfigsLoading,
isError: globalConfigsError,
} = useAtomValue(globalLLMConfigsAtom);
// Replace useLLMPreferences with jotai atoms
const { data: preferences = {}, isFetching: preferencesLoading } =
useAtomValue(llmPreferencesAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
const isLoading = llmLoading || preferencesLoading || globalConfigsLoading;
@ -574,7 +583,9 @@ const LLMSelector = React.memo(() => {
<span className="hidden sm:inline text-muted-foreground text-xs truncate max-w-[60px]">
{selectedConfig.name}
</span>
{selectedConfig.is_global && <span className="text-xs">🌐</span>}
{"is_global" in selectedConfig && selectedConfig.is_global && (
<span className="text-xs">🌐</span>
)}
</div>
);
}, [selectedConfig]);
@ -582,9 +593,12 @@ const LLMSelector = React.memo(() => {
const handleValueChange = React.useCallback(
(value: string) => {
const llmId = value ? parseInt(value, 10) : undefined;
updatePreferences({ fast_llm_id: llmId });
updatePreferences({
search_space_id: searchSpaceId,
data: { fast_llm_id: llmId },
});
},
[updatePreferences]
[updatePreferences, searchSpaceId]
);
// Loading skeleton

View file

@ -1,5 +1,6 @@
"use client";
import { useAtomValue } from "jotai";
import {
AlertCircle,
Bot,
@ -17,6 +18,16 @@ import { motion } from "motion/react";
import { useTranslations } from "next-intl";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import {
createLLMConfigMutationAtom,
deleteLLMConfigMutationAtom,
updateLLMPreferencesMutationAtom,
} from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
@ -43,14 +54,8 @@ import { Separator } from "@/components/ui/separator";
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import {
type CreateLLMConfig,
useGlobalLLMConfigs,
useLLMConfigs,
useLLMPreferences,
} from "@/hooks/use-llm-configs";
import { type CreateLLMConfigRequest, LLMConfig } from "@/contracts/types/llm-config.types";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
interface SetupLLMStepProps {
@ -96,15 +101,20 @@ export function SetupLLMStep({
onConfigDeleted,
onPreferencesUpdated,
}: SetupLLMStepProps) {
const { mutate: createLLMConfig, isPending: isCreatingLlmConfig } = useAtomValue(
createLLMConfigMutationAtom
);
const t = useTranslations("onboard");
const { llmConfigs, createLLMConfig, deleteLLMConfig } = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
const { preferences, updatePreferences } = useLLMPreferences(searchSpaceId);
const { mutateAsync: deleteLLMConfig } = useAtomValue(deleteLLMConfigMutationAtom);
const { data: llmConfigs = [] } = useAtomValue(llmConfigsAtom);
const { data: globalConfigs = [] } = useAtomValue(globalLLMConfigsAtom);
const { data: preferences = {} } = useAtomValue(llmPreferencesAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
const [isAddingNew, setIsAddingNew] = useState(false);
const [formData, setFormData] = useState<CreateLLMConfig>({
const [formData, setFormData] = useState<CreateLLMConfigRequest>({
name: "",
provider: "",
provider: "" as CreateLLMConfigRequest["provider"], // Allow it as Default
custom_provider: "",
model_name: "",
api_key: "",
@ -113,7 +123,6 @@ export function SetupLLMStep({
litellm_params: {},
search_space_id: searchSpaceId,
});
const [isSubmitting, setIsSubmitting] = useState(false);
const [modelComboboxOpen, setModelComboboxOpen] = useState(false);
const [showProviderForm, setShowProviderForm] = useState(false);
@ -135,7 +144,7 @@ export function SetupLLMStep({
});
}, [preferences]);
const handleInputChange = (field: keyof CreateLLMConfig, value: string) => {
const handleInputChange = (field: keyof CreateLLMConfigRequest, value: string) => {
setFormData((prev) => ({ ...prev, [field]: value }));
};
@ -146,25 +155,32 @@ export function SetupLLMStep({
return;
}
setIsSubmitting(true);
const result = await createLLMConfig(formData);
setIsSubmitting(false);
if (result) {
setFormData({
name: "",
provider: "",
custom_provider: "",
model_name: "",
api_key: "",
api_base: "",
language: "English",
litellm_params: {},
search_space_id: searchSpaceId,
});
setIsAddingNew(false);
onConfigCreated?.();
}
createLLMConfig(formData, {
onError: (error) => {
console.error("Error creating LLM config:", error);
if (error instanceof Error) {
toast.error(error?.message || "Failed to create LLM config");
}
},
onSuccess: () => {
toast.success("LLM config created successfully");
setFormData({
name: "",
provider: "" as CreateLLMConfigRequest["provider"],
custom_provider: "",
model_name: "",
api_key: "",
api_base: "",
language: "English",
litellm_params: {},
search_space_id: searchSpaceId,
});
onConfigCreated?.();
},
onSettled: () => {
setIsAddingNew(false);
},
});
};
const handleRoleAssignment = async (role: string, configId: string) => {
@ -197,9 +213,12 @@ export function SetupLLMStep({
: newAssignments.strategic_llm_id,
};
const success = await updatePreferences(numericAssignments);
await updatePreferences({
search_space_id: searchSpaceId,
data: numericAssignments,
});
if (success && onPreferencesUpdated) {
if (onPreferencesUpdated) {
await onPreferencesUpdated();
}
}
@ -322,9 +341,11 @@ export function SetupLLMStep({
variant="ghost"
size="sm"
onClick={async () => {
const success = await deleteLLMConfig(config.id);
if (success) {
try {
await deleteLLMConfig({ id: config.id });
onConfigDeleted?.();
} catch (error) {
console.error("Failed to delete config:", error);
}
}}
className="text-destructive hover:text-destructive"
@ -417,7 +438,7 @@ export function SetupLLMStep({
<Input
id="custom_provider"
placeholder={t("custom_provider_placeholder")}
value={formData.custom_provider}
value={formData.custom_provider ?? ""}
onChange={(e) => handleInputChange("custom_provider", e.target.value)}
required
/>
@ -543,7 +564,7 @@ export function SetupLLMStep({
<Input
id="api_base"
placeholder={selectedProvider?.apiBase || t("api_base_placeholder")}
value={formData.api_base}
value={formData.api_base ?? ""}
onChange={(e) => handleInputChange("api_base", e.target.value)}
/>
{/* Ollama-specific help */}
@ -590,15 +611,15 @@ export function SetupLLMStep({
</div>
<div className="flex gap-2 pt-2">
<Button type="submit" disabled={isSubmitting} size="sm">
{isSubmitting ? t("adding") : t("add_provider")}
<Button type="submit" disabled={isCreatingLlmConfig} size="sm">
{isCreatingLlmConfig ? t("adding") : t("add_provider")}
</Button>
<Button
type="button"
variant="outline"
size="sm"
onClick={() => setIsAddingNew(false)}
disabled={isSubmitting}
disabled={isCreatingLlmConfig}
>
{t("cancel")}
</Button>
@ -730,7 +751,7 @@ export function SetupLLMStep({
<div className="flex items-center gap-2 text-sm">
<Bot className="w-4 h-4" />
<span className="font-medium">{t("assigned")}:</span>
{assignedConfig.is_global && (
{"is_global" in assignedConfig && assignedConfig.is_global && (
<Badge variant="secondary" className="text-xs">
🌐 Global
</Badge>

View file

@ -1,5 +1,6 @@
"use client";
import { useAtomValue } from "jotai";
import {
AlertCircle,
Bot,
@ -15,6 +16,12 @@ import {
import { motion } from "motion/react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { updateLLMPreferencesMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import {
globalLLMConfigsAtom,
llmConfigsAtom,
llmPreferencesAtom,
} from "@/atoms/llm-config/llm-config-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
@ -27,7 +34,6 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { useGlobalLLMConfigs, useLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
const ROLE_DESCRIPTIONS = {
long_context: {
@ -62,24 +68,25 @@ interface LLMRoleManagerProps {
export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
const {
llmConfigs,
loading: configsLoading,
data: llmConfigs = [],
isFetching: configsLoading,
error: configsError,
refreshConfigs,
} = useLLMConfigs(searchSpaceId);
refetch: refreshConfigs,
} = useAtomValue(llmConfigsAtom);
const {
globalConfigs,
loading: globalConfigsLoading,
data: globalConfigs = [],
isFetching: globalConfigsLoading,
error: globalConfigsError,
refreshGlobalConfigs,
} = useGlobalLLMConfigs();
refetch: refreshGlobalConfigs,
} = useAtomValue(globalLLMConfigsAtom);
const {
preferences,
loading: preferencesLoading,
data: preferences = {},
isFetching: preferencesLoading,
error: preferencesError,
updatePreferences,
refreshPreferences,
} = useLLMPreferences(searchSpaceId);
refetch: refreshPreferences,
} = useAtomValue(llmPreferencesAtom);
const { mutateAsync: updatePreferences } = useAtomValue(updateLLMPreferencesMutationAtom);
const [assignments, setAssignments] = useState({
long_context_llm_id: preferences.long_context_llm_id || "",
@ -148,12 +155,13 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
: assignments.strategic_llm_id,
};
const success = await updatePreferences(numericAssignments);
await updatePreferences({
search_space_id: searchSpaceId,
data: numericAssignments,
});
if (success) {
setHasChanges(false);
toast.success("LLM role assignments saved successfully!");
}
setHasChanges(false);
toast.success("LLM role assignments saved successfully!");
setIsSaving(false);
};
@ -203,7 +211,7 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<Button
variant="outline"
size="sm"
onClick={refreshConfigs}
onClick={() => refreshConfigs()}
disabled={isLoading}
className="flex items-center gap-2"
>
@ -214,7 +222,7 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<Button
variant="outline"
size="sm"
onClick={refreshPreferences}
onClick={() => refreshPreferences()}
disabled={isLoading}
className="flex items-center gap-2"
>
@ -230,7 +238,9 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>
{configsError || preferencesError || globalConfigsError}
{(configsError?.message ?? "Failed to load LLM configurations") ||
(preferencesError?.message ?? "Failed to load preferences") ||
(globalConfigsError?.message ?? "Failed to load global configurations")}
</AlertDescription>
</Alert>
)}
@ -484,7 +494,7 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
<span className="font-medium">Assigned:</span>
<Badge variant="secondary">{assignedConfig.provider}</Badge>
<span>{assignedConfig.name}</span>
{assignedConfig.is_global && (
{"is_global" in assignedConfig && assignedConfig.is_global && (
<Badge variant="outline" className="text-xs">
🌐 Global
</Badge>

View file

@ -1,5 +1,6 @@
"use client";
import { useAtomValue } from "jotai";
import {
AlertCircle,
Bot,
@ -17,6 +18,12 @@ import {
import { AnimatePresence, motion } from "motion/react";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import {
createLLMConfigMutationAtom,
deleteLLMConfigMutationAtom,
updateLLMConfigMutationAtom,
} from "@/atoms/llm-config/llm-config-mutation.atoms";
import { globalLLMConfigsAtom, llmConfigsAtom } from "@/atoms/llm-config/llm-config-query.atoms";
import { Alert, AlertDescription } from "@/components/ui/alert";
import {
AlertDialog,
@ -59,12 +66,12 @@ import {
import { LANGUAGES } from "@/contracts/enums/languages";
import { getModelsByProvider } from "@/contracts/enums/llm-models";
import { LLM_PROVIDERS } from "@/contracts/enums/llm-providers";
import {
type CreateLLMConfig,
type LLMConfig,
useGlobalLLMConfigs,
useLLMConfigs,
} from "@/hooks/use-llm-configs";
import type {
CreateLLMConfigRequest,
CreateLLMConfigResponse,
LLMConfig,
UpdateLLMConfigResponse,
} from "@/contracts/types/llm-config.types";
import { cn } from "@/lib/utils";
import InferenceParamsEditor from "../inference-params-editor";
@ -74,20 +81,32 @@ interface ModelConfigManagerProps {
export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
const {
llmConfigs,
loading,
error,
createLLMConfig,
updateLLMConfig,
deleteLLMConfig,
refreshConfigs,
} = useLLMConfigs(searchSpaceId);
const { globalConfigs } = useGlobalLLMConfigs();
mutateAsync: createLLMConfig,
isPending: isCreatingLLMConfig,
error: createLLMConfigError,
} = useAtomValue(createLLMConfigMutationAtom);
const {
mutateAsync: updateLLMConfig,
isPending: isUpdatingLLMConfig,
error: updateLLMConfigError,
} = useAtomValue(updateLLMConfigMutationAtom);
const {
mutateAsync: deleteLLMConfig,
isPending: isDeletingLLMConfig,
error: deleteLLMConfigError,
} = useAtomValue(deleteLLMConfigMutationAtom);
const {
data: llmConfigs,
isFetching: isFetchingLLMConfigs,
error: LLMConfigsFetchError,
refetch: refreshConfigs,
} = useAtomValue(llmConfigsAtom);
const { data: globalConfigs = [] } = useAtomValue(globalLLMConfigsAtom);
const [isAddingNew, setIsAddingNew] = useState(false);
const [editingConfig, setEditingConfig] = useState<LLMConfig | null>(null);
const [formData, setFormData] = useState<CreateLLMConfig>({
const [formData, setFormData] = useState<CreateLLMConfigRequest>({
name: "",
provider: "",
provider: "" as CreateLLMConfigRequest["provider"], // Allow it as Default,
custom_provider: "",
model_name: "",
api_key: "",
@ -96,7 +115,14 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
litellm_params: {},
search_space_id: searchSpaceId,
});
const [isSubmitting, setIsSubmitting] = useState(false);
const isSubmitting = isCreatingLLMConfig || isUpdatingLLMConfig;
const errors = [
createLLMConfigError,
updateLLMConfigError,
deleteLLMConfigError,
LLMConfigsFetchError,
] as Error[];
const isError = Boolean(errors.filter(Boolean).length);
const [modelComboboxOpen, setModelComboboxOpen] = useState(false);
const [configToDelete, setConfigToDelete] = useState<LLMConfig | null>(null);
const [isDeleting, setIsDeleting] = useState(false);
@ -118,12 +144,12 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
}
}, [editingConfig, searchSpaceId]);
const handleInputChange = (field: keyof CreateLLMConfig, value: string) => {
const handleInputChange = (field: keyof CreateLLMConfigRequest, value: string) => {
setFormData((prev) => ({ ...prev, [field]: value }));
};
// Handle provider change with auto-fill API Base URL and reset model / 处理 Provider 变更并自动填充 API Base URL 并重置模型
const handleProviderChange = (providerValue: string) => {
const handleProviderChange = (providerValue: CreateLLMConfigRequest["provider"]) => {
const provider = LLM_PROVIDERS.find((p) => p.value === providerValue);
setFormData((prev) => ({
...prev,
@ -141,23 +167,19 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
return;
}
setIsSubmitting(true);
let result: LLMConfig | null = null;
let result: CreateLLMConfigResponse | UpdateLLMConfigResponse | null = null;
if (editingConfig) {
// Update existing config
result = await updateLLMConfig(editingConfig.id, formData);
result = await updateLLMConfig({ id: editingConfig.id, data: formData });
} else {
// Create new config
result = await createLLMConfig(formData);
}
setIsSubmitting(false);
if (result) {
setFormData({
name: "",
provider: "",
provider: "" as CreateLLMConfigRequest["provider"],
custom_provider: "",
model_name: "",
api_key: "",
@ -177,14 +199,11 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
const handleConfirmDelete = async () => {
if (!configToDelete) return;
setIsDeleting(true);
try {
await deleteLLMConfig(configToDelete.id);
toast.success("Configuration deleted successfully");
await deleteLLMConfig({ id: configToDelete.id });
} catch (error) {
toast.error("Failed to delete configuration");
} finally {
setIsDeleting(false);
setConfigToDelete(null);
}
};
@ -217,26 +236,29 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<Button
variant="outline"
size="sm"
onClick={refreshConfigs}
disabled={loading}
onClick={() => refreshConfigs()}
disabled={isFetchingLLMConfigs}
className="flex items-center gap-2"
>
<RefreshCw className={`h-4 w-4 ${loading ? "animate-spin" : ""}`} />
<RefreshCw className={`h-4 w-4 ${isFetchingLLMConfigs ? "animate-spin" : ""}`} />
Refresh
</Button>
</div>
</div>
{/* Error Alert */}
{error && (
<Alert variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>{error}</AlertDescription>
</Alert>
)}
{isError &&
errors.filter(Boolean).map((err, i) => {
return (
<Alert key={`err.message-${i}`} variant="destructive">
<AlertCircle className="h-4 w-4" />
<AlertDescription>{err?.message ?? "Something went wrong"}</AlertDescription>
</Alert>
);
})}
{/* Global Configs Info Alert */}
{!loading && !error && globalConfigs.length > 0 && (
{!isFetchingLLMConfigs && !isError && globalConfigs.length > 0 && (
<Alert>
<CheckCircle className="h-4 w-4" />
<AlertDescription>
@ -250,7 +272,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
)}
{/* Loading State */}
{loading && (
{isFetchingLLMConfigs && (
<Card>
<CardContent className="flex items-center justify-center py-12">
<div className="flex items-center gap-2 text-muted-foreground">
@ -262,14 +284,14 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
)}
{/* Stats Overview */}
{!loading && !error && (
{!isFetchingLLMConfigs && !isError && (
<div className="grid gap-3 grid-cols-3">
<Card className="overflow-hidden">
<div className="h-1 bg-blue-500" />
<CardContent className="p-4">
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p className="text-2xl font-bold tracking-tight">{llmConfigs.length}</p>
<p className="text-2xl font-bold tracking-tight">{llmConfigs?.length}</p>
<p className="text-xs font-medium text-muted-foreground">Total Configs</p>
</div>
<div className="flex h-9 w-9 shrink-0 items-center justify-center rounded-lg bg-blue-500/10">
@ -285,7 +307,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<div className="flex items-start justify-between gap-2">
<div className="space-y-1 min-w-0">
<p className="text-2xl font-bold tracking-tight">
{new Set(llmConfigs.map((c) => c.provider)).size}
{new Set(llmConfigs?.map((c) => c.provider)).size}
</p>
<p className="text-xs font-medium text-muted-foreground">Providers</p>
</div>
@ -314,7 +336,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
)}
{/* Configuration Management */}
{!loading && !error && (
{!isFetchingLLMConfigs && !isError && (
<div className="space-y-6">
<div className="flex flex-col space-y-4 sm:flex-row sm:items-center sm:justify-between sm:space-y-0">
<div>
@ -329,7 +351,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
</Button>
</div>
{llmConfigs.length === 0 ? (
{llmConfigs?.length === 0 ? (
<Card className="border-dashed border-2 border-muted-foreground/25">
<CardContent className="flex flex-col items-center justify-center py-16 text-center">
<div className="rounded-full bg-muted p-4 mb-6">
@ -350,7 +372,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
) : (
<div className="grid gap-4">
<AnimatePresence>
{llmConfigs.map((config) => {
{llmConfigs?.map((config) => {
const providerInfo = getProviderInfo(config.provider);
return (
<motion.div
@ -466,7 +488,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
setEditingConfig(null);
setFormData({
name: "",
provider: "",
provider: "" as LLMConfig["provider"],
custom_provider: "",
model_name: "",
api_key: "",
@ -538,7 +560,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<Input
id="custom_provider"
placeholder="e.g., my-custom-provider"
value={formData.custom_provider}
value={formData.custom_provider ?? ""}
onChange={(e) => handleInputChange("custom_provider", e.target.value)}
required
/>
@ -683,7 +705,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
<Input
id="api_base"
placeholder={selectedProvider?.apiBase || "e.g., https://api.openai.com/v1"}
value={formData.api_base}
value={formData.api_base ?? ""}
onChange={(e) => handleInputChange("api_base", e.target.value)}
/>
{selectedProvider?.apiBase && formData.api_base === selectedProvider.apiBase && (
@ -765,7 +787,7 @@ export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
setEditingConfig(null);
setFormData({
name: "",
provider: "",
provider: "" as LLMConfig["provider"],
custom_provider: "",
model_name: "",
api_key: "",

View file

@ -0,0 +1,193 @@
import { z } from "zod";
import { paginationQueryParams } from ".";
/**
 * Provider identifiers accepted by the backend's LiteLLM integration.
 * Kept as a `const` tuple so the literal union can be reused elsewhere.
 */
const LITELLM_PROVIDERS = [
  "OPENAI",
  "ANTHROPIC",
  "GOOGLE",
  "AZURE_OPENAI",
  "BEDROCK",
  "VERTEX_AI",
  "GROQ",
  "COHERE",
  "MISTRAL",
  "DEEPSEEK",
  "XAI",
  "OPENROUTER",
  "TOGETHER_AI",
  "FIREWORKS_AI",
  "REPLICATE",
  "PERPLEXITY",
  "OLLAMA",
  "ALIBABA_QWEN",
  "MOONSHOT",
  "ZHIPU",
  "ANYSCALE",
  "DEEPINFRA",
  "CEREBRAS",
  "SAMBANOVA",
  "AI21",
  "CLOUDFLARE",
  "DATABRICKS",
  "COMETAPI",
  "HUGGINGFACE",
  "CUSTOM",
] as const;

/** Zod enum validating a LiteLLM provider name. */
export const liteLLMProviderEnum = z.enum(LITELLM_PROVIDERS);
/**
 * Full LLM configuration record as stored per search space and returned by
 * the backend. `api_key` is always present here — treat parsed values as
 * sensitive and avoid logging them.
 */
export const llmConfig = z.object({
  id: z.number(),
  name: z.string().max(100),
  provider: liteLLMProviderEnum,
  // Free-form provider name; only meaningful when provider === "CUSTOM".
  custom_provider: z.string().nullable().optional(),
  model_name: z.string().max(100),
  api_key: z.string(),
  api_base: z.string().nullable().optional(),
  language: z.string().max(50).nullable(),
  // Extra parameters forwarded verbatim to LiteLLM; shape is provider-specific.
  litellm_params: z.record(z.string(), z.any()).nullable().optional(),
  search_space_id: z.number(),
  created_at: z.string().nullable(),
  updated_at: z.string().nullable().optional(),
});

/**
 * Globally-shared configuration variant: no `api_key`/`search_space_id`,
 * a looser string `provider`, and a literal `is_global: true` discriminator.
 */
export const globalLLMConfig = llmConfig
  .pick({
    id: true,
    name: true,
    custom_provider: true,
    model_name: true,
    api_base: true,
    language: true,
    litellm_params: true,
  })
  .extend({
    provider: z.string(),
    is_global: z.literal(true),
  });
/**
 * GET /api/v1/global-llm-configs — list of globally-shared configurations.
 */
export const getGlobalLLMConfigsResponse = z.array(globalLLMConfig);

/**
 * POST /api/v1/llm-configs — create a configuration inside a search space.
 * The request is the config record minus server-managed fields (id, timestamps).
 */
export const createLLMConfigRequest = llmConfig.pick({
  name: true,
  provider: true,
  custom_provider: true,
  model_name: true,
  api_key: true,
  api_base: true,
  language: true,
  litellm_params: true,
  search_space_id: true,
});
export const createLLMConfigResponse = llmConfig;

/**
 * GET /api/v1/llm-configs — list configurations, scoped by search space with
 * optional skip/limit pagination. `queryParams` may be omitted entirely.
 */
export const getLLMConfigsRequest = z.object({
  queryParams: paginationQueryParams
    .pick({ skip: true, limit: true })
    .extend({
      search_space_id: z.number().or(z.string()),
    })
    .nullish(),
});
export const getLLMConfigsResponse = z.array(llmConfig);

/**
 * GET /api/v1/llm-configs/{id} — fetch one configuration by id.
 */
export const getLLMConfigRequest = llmConfig.pick({ id: true });
export const getLLMConfigResponse = llmConfig;

/**
 * PUT /api/v1/llm-configs/{id} — partial update; any subset of the editable
 * fields may be supplied in `data`.
 */
export const updateLLMConfigRequest = z.object({
  id: z.number(),
  data: llmConfig
    .pick({
      name: true,
      provider: true,
      custom_provider: true,
      model_name: true,
      api_key: true,
      api_base: true,
      language: true,
      litellm_params: true,
    })
    .partial(),
});
export const updateLLMConfigResponse = llmConfig;

/**
 * DELETE /api/v1/llm-configs/{id} — the response message is pinned to the
 * exact backend string so contract drift fails validation loudly.
 */
export const deleteLLMConfigRequest = llmConfig.pick({ id: true });
export const deleteLLMConfigResponse = z.object({
  message: z.literal("LLM configuration deleted successfully"),
});
/**
 * Per-search-space LLM role assignments: which config serves as the
 * long-context, fast, and strategic model. The `*_id` fields carry the
 * selection; the expanded `*_llm` objects may be populated by the backend.
 */
export const llmPreferences = z.object({
  long_context_llm_id: z.number().nullable().optional(),
  fast_llm_id: z.number().nullable().optional(),
  strategic_llm_id: z.number().nullable().optional(),
  long_context_llm: llmConfig.nullable().optional(),
  fast_llm: llmConfig.nullable().optional(),
  strategic_llm: llmConfig.nullable().optional(),
});

/**
 * GET /api/v1/search-spaces/{search_space_id}/llm-preferences
 */
export const getLLMPreferencesRequest = z.object({
  search_space_id: z.number(),
});
export const getLLMPreferencesResponse = llmPreferences;

/**
 * PUT /api/v1/search-spaces/{search_space_id}/llm-preferences — only the id
 * fields are writable; expanded config objects are server-derived.
 */
export const updateLLMPreferencesRequest = z.object({
  search_space_id: z.number(),
  data: llmPreferences.pick({
    long_context_llm_id: true,
    fast_llm_id: true,
    strategic_llm_id: true,
  }),
});
export const updateLLMPreferencesResponse = llmPreferences;

// Inferred TypeScript types for the schemas above, exported so callers can
// type requests/responses without importing zod.
export type LLMConfig = z.infer<typeof llmConfig>;
export type LiteLLMProvider = z.infer<typeof liteLLMProviderEnum>;
export type GlobalLLMConfig = z.infer<typeof globalLLMConfig>;
export type GetGlobalLLMConfigsResponse = z.infer<typeof getGlobalLLMConfigsResponse>;
export type CreateLLMConfigRequest = z.infer<typeof createLLMConfigRequest>;
export type CreateLLMConfigResponse = z.infer<typeof createLLMConfigResponse>;
export type GetLLMConfigsRequest = z.infer<typeof getLLMConfigsRequest>;
export type GetLLMConfigsResponse = z.infer<typeof getLLMConfigsResponse>;
export type GetLLMConfigRequest = z.infer<typeof getLLMConfigRequest>;
export type GetLLMConfigResponse = z.infer<typeof getLLMConfigResponse>;
export type UpdateLLMConfigRequest = z.infer<typeof updateLLMConfigRequest>;
export type UpdateLLMConfigResponse = z.infer<typeof updateLLMConfigResponse>;
export type DeleteLLMConfigRequest = z.infer<typeof deleteLLMConfigRequest>;
export type DeleteLLMConfigResponse = z.infer<typeof deleteLLMConfigResponse>;
export type LLMPreferences = z.infer<typeof llmPreferences>;
export type GetLLMPreferencesRequest = z.infer<typeof getLLMPreferencesRequest>;
export type GetLLMPreferencesResponse = z.infer<typeof getLLMPreferencesResponse>;
export type UpdateLLMPreferencesRequest = z.infer<typeof updateLLMPreferencesRequest>;
export type UpdateLLMPreferencesResponse = z.infer<typeof updateLLMPreferencesResponse>;

View file

@ -1,303 +0,0 @@
"use client";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import { authenticatedFetch } from "@/lib/auth-utils";
/**
 * A single LLM configuration record as returned by the backend API.
 * NOTE(review): superseded by the zod-derived contract types — presumably kept
 * only until all callers migrate; confirm before extending.
 */
export interface LLMConfig {
  id: number;
  name: string;
  provider: string;
  custom_provider?: string;
  model_name: string;
  api_key: string;
  api_base?: string;
  language?: string;
  // Extra parameters forwarded to LiteLLM; provider-specific shape.
  litellm_params?: Record<string, any>;
  created_at?: string;
  search_space_id?: number;
  // Set on globally-shared configurations served by /global-llm-configs.
  is_global?: boolean;
}

/**
 * Per-search-space role assignments (long-context / fast / strategic model),
 * optionally expanded with the full config objects by the backend.
 */
export interface LLMPreferences {
  long_context_llm_id?: number;
  fast_llm_id?: number;
  strategic_llm_id?: number;
  long_context_llm?: LLMConfig;
  fast_llm?: LLMConfig;
  strategic_llm?: LLMConfig;
}

/** Payload for creating a config: the record minus server-managed fields. */
export interface CreateLLMConfig {
  name: string;
  provider: string;
  custom_provider?: string;
  model_name: string;
  api_key: string;
  api_base?: string;
  language?: string;
  litellm_params?: Record<string, any>;
  search_space_id: number;
}

/** Partial-update payload; every field optional. */
export interface UpdateLLMConfig {
  name?: string;
  provider?: string;
  custom_provider?: string;
  model_name?: string;
  api_key?: string;
  api_base?: string;
  litellm_params?: Record<string, any>;
}
/**
 * State hook managing LLM configurations for one search space.
 *
 * Loads the config list on mount and whenever `searchSpaceId` changes, and
 * exposes create/update/delete actions that keep local state in sync and
 * surface success/error toasts.
 *
 * @param searchSpaceId - Search space whose configs to manage; `null` disables fetching.
 */
export function useLLMConfigs(searchSpaceId: number | null) {
  const [llmConfigs, setLlmConfigs] = useState<LLMConfig[]>([]);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  const fetchLLMConfigs = async () => {
    if (!searchSpaceId) {
      // No space selected — nothing to load, but clear the loading flag.
      setLoading(false);
      return;
    }

    try {
      setLoading(true);
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/llm-configs?search_space_id=${searchSpaceId}`,
        { method: "GET" }
      );

      if (!response.ok) {
        throw new Error("Failed to fetch LLM configurations");
      }

      const data = await response.json();
      setLlmConfigs(data);
      setError(null);
    } catch (err: unknown) {
      // Narrow before reading `.message` (strict `useUnknownInCatchVariables`).
      setError(
        err instanceof Error && err.message ? err.message : "Failed to fetch LLM configurations"
      );
      console.error("Error fetching LLM configurations:", err);
    } finally {
      setLoading(false);
    }
  };

  // Refetch whenever the target search space changes.
  useEffect(() => {
    fetchLLMConfigs();
  }, [searchSpaceId]);

  /** POST a new config; appends it to local state on success. Returns null on failure. */
  const createLLMConfig = async (config: CreateLLMConfig): Promise<LLMConfig | null> => {
    try {
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/llm-configs`,
        {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify(config),
        }
      );

      if (!response.ok) {
        const errorData = await response.json();
        throw new Error(errorData.detail || "Failed to create LLM configuration");
      }

      const newConfig = await response.json();
      setLlmConfigs((prev) => [...prev, newConfig]);
      toast.success("LLM configuration created successfully");
      return newConfig;
    } catch (err: unknown) {
      toast.error(
        err instanceof Error && err.message ? err.message : "Failed to create LLM configuration"
      );
      console.error("Error creating LLM configuration:", err);
      return null;
    }
  };

  /** DELETE a config by id; removes it from local state on success. */
  const deleteLLMConfig = async (id: number): Promise<boolean> => {
    try {
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/llm-configs/${id}`,
        { method: "DELETE" }
      );

      if (!response.ok) {
        throw new Error("Failed to delete LLM configuration");
      }

      setLlmConfigs((prev) => prev.filter((config) => config.id !== id));
      toast.success("LLM configuration deleted successfully");
      return true;
    } catch (err: unknown) {
      toast.error(
        err instanceof Error && err.message ? err.message : "Failed to delete LLM configuration"
      );
      console.error("Error deleting LLM configuration:", err);
      return false;
    }
  };

  /** PUT updated fields for a config; replaces it in local state on success. */
  const updateLLMConfig = async (
    id: number,
    config: UpdateLLMConfig
  ): Promise<LLMConfig | null> => {
    try {
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/llm-configs/${id}`,
        {
          method: "PUT",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify(config),
        }
      );

      if (!response.ok) {
        const errorData = await response.json();
        throw new Error(errorData.detail || "Failed to update LLM configuration");
      }

      const updatedConfig = await response.json();
      setLlmConfigs((prev) => prev.map((c) => (c.id === id ? updatedConfig : c)));
      toast.success("LLM configuration updated successfully");
      return updatedConfig;
    } catch (err: unknown) {
      toast.error(
        err instanceof Error && err.message ? err.message : "Failed to update LLM configuration"
      );
      console.error("Error updating LLM configuration:", err);
      return null;
    }
  };

  return {
    llmConfigs,
    loading,
    error,
    createLLMConfig,
    updateLLMConfig,
    deleteLLMConfig,
    refreshConfigs: fetchLLMConfigs,
  };
}
/**
 * State hook for a search space's LLM preferences (long-context / fast /
 * strategic model assignments). Fetches on mount and when `searchSpaceId`
 * changes; `updatePreferences` persists changes and refreshes local state.
 *
 * @param searchSpaceId - Target search space; `null` disables fetching.
 */
export function useLLMPreferences(searchSpaceId: number | null) {
  const [preferences, setPreferences] = useState<LLMPreferences>({});
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  const fetchPreferences = async () => {
    if (!searchSpaceId) {
      // No space selected — nothing to load, but clear the loading flag.
      setLoading(false);
      return;
    }

    try {
      setLoading(true);
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/search-spaces/${searchSpaceId}/llm-preferences`,
        { method: "GET" }
      );

      if (!response.ok) {
        throw new Error("Failed to fetch LLM preferences");
      }

      const data = await response.json();
      setPreferences(data);
      setError(null);
    } catch (err: unknown) {
      // Narrow before reading `.message` (strict `useUnknownInCatchVariables`).
      setError(
        err instanceof Error && err.message ? err.message : "Failed to fetch LLM preferences"
      );
      console.error("Error fetching LLM preferences:", err);
    } finally {
      setLoading(false);
    }
  };

  // Refetch whenever the target search space changes.
  useEffect(() => {
    fetchPreferences();
  }, [searchSpaceId]);

  /** PUT the given (partial) preferences; updates local state on success. */
  const updatePreferences = async (newPreferences: Partial<LLMPreferences>): Promise<boolean> => {
    if (!searchSpaceId) {
      toast.error("Search space ID is required");
      return false;
    }

    try {
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/search-spaces/${searchSpaceId}/llm-preferences`,
        {
          method: "PUT",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify(newPreferences),
        }
      );

      if (!response.ok) {
        const errorData = await response.json();
        throw new Error(errorData.detail || "Failed to update LLM preferences");
      }

      const updatedPreferences = await response.json();
      setPreferences(updatedPreferences);
      toast.success("LLM preferences updated successfully");
      return true;
    } catch (err: unknown) {
      toast.error(
        err instanceof Error && err.message ? err.message : "Failed to update LLM preferences"
      );
      console.error("Error updating LLM preferences:", err);
      return false;
    }
  };

  /** True once all three model roles have a config assigned. */
  const isOnboardingComplete = (): boolean => {
    return !!(
      preferences.long_context_llm_id &&
      preferences.fast_llm_id &&
      preferences.strategic_llm_id
    );
  };

  return {
    preferences,
    loading,
    error,
    updatePreferences,
    refreshPreferences: fetchPreferences,
    isOnboardingComplete,
  };
}
/**
 * State hook loading the globally-shared LLM configurations (available to all
 * users) once on mount; `refreshGlobalConfigs` re-fetches on demand.
 */
export function useGlobalLLMConfigs() {
  const [globalConfigs, setGlobalConfigs] = useState<LLMConfig[]>([]);
  const [loading, setLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);

  const fetchGlobalConfigs = async () => {
    try {
      setLoading(true);
      const response = await authenticatedFetch(
        `${process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL}/api/v1/global-llm-configs`,
        { method: "GET" }
      );

      if (!response.ok) {
        throw new Error("Failed to fetch global LLM configurations");
      }

      const data = await response.json();
      setGlobalConfigs(data);
      setError(null);
    } catch (err: unknown) {
      // Narrow before reading `.message` (strict `useUnknownInCatchVariables`).
      setError(
        err instanceof Error && err.message
          ? err.message
          : "Failed to fetch global LLM configurations"
      );
      console.error("Error fetching global LLM configurations:", err);
    } finally {
      setLoading(false);
    }
  };

  // Load once on mount.
  useEffect(() => {
    fetchGlobalConfigs();
  }, []);

  return {
    globalConfigs,
    loading,
    error,
    refreshGlobalConfigs: fetchGlobalConfigs,
  };
}

View file

@ -0,0 +1,179 @@
import {
type CreateLLMConfigRequest,
createLLMConfigRequest,
createLLMConfigResponse,
type DeleteLLMConfigRequest,
deleteLLMConfigRequest,
deleteLLMConfigResponse,
type GetLLMConfigRequest,
type GetLLMConfigsRequest,
type GetLLMPreferencesRequest,
getGlobalLLMConfigsResponse,
getLLMConfigRequest,
getLLMConfigResponse,
getLLMConfigsRequest,
getLLMConfigsResponse,
getLLMPreferencesRequest,
getLLMPreferencesResponse,
type UpdateLLMConfigRequest,
type UpdateLLMPreferencesRequest,
updateLLMConfigRequest,
updateLLMConfigResponse,
updateLLMPreferencesRequest,
updateLLMPreferencesResponse,
} from "@/contracts/types/llm-config.types";
import { ValidationError } from "../error";
import { baseApiService } from "./base-api.service";
class LLMConfigApiService {
/**
* Get all global LLM configurations available to all users
*/
getGlobalLLMConfigs = async () => {
return baseApiService.get(`/api/v1/global-llm-configs`, getGlobalLLMConfigsResponse);
};
/**
* Create a new LLM configuration for a search space
*/
createLLMConfig = async (request: CreateLLMConfigRequest) => {
const parsedRequest = createLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.post(`/api/v1/llm-configs`, createLLMConfigResponse, {
body: parsedRequest.data,
});
};
/**
* Get a list of LLM configurations for a search space
*/
getLLMConfigs = async (request: GetLLMConfigsRequest) => {
const parsedRequest = getLLMConfigsRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
// Transform query params to be string values
const transformedQueryParams = parsedRequest.data.queryParams
? Object.fromEntries(
Object.entries(parsedRequest.data.queryParams).map(([k, v]) => {
return [k, String(v)];
})
)
: undefined;
const queryParams = transformedQueryParams
? new URLSearchParams(transformedQueryParams).toString()
: "";
return baseApiService.get(`/api/v1/llm-configs?${queryParams}`, getLLMConfigsResponse);
};
/**
* Get a single LLM configuration by ID
*/
getLLMConfig = async (request: GetLLMConfigRequest) => {
const parsedRequest = getLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.get(`/api/v1/llm-configs/${request.id}`, getLLMConfigResponse);
};
/**
* Update an existing LLM configuration
*/
updateLLMConfig = async (request: UpdateLLMConfigRequest) => {
const parsedRequest = updateLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
const { id, data } = parsedRequest.data;
return baseApiService.put(`/api/v1/llm-configs/${id}`, updateLLMConfigResponse, {
body: data,
});
};
/**
* Delete an LLM configuration
*/
deleteLLMConfig = async (request: DeleteLLMConfigRequest) => {
const parsedRequest = deleteLLMConfigRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.delete(`/api/v1/llm-configs/${request.id}`, deleteLLMConfigResponse);
};
/**
* Get LLM preferences for a search space
*/
getLLMPreferences = async (request: GetLLMPreferencesRequest) => {
const parsedRequest = getLLMPreferencesRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
return baseApiService.get(
`/api/v1/search-spaces/${request.search_space_id}/llm-preferences`,
getLLMPreferencesResponse
);
};
/**
* Update LLM preferences for a search space
*/
updateLLMPreferences = async (request: UpdateLLMPreferencesRequest) => {
const parsedRequest = updateLLMPreferencesRequest.safeParse(request);
if (!parsedRequest.success) {
console.error("Invalid request:", parsedRequest.error);
const errorMessage = parsedRequest.error.errors.map((err) => err.message).join(", ");
throw new ValidationError(`Invalid request: ${errorMessage}`);
}
const { search_space_id, data } = parsedRequest.data;
return baseApiService.put(
`/api/v1/search-spaces/${search_space_id}/llm-preferences`,
updateLLMPreferencesResponse,
{
body: data,
}
);
};
}
export const llmConfigApiService = new LLMConfigApiService();

View file

@ -1,5 +1,6 @@
import type { GetChatsRequest } from "@/contracts/types/chat.types";
import type { GetDocumentsRequest } from "@/contracts/types/document.types";
import type { GetLLMConfigsRequest } from "@/contracts/types/llm-config.types";
import type { GetPodcastsRequest } from "@/contracts/types/podcast.types";
export const cacheKeys = {
@ -21,6 +22,14 @@ export const cacheKeys = {
typeCounts: (searchSpaceId?: string) => ["documents", "type-counts", searchSpaceId] as const,
byChunk: (chunkId: string) => ["documents", "by-chunk", chunkId] as const,
},
llmConfigs: {
global: () => ["llm-configs", "global"] as const,
all: (searchSpaceId: string) => ["llm-configs", searchSpaceId] as const,
withQueryParams: (queries: GetLLMConfigsRequest["queryParams"]) =>
["llm-configs", ...(queries ? Object.values(queries) : [])] as const,
byId: (llmConfigId: string) => ["llm-config", llmConfigId] as const,
preferences: (searchSpaceId: string) => ["llm-preferences", searchSpaceId] as const,
},
auth: {
user: ["auth", "user"] as const,
},