feat(llm-config): replace useGlobalLLMConfigs with globalLLMConfigsAtom in llm-role-manager and model-config-manager

This commit is contained in:
CREDO23 2025-12-10 09:19:41 +00:00
parent 4eb521e156
commit 62fcec425b
2 changed files with 13 additions and 13 deletions

View file

@@ -27,10 +27,10 @@ import {
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
import { useGlobalLLMConfigs, useLLMPreferences } from "@/hooks/use-llm-configs";
import { useLLMPreferences } from "@/hooks/use-llm-configs";
import { useAtomValue } from "jotai";
import { llmConfigsAtom } from "@/atoms/llm-config/llm-config-query.atoms";
import { llmConfigsAtom, globalLLMConfigsAtom } from "@/atoms/llm-config/llm-config-query.atoms";
const ROLE_DESCRIPTIONS = {
long_context: {
icon: Brain,
@@ -70,11 +70,11 @@ export function LLMRoleManager({ searchSpaceId }: LLMRoleManagerProps) {
refetch: refreshConfigs
} = useAtomValue(llmConfigsAtom);
const {
globalConfigs,
loading: globalConfigsLoading,
error: globalConfigsError,
refreshGlobalConfigs,
} = useGlobalLLMConfigs();
data: globalConfigs = [],
isFetching: globalConfigsLoading,
isError: globalConfigsError,
refetch: refreshGlobalConfigs,
} = useAtomValue(globalLLMConfigsAtom);
const {
preferences,
loading: preferencesLoading,

View file

@@ -67,18 +67,18 @@ import InferenceParamsEditor from "../inference-params-editor";
import { useAtomValue } from "jotai";
import { createLLMConfigMutationAtom, deleteLLMConfigMutationAtom, updateLLMConfigMutationAtom } from "@/atoms/llm-config/llm-config-mutation.atoms";
import { CreateLLMConfigRequest, CreateLLMConfigResponse, LLMConfig, UpdateLLMConfigResponse } from "@/contracts/types/llm-config.types";
import { llmConfigsAtom } from "@/atoms/llm-config/llm-config-query.atoms";
import { globalLLMConfigsAtom, llmConfigsAtom } from "@/atoms/llm-config/llm-config-query.atoms";
interface ModelConfigManagerProps {
searchSpaceId: number;
}
export function ModelConfigManager({ searchSpaceId }: ModelConfigManagerProps) {
const { mutateAsync : createLLMConfig, isPending : isCreatingLLMConfig, error : createLLMConfigError, isError : isCreateLLMConfigError } = useAtomValue(createLLMConfigMutationAtom)
const { mutateAsync : updateLLMConfig, isPending : isUpdatingLLMConfig, error : updateLLMConfigError, isError : isUpdateLLMConfigError} = useAtomValue(updateLLMConfigMutationAtom)
const { mutateAsync : deleteLLMConfig, isPending : isDeletingLLMConfig, error : deleteLLMConfigError, isError : isDeleteLLMConfigError } = useAtomValue(deleteLLMConfigMutationAtom)
const { data : llmConfigs, isFetching : isFetchingLLMConfigs, error : LLMConfigsFetchError, isError : isLLMConfigsFetchError, refetch : refreshConfigs} = useAtomValue(llmConfigsAtom)
const { globalConfigs } = useGlobalLLMConfigs();
const { mutateAsync : createLLMConfig, isPending : isCreatingLLMConfig, error : createLLMConfigError, } = useAtomValue(createLLMConfigMutationAtom)
const { mutateAsync : updateLLMConfig, isPending : isUpdatingLLMConfig, error : updateLLMConfigError,} = useAtomValue(updateLLMConfigMutationAtom)
const { mutateAsync : deleteLLMConfig, isPending : isDeletingLLMConfig, error : deleteLLMConfigError, } = useAtomValue(deleteLLMConfigMutationAtom)
const { data : llmConfigs, isFetching : isFetchingLLMConfigs, error : LLMConfigsFetchError, refetch : refreshConfigs} = useAtomValue(llmConfigsAtom)
const { data : globalConfigs = [] } = useAtomValue(globalLLMConfigsAtom);
const [isAddingNew, setIsAddingNew] = useState(false);
const [editingConfig, setEditingConfig] = useState<LLMConfig | null>(null);
const [formData, setFormData] = useState<CreateLLMConfigRequest>({