feat: add LLM config cache keys and create mutation atom

This commit is contained in:
CREDO23 2025-12-09 11:19:58 +00:00
parent 03279a2836
commit 2200d7b63c
3 changed files with 38 additions and 1 deletions

View file

@@ -1,6 +1,7 @@
import type { GetChatsRequest } from "@/contracts/types/chat.types";
import type { GetDocumentsRequest } from "@/contracts/types/document.types";
import type { GetPodcastsRequest } from "@/contracts/types/podcast.types";
import type { GetLLMConfigsRequest } from "@/contracts/types/llm-config.types";
export const cacheKeys = {
chats: {
@@ -21,6 +22,14 @@ export const cacheKeys = {
typeCounts: (searchSpaceId?: string) => ["documents", "type-counts", searchSpaceId] as const,
byChunk: (chunkId: string) => ["documents", "by-chunk", chunkId] as const,
},
// Query-cache key factories for LLM config resources.
// Every entry shares the "llm-configs" prefix so a single prefix
// invalidation of ["llm-configs"] clears list and detail caches together.
llmConfigs: {
	// Global (non-search-space-scoped) config list.
	global: () => ["llm-configs", "global"] as const,
	// Every config within one search space.
	// NOTE(review): this key can collide with withQueryParams output when a
	// flattened query value equals a searchSpaceId — confirm callers never
	// mix the two for the same cache.
	all: (searchSpaceId: string) => ["llm-configs", searchSpaceId] as const,
	// List filtered by arbitrary query params; param values are flattened
	// into the key so distinct filters map to distinct cache entries.
	withQueryParams: (queries: GetLLMConfigsRequest["queryParams"]) =>
		["llm-configs", ...(queries ? Object.values(queries) : [])] as const,
	// Single config detail.
	// Fixed: was ["llm-config", llmConfigId] — the singular prefix escaped
	// prefix-based invalidation of "llm-configs" and broke the convention
	// used by documents.byChunk (["documents", "by-chunk", chunkId]).
	byId: (llmConfigId: string) => ["llm-configs", "by-id", llmConfigId] as const,
	// Per-search-space model preference selection.
	// NOTE(review): deliberately(?) uses a separate "llm-preferences" prefix,
	// so preferences survive "llm-configs" invalidation — verify with callers.
	preferences: (searchSpaceId: string) => ["llm-preferences", searchSpaceId] as const,
},
auth: {
user: ["auth", "user"] as const,
},