SurfSense/surfsense_web/atoms/new-llm-config/system-models-query.atoms.ts
Vonic c1776b3ec8 feat(story-3.5): add cloud-mode LLM model selection with token quota enforcement
Implement system-managed model catalog, subscription tier enforcement,
atomic token quota tracking, and frontend cloud/self-hosted conditional
rendering. Apply all 20 BMAD code review patches including security
fixes (cross-user API key hijack), race condition mitigation (atomic SQL
UPDATE), and SSE mid-stream quota error handling.

Co-Authored-By: Claude Sonnet 4 <noreply@anthropic.com>
2026-04-14 17:01:21 +07:00

30 lines
1.1 KiB
TypeScript

import { atom } from "jotai";
import { atomWithQuery } from "jotai-tanstack-query";
import { newLLMConfigApiService } from "@/lib/apis/new-llm-config-api.service";
import { isCloud } from "@/lib/env-config";
import { cacheKeys } from "@/lib/query-client/cache-keys";
/**
 * Read-only query atom exposing the system-managed LLM catalogue.
 *
 * Fetches only in cloud mode (DEPLOYMENT_MODE=cloud); self-hosted
 * deployments have no system catalogue, so the query stays disabled.
 * The backend serves these models with negative IDs, as configured
 * in its YAML catalogue.
 */
export const systemModelsAtom = atomWithQuery(() => ({
	queryKey: cacheKeys.systemModels.all(),
	queryFn: async () => newLLMConfigApiService.getSystemModels(),
	// System models rarely change; treat results as fresh for 10 minutes.
	staleTime: 10 * 60 * 1000,
	// Gate the request on deployment mode — no-op when self-hosted.
	enabled: isCloud(),
}));
/**
 * Writable atom tracking the user's chosen system model ID (a negative
 * integer from the system catalogue).
 *
 * A `null` value means "no explicit choice" and lets the backend fall
 * back to its default model.
 *
 * NOTE: This atom is session-global — the selection carries across
 * search spaces. ChatHeader is responsible for resetting it when a
 * reset is appropriate.
 */
const noSelection: number | null = null;
export const selectedSystemModelIdAtom = atom(noSelection);