From 5c4aa772556d1f2431ec2fe17815979314861613 Mon Sep 17 00:00:00 2001 From: Ramnique Singh <30795890+ramnique@users.noreply.github.com> Date: Wed, 22 Apr 2026 12:26:01 +0530 Subject: [PATCH 1/7] freeze model + provider per run at creation time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The model dropdown was broken in two ways: it wrote to ~/.rowboat/config/models.json (the BYOK creds file, stamped with a fake `flavor: 'openrouter'` to satisfy zod when signed in), and the runtime ignored that write entirely for signed-in users because `streamAgent` hard-coded `gpt-5.4`. Model selection was also globally scoped, so every chat shared one brain. This change moves model + provider out of the global config and onto the run itself, resolved once at runs:create and frozen for the run's lifetime. ## Resolution `runsCore.createRun` resolves per-field, falling through: run.model = opts.model ?? agent.model ?? defaults.model run.provider = opts.provider ?? agent.provider ?? defaults.provider A new `core/models/defaults.ts` is the only place in the codebase that branches on signed-in state. `getDefaultModelAndProvider()` returns name strings; `resolveProviderConfig(name)` does the name → full LlmProvider lookup at runtime. `createProvider` learns about `flavor: 'rowboat'` so the gateway is just another flavor. `provider` is stored as a name (e.g. `"rowboat"`, `"openai"`), not a full LlmProvider object. API keys never get written into the JSONL log; rotating a key in models.json applies to existing runs without re-creation. Cost: deleting a provider from settings breaks runs that referenced it (clear error surfaced via `resolveProviderConfig`). ## Runtime `streamAgent` no longer resolves anything — it reads `state.runModel` / `state.runProvider`, looks up the provider config, instantiates. Subflows inherit the parent run's pair, so KG / inline-task subagents run on whatever the main run resolved to at creation. 
The `knowledgeGraphAgents` array, `isKgAgent`, and the per-agent default constants are gone. KG / inline-task / pre-built agents declare their preferred model in YAML frontmatter (claude-haiku-4.5 / claude-sonnet-4.6) — used at resolution time when those agents are themselves the top-level agent of a run (background triggers, scheduled tasks, etc.). ## Standalone callers Non-run LLM call sites (summarize_meeting, track/routing, builtin-tools parseFile) and `agent-schedule/runner` were branching on signed-in independently. They all route through `getDefaultModelAndProvider` + `resolveProviderConfig` + `createProvider` now; `agent-schedule/runner` switched from raw `runsRepo.create` to `runsCore.createRun` so resolution applies to scheduled-agent runs too. ## UI `chat-input-with-mentions` stops calling `models:saveConfig`. The dropdown notifies the parent via `onSelectedModelChange` ({provider, model} as names); App.tsx stashes selection per-tab and passes it to the next `runs:create`. When a run already exists, the input fetches it and renders a static label — model can't change mid-run. ## Legacy runs A lenient zod schema in `repo.ts` (`StartEvent.extend(...optional)` plus `RunEvent.or(LegacyStartEvent)`) parses pre-existing runs. `repo.fetch` fills missing model/provider from current defaults and returns the strict canonical `Run` type. No file-rewriting migration; no impact on the canonical schema in `@x/shared`. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- apps/x/apps/renderer/src/App.tsx | 18 +++ .../components/chat-input-with-mentions.tsx | 142 +++++++----------- .../renderer/src/components/chat-sidebar.tsx | 5 +- .../core/src/agent-schedule/runner.ts | 5 +- apps/x/packages/core/src/agents/runtime.ts | 42 +++--- .../core/src/application/lib/builtin-tools.ts | 13 +- .../core/src/knowledge/agent_notes_agent.ts | 1 + .../core/src/knowledge/inline_task_agent.ts | 2 +- .../core/src/knowledge/labeling_agent.ts | 2 +- .../core/src/knowledge/note_creation.ts | 2 +- .../core/src/knowledge/note_tagging_agent.ts | 2 +- .../core/src/knowledge/summarize_meeting.ts | 17 +-- .../core/src/knowledge/track/routing.ts | 17 +-- apps/x/packages/core/src/models/defaults.ts | 53 +++++++ apps/x/packages/core/src/models/gateway.ts | 2 +- apps/x/packages/core/src/models/models.ts | 7 +- .../core/src/pre_built/email-draft.md | 2 +- .../core/src/pre_built/meeting-prep.md | 2 +- apps/x/packages/core/src/runs/repo.ts | 64 ++++++-- apps/x/packages/core/src/runs/runs.ts | 14 +- apps/x/packages/shared/src/models.ts | 11 +- apps/x/packages/shared/src/runs.ts | 12 +- 22 files changed, 256 insertions(+), 179 deletions(-) create mode 100644 apps/x/packages/core/src/models/defaults.ts diff --git a/apps/x/apps/renderer/src/App.tsx b/apps/x/apps/renderer/src/App.tsx index 602c0956..de75fb4a 100644 --- a/apps/x/apps/renderer/src/App.tsx +++ b/apps/x/apps/renderer/src/App.tsx @@ -817,6 +817,7 @@ function App() { const chatTabIdCounterRef = useRef(0) const newChatTabId = () => `chat-tab-${++chatTabIdCounterRef.current}` const chatDraftsRef = useRef(new Map()) + const selectedModelByTabRef = useRef(new Map()) const chatScrollTopByTabRef = useRef(new Map()) const [toolOpenByTab, setToolOpenByTab] = useState>>({}) const [chatViewportAnchorByTab, setChatViewportAnchorByTab] = useState>({}) @@ -2165,8 +2166,10 @@ function App() { let isNewRun = false let newRunCreatedAt: string | null = null if (!currentRunId) { + 
const selected = selectedModelByTabRef.current.get(submitTabId) const run = await window.ipc.invoke('runs:create', { agentId, + ...(selected ? { model: selected.model, provider: selected.provider } : {}), }) currentRunId = run.id newRunCreatedAt = run.createdAt @@ -2471,6 +2474,7 @@ function App() { return next }) chatDraftsRef.current.delete(tabId) + selectedModelByTabRef.current.delete(tabId) chatScrollTopByTabRef.current.delete(tabId) setToolOpenByTab((prev) => { if (!(tabId in prev)) return prev @@ -4644,6 +4648,13 @@ function App() { runId={tabState.runId} initialDraft={chatDraftsRef.current.get(tab.id)} onDraftChange={(text) => setChatDraftForTab(tab.id, text)} + onSelectedModelChange={(m) => { + if (m) { + selectedModelByTabRef.current.set(tab.id, m) + } else { + selectedModelByTabRef.current.delete(tab.id) + } + }} isRecording={isActive && isRecording} recordingText={isActive ? voice.interimText : undefined} recordingState={isActive ? (voice.state === 'connecting' ? 'connecting' : 'listening') : undefined} @@ -4697,6 +4708,13 @@ function App() { onPresetMessageConsumed={() => setPresetMessage(undefined)} getInitialDraft={(tabId) => chatDraftsRef.current.get(tabId)} onDraftChangeForTab={setChatDraftForTab} + onSelectedModelChangeForTab={(tabId, m) => { + if (m) { + selectedModelByTabRef.current.set(tabId, m) + } else { + selectedModelByTabRef.current.delete(tabId) + } + }} pendingAskHumanRequests={pendingAskHumanRequests} allPermissionRequests={allPermissionRequests} permissionResponses={permissionResponses} diff --git a/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx b/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx index 37d8d053..0d2eb13d 100644 --- a/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx +++ b/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx @@ -69,13 +69,16 @@ const providerDisplayNames: Record = { rowboat: 'Rowboat', } +type ProviderName = "openai" | "anthropic" | "google" | 
"openrouter" | "aigateway" | "ollama" | "openai-compatible" | "rowboat" + interface ConfiguredModel { - flavor: "openai" | "anthropic" | "google" | "openrouter" | "aigateway" | "ollama" | "openai-compatible" | "rowboat" + provider: ProviderName + model: string +} + +export interface SelectedModel { + provider: string model: string - apiKey?: string - baseURL?: string - headers?: Record - knowledgeGraphModel?: string } function getAttachmentIcon(kind: AttachmentIconKind) { @@ -120,6 +123,8 @@ interface ChatInputInnerProps { ttsMode?: 'summary' | 'full' onToggleTts?: () => void onTtsModeChange?: (mode: 'summary' | 'full') => void + /** Fired when the user picks a different model in the dropdown (only when no run exists yet). */ + onSelectedModelChange?: (model: SelectedModel | null) => void } function ChatInputInner({ @@ -145,6 +150,7 @@ function ChatInputInner({ ttsMode, onToggleTts, onTtsModeChange, + onSelectedModelChange, }: ChatInputInnerProps) { const controller = usePromptInputController() const message = controller.textInput.value @@ -155,10 +161,27 @@ function ChatInputInner({ const [configuredModels, setConfiguredModels] = useState([]) const [activeModelKey, setActiveModelKey] = useState('') + const [lockedModel, setLockedModel] = useState(null) const [searchEnabled, setSearchEnabled] = useState(false) const [searchAvailable, setSearchAvailable] = useState(false) const [isRowboatConnected, setIsRowboatConnected] = useState(false) + // When a run exists, freeze the dropdown to the run's resolved model+provider. 
+ useEffect(() => { + if (!runId) { + setLockedModel(null) + return + } + let cancelled = false + window.ipc.invoke('runs:fetch', { runId }).then((run) => { + if (cancelled) return + if (run.provider && run.model) { + setLockedModel({ provider: run.provider, model: run.model }) + } + }).catch(() => { /* legacy run or fetch failure — leave unlocked */ }) + return () => { cancelled = true } + }, [runId]) + // Check Rowboat sign-in state useEffect(() => { window.ipc.invoke('oauth:getState', null).then((result) => { @@ -176,42 +199,20 @@ function ChatInputInner({ return cleanup }, []) - // Load model config (gateway when signed in, local config when BYOK) + // Load the list of models the user can choose from. + // Signed-in: gateway model list. Signed-out: providers configured in models.json. const loadModelConfig = useCallback(async () => { try { if (isRowboatConnected) { - // Fetch gateway models const listResult = await window.ipc.invoke('models:list', null) const rowboatProvider = listResult.providers?.find( (p: { id: string }) => p.id === 'rowboat' ) const models: ConfiguredModel[] = (rowboatProvider?.models || []).map( - (m: { id: string }) => ({ flavor: 'rowboat', model: m.id }) + (m: { id: string }) => ({ provider: 'rowboat', model: m.id }) ) - - // Read current default from config - let defaultModel = '' - try { - const result = await window.ipc.invoke('workspace:readFile', { path: 'config/models.json' }) - const parsed = JSON.parse(result.data) - defaultModel = parsed?.model || '' - } catch { /* no config yet */ } - - if (defaultModel) { - models.sort((a, b) => { - if (a.model === defaultModel) return -1 - if (b.model === defaultModel) return 1 - return 0 - }) - } - setConfiguredModels(models) - const activeKey = defaultModel - ? `rowboat/${defaultModel}` - : models[0] ? 
`rowboat/${models[0].model}` : '' - if (activeKey) setActiveModelKey(activeKey) } else { - // BYOK: read from local models.json const result = await window.ipc.invoke('workspace:readFile', { path: 'config/models.json' }) const parsed = JSON.parse(result.data) const models: ConfiguredModel[] = [] @@ -223,32 +224,12 @@ function ChatInputInner({ const allModels = modelList.length > 0 ? modelList : singleModel ? [singleModel] : [] for (const model of allModels) { if (model) { - models.push({ - flavor: flavor as ConfiguredModel['flavor'], - model, - apiKey: (e.apiKey as string) || undefined, - baseURL: (e.baseURL as string) || undefined, - headers: (e.headers as Record) || undefined, - knowledgeGraphModel: (e.knowledgeGraphModel as string) || undefined, - }) + models.push({ provider: flavor as ProviderName, model }) } } } } - const defaultKey = parsed?.provider?.flavor && parsed?.model - ? `${parsed.provider.flavor}/${parsed.model}` - : '' - models.sort((a, b) => { - const aKey = `${a.flavor}/${a.model}` - const bKey = `${b.flavor}/${b.model}` - if (aKey === defaultKey) return -1 - if (bKey === defaultKey) return 1 - return 0 - }) setConfiguredModels(models) - if (defaultKey) { - setActiveModelKey(defaultKey) - } } } catch { // No config yet @@ -284,40 +265,15 @@ function ChatInputInner({ checkSearch() }, [isActive, isRowboatConnected]) - const handleModelChange = useCallback(async (key: string) => { - const entry = configuredModels.find((m) => `${m.flavor}/${m.model}` === key) + // Selecting a model affects only the *next* run created from this tab. + // Once a run exists, model is frozen on the run and the dropdown is read-only. 
+ const handleModelChange = useCallback((key: string) => { + if (lockedModel) return + const entry = configuredModels.find((m) => `${m.provider}/${m.model}` === key) if (!entry) return setActiveModelKey(key) - - try { - if (entry.flavor === 'rowboat') { - // Gateway model — save with valid Zod flavor, no credentials - await window.ipc.invoke('models:saveConfig', { - provider: { flavor: 'openrouter' as const }, - model: entry.model, - knowledgeGraphModel: entry.knowledgeGraphModel, - }) - } else { - // BYOK — preserve full provider config - const providerModels = configuredModels - .filter((m) => m.flavor === entry.flavor) - .map((m) => m.model) - await window.ipc.invoke('models:saveConfig', { - provider: { - flavor: entry.flavor, - apiKey: entry.apiKey, - baseURL: entry.baseURL, - headers: entry.headers, - }, - model: entry.model, - models: providerModels, - knowledgeGraphModel: entry.knowledgeGraphModel, - }) - } - } catch { - toast.error('Failed to switch model') - } - }, [configuredModels]) + onSelectedModelChange?.({ provider: entry.provider, model: entry.model }) + }, [configuredModels, lockedModel, onSelectedModelChange]) // Restore the tab draft when this input mounts. useEffect(() => { @@ -555,7 +511,14 @@ function ChatInputInner({ ) )}
- {configuredModels.length > 0 && ( + {lockedModel ? ( + + {lockedModel.model} + + ) : configuredModels.length > 0 ? ( @@ -571,18 +534,18 @@ function ChatInputInner({ {configuredModels.map((m) => { - const key = `${m.flavor}/${m.model}` + const key = `${m.provider}/${m.model}` return ( {m.model} - {providerDisplayNames[m.flavor] || m.flavor} + {providerDisplayNames[m.provider] || m.provider} ) })} - )} + ) : null} {onToggleTts && ttsAvailable && (
@@ -729,6 +692,7 @@ export interface ChatInputWithMentionsProps { ttsMode?: 'summary' | 'full' onToggleTts?: () => void onTtsModeChange?: (mode: 'summary' | 'full') => void + onSelectedModelChange?: (model: SelectedModel | null) => void } export function ChatInputWithMentions({ @@ -757,6 +721,7 @@ export function ChatInputWithMentions({ ttsMode, onToggleTts, onTtsModeChange, + onSelectedModelChange, }: ChatInputWithMentionsProps) { return ( @@ -783,6 +748,7 @@ export function ChatInputWithMentions({ ttsMode={ttsMode} onToggleTts={onToggleTts} onTtsModeChange={onTtsModeChange} + onSelectedModelChange={onSelectedModelChange} /> ) diff --git a/apps/x/apps/renderer/src/components/chat-sidebar.tsx b/apps/x/apps/renderer/src/components/chat-sidebar.tsx index e51d7c8f..852993a2 100644 --- a/apps/x/apps/renderer/src/components/chat-sidebar.tsx +++ b/apps/x/apps/renderer/src/components/chat-sidebar.tsx @@ -26,7 +26,7 @@ import { type PromptInputMessage, type FileMention } from '@/components/ai-eleme import { FileCardProvider } from '@/contexts/file-card-context' import { MarkdownPreOverride } from '@/components/ai-elements/markdown-code-override' import { TabBar, type ChatTab } from '@/components/tab-bar' -import { ChatInputWithMentions, type StagedAttachment } from '@/components/chat-input-with-mentions' +import { ChatInputWithMentions, type StagedAttachment, type SelectedModel } from '@/components/chat-input-with-mentions' import { ChatMessageAttachments } from '@/components/chat-message-attachments' import { wikiLabel } from '@/lib/wiki-links' import { @@ -158,6 +158,7 @@ interface ChatSidebarProps { onPresetMessageConsumed?: () => void getInitialDraft?: (tabId: string) => string | undefined onDraftChangeForTab?: (tabId: string, text: string) => void + onSelectedModelChangeForTab?: (tabId: string, model: SelectedModel | null) => void pendingAskHumanRequests?: ChatTabViewState['pendingAskHumanRequests'] allPermissionRequests?: ChatTabViewState['allPermissionRequests'] 
permissionResponses?: ChatTabViewState['permissionResponses'] @@ -211,6 +212,7 @@ export function ChatSidebar({ onPresetMessageConsumed, getInitialDraft, onDraftChangeForTab, + onSelectedModelChangeForTab, pendingAskHumanRequests = new Map(), allPermissionRequests = new Map(), permissionResponses = new Map(), @@ -662,6 +664,7 @@ export function ChatSidebar({ runId={tabState.runId} initialDraft={getInitialDraft?.(tab.id)} onDraftChange={onDraftChangeForTab ? (text) => onDraftChangeForTab(tab.id, text) : undefined} + onSelectedModelChange={onSelectedModelChangeForTab ? (m) => onSelectedModelChangeForTab(tab.id, m) : undefined} isRecording={isActive && isRecording} recordingText={isActive ? recordingText : undefined} recordingState={isActive ? recordingState : undefined} diff --git a/apps/x/packages/core/src/agent-schedule/runner.ts b/apps/x/packages/core/src/agent-schedule/runner.ts index 4eab6081..5fca6878 100644 --- a/apps/x/packages/core/src/agent-schedule/runner.ts +++ b/apps/x/packages/core/src/agent-schedule/runner.ts @@ -8,6 +8,7 @@ import { IMonotonicallyIncreasingIdGenerator } from "../application/lib/id-gen.j import { AgentScheduleConfig, AgentScheduleEntry } from "@x/shared/dist/agent-schedule.js"; import { AgentScheduleState, AgentScheduleStateEntry } from "@x/shared/dist/agent-schedule-state.js"; import { MessageEvent } from "@x/shared/dist/runs.js"; +import { createRun } from "../runs/runs.js"; import z from "zod"; const DEFAULT_STARTING_MESSAGE = "go"; @@ -162,8 +163,8 @@ async function runAgent( }); try { - // Create a new run - const run = await runsRepo.create({ agentId: agentName }); + // Create a new run via core (resolves agent + default model+provider). 
+ const run = await createRun({ agentId: agentName }); console.log(`[AgentRunner] Created run ${run.id} for agent ${agentName}`); // Add the starting message as a user message diff --git a/apps/x/packages/core/src/agents/runtime.ts b/apps/x/packages/core/src/agents/runtime.ts index ae69d60c..6c84ac8b 100644 --- a/apps/x/packages/core/src/agents/runtime.ts +++ b/apps/x/packages/core/src/agents/runtime.ts @@ -16,8 +16,7 @@ import { isBlocked, extractCommandNames } from "../application/lib/command-execu import container from "../di/container.js"; import { IModelConfigRepo } from "../models/repo.js"; import { createProvider } from "../models/models.js"; -import { isSignedIn } from "../account/account.js"; -import { getGatewayProvider } from "../models/gateway.js"; +import { resolveProviderConfig } from "../models/defaults.js"; import { IAgentsRepo } from "./repo.js"; import { IMonotonicallyIncreasingIdGenerator } from "../application/lib/id-gen.js"; import { IBus } from "../application/lib/bus.js"; @@ -649,6 +648,8 @@ export class AgentState { runId: string | null = null; agent: z.infer | null = null; agentName: string | null = null; + runModel: string | null = null; + runProvider: string | null = null; messages: z.infer = []; lastAssistantMsg: z.infer | null = null; subflowStates: Record = {}; @@ -762,13 +763,18 @@ export class AgentState { case "start": this.runId = event.runId; this.agentName = event.agentName; + this.runModel = event.model; + this.runProvider = event.provider; break; case "spawn-subflow": // Seed the subflow state with its agent so downstream loadAgent works. + // Subflows inherit the parent run's model+provider — there's one pair per run. 
if (!this.subflowStates[event.toolCallId]) { this.subflowStates[event.toolCallId] = new AgentState(); } this.subflowStates[event.toolCallId].agentName = event.agentName; + this.subflowStates[event.toolCallId].runModel = this.runModel; + this.subflowStates[event.toolCallId].runProvider = this.runProvider; break; case "message": this.messages.push(event.message); @@ -857,35 +863,23 @@ export async function* streamAgent({ yield event; } - const modelConfig = await modelConfigRepo.getConfig(); - if (!modelConfig) { - throw new Error("Model config not found"); - } - // set up agent const agent = await loadAgent(state.agentName!); // set up tools const tools = await buildTools(agent); - // set up provider + model - const signedIn = await isSignedIn(); - const provider = signedIn - ? await getGatewayProvider() - : createProvider(modelConfig.provider); - const knowledgeGraphAgents = ["note_creation", "email-draft", "meeting-prep", "labeling_agent", "note_tagging_agent", "agent_notes_agent"]; - const isKgAgent = knowledgeGraphAgents.includes(state.agentName!); - const isInlineTaskAgent = state.agentName === "inline_task_agent"; - const defaultModel = signedIn ? "gpt-5.4" : modelConfig.model; - const defaultKgModel = signedIn ? "anthropic/claude-haiku-4.5" : defaultModel; - const defaultInlineTaskModel = signedIn ? "anthropic/claude-sonnet-4.6" : defaultModel; - const modelId = isInlineTaskAgent - ? defaultInlineTaskModel - : (isKgAgent && modelConfig.knowledgeGraphModel) - ? modelConfig.knowledgeGraphModel - : isKgAgent ? defaultKgModel : defaultModel; + // model+provider were resolved and frozen on the run at runs:create time. + // Look up the named provider's current credentials from models.json and + // instantiate the LLM client. No selection happens here. 
+ if (!state.runModel || !state.runProvider) { + throw new Error(`Run ${runId} is missing model/provider on its start event`); + } + const modelId = state.runModel; + const providerConfig = await resolveProviderConfig(state.runProvider); + const provider = createProvider(providerConfig); const model = provider.languageModel(modelId); - logger.log(`using model: ${modelId}`); + logger.log(`using model: ${modelId} (provider: ${state.runProvider})`); let loopCounter = 0; let voiceInput = false; diff --git a/apps/x/packages/core/src/application/lib/builtin-tools.ts b/apps/x/packages/core/src/application/lib/builtin-tools.ts index a2b68427..52083277 100644 --- a/apps/x/packages/core/src/application/lib/builtin-tools.ts +++ b/apps/x/packages/core/src/application/lib/builtin-tools.ts @@ -21,9 +21,8 @@ import { BrowserControlInputSchema, type BrowserControlInput } from "@x/shared/d import type { ToolContext } from "./exec-tool.js"; import { generateText } from "ai"; import { createProvider } from "../../models/models.js"; -import { IModelConfigRepo } from "../../models/repo.js"; +import { getDefaultModelAndProvider, resolveProviderConfig } from "../../models/defaults.js"; import { isSignedIn } from "../../account/account.js"; -import { getGatewayProvider } from "../../models/gateway.js"; import { getAccessToken } from "../../auth/tokens.js"; import { API_URL } from "../../config/env.js"; import { updateContent, updateTrackBlock } from "../../knowledge/track/fileops.js"; @@ -746,13 +745,9 @@ export const BuiltinTools: z.infer = { const base64 = buffer.toString('base64'); - // Resolve model config from DI container - const modelConfigRepo = container.resolve('modelConfigRepo'); - const modelConfig = await modelConfigRepo.getConfig(); - const provider = await isSignedIn() - ? 
await getGatewayProvider() - : createProvider(modelConfig.provider); - const model = provider.languageModel(modelConfig.model); + const { model: modelId, provider: providerName } = await getDefaultModelAndProvider(); + const providerConfig = await resolveProviderConfig(providerName); + const model = createProvider(providerConfig).languageModel(modelId); const userPrompt = prompt || 'Convert this file to well-structured markdown.'; diff --git a/apps/x/packages/core/src/knowledge/agent_notes_agent.ts b/apps/x/packages/core/src/knowledge/agent_notes_agent.ts index 58aa22a7..d7087405 100644 --- a/apps/x/packages/core/src/knowledge/agent_notes_agent.ts +++ b/apps/x/packages/core/src/knowledge/agent_notes_agent.ts @@ -1,5 +1,6 @@ export function getRaw(): string { return `--- +model: anthropic/claude-haiku-4.5 tools: workspace-writeFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/inline_task_agent.ts b/apps/x/packages/core/src/knowledge/inline_task_agent.ts index d25ff74b..9c3e2568 100644 --- a/apps/x/packages/core/src/knowledge/inline_task_agent.ts +++ b/apps/x/packages/core/src/knowledge/inline_task_agent.ts @@ -13,7 +13,7 @@ export function getRaw(): string { const defaultEndISO = defaultEnd.toISOString(); return `--- -model: gpt-5.2 +model: anthropic/claude-sonnet-4.6 tools: ${toolEntries} --- diff --git a/apps/x/packages/core/src/knowledge/labeling_agent.ts b/apps/x/packages/core/src/knowledge/labeling_agent.ts index d28649b1..bb4a6efe 100644 --- a/apps/x/packages/core/src/knowledge/labeling_agent.ts +++ b/apps/x/packages/core/src/knowledge/labeling_agent.ts @@ -2,7 +2,7 @@ import { renderTagSystemForEmails } from './tag_system.js'; export function getRaw(): string { return `--- -model: gpt-5.2 +model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/note_creation.ts b/apps/x/packages/core/src/knowledge/note_creation.ts index 1740bdb7..283c77ec 100644 --- 
a/apps/x/packages/core/src/knowledge/note_creation.ts +++ b/apps/x/packages/core/src/knowledge/note_creation.ts @@ -3,7 +3,7 @@ import { renderNoteEffectRules } from './tag_system.js'; export function getRaw(): string { return `--- -model: gpt-5.2 +model: anthropic/claude-haiku-4.5 tools: workspace-writeFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/note_tagging_agent.ts b/apps/x/packages/core/src/knowledge/note_tagging_agent.ts index 0dc581f1..71b10910 100644 --- a/apps/x/packages/core/src/knowledge/note_tagging_agent.ts +++ b/apps/x/packages/core/src/knowledge/note_tagging_agent.ts @@ -2,7 +2,7 @@ import { renderTagSystemForNotes } from './tag_system.js'; export function getRaw(): string { return `--- -model: gpt-5.2 +model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/summarize_meeting.ts b/apps/x/packages/core/src/knowledge/summarize_meeting.ts index 30e3c5d4..a10aac28 100644 --- a/apps/x/packages/core/src/knowledge/summarize_meeting.ts +++ b/apps/x/packages/core/src/knowledge/summarize_meeting.ts @@ -1,11 +1,8 @@ import fs from 'fs'; import path from 'path'; import { generateText } from 'ai'; -import container from '../di/container.js'; -import type { IModelConfigRepo } from '../models/repo.js'; import { createProvider } from '../models/models.js'; -import { isSignedIn } from '../account/account.js'; -import { getGatewayProvider } from '../models/gateway.js'; +import { getDefaultModelAndProvider, resolveProviderConfig } from '../models/defaults.js'; import { WorkDir } from '../config/config.js'; const CALENDAR_SYNC_DIR = path.join(WorkDir, 'calendar_sync'); @@ -138,15 +135,9 @@ function loadCalendarEventContext(calendarEventJson: string): string { } export async function summarizeMeeting(transcript: string, meetingStartTime?: string, calendarEventJson?: string): Promise { - const repo = container.resolve('modelConfigRepo'); - const config = await repo.getConfig(); - 
const signedIn = await isSignedIn(); - const provider = signedIn - ? await getGatewayProvider() - : createProvider(config.provider); - const modelId = config.meetingNotesModel - || (signedIn ? "gpt-5.4" : config.model); - const model = provider.languageModel(modelId); + const { model: modelId, provider: providerName } = await getDefaultModelAndProvider(); + const providerConfig = await resolveProviderConfig(providerName); + const model = createProvider(providerConfig).languageModel(modelId); // If a specific calendar event was linked, use it directly. // Otherwise fall back to scanning events within ±3 hours. diff --git a/apps/x/packages/core/src/knowledge/track/routing.ts b/apps/x/packages/core/src/knowledge/track/routing.ts index f876106e..53e6f7b3 100644 --- a/apps/x/packages/core/src/knowledge/track/routing.ts +++ b/apps/x/packages/core/src/knowledge/track/routing.ts @@ -1,11 +1,8 @@ import { generateObject } from 'ai'; import { trackBlock, PrefixLogger } from '@x/shared'; import type { KnowledgeEvent } from '@x/shared/dist/track-block.js'; -import container from '../../di/container.js'; -import type { IModelConfigRepo } from '../../models/repo.js'; import { createProvider } from '../../models/models.js'; -import { isSignedIn } from '../../account/account.js'; -import { getGatewayProvider } from '../../models/gateway.js'; +import { getDefaultModelAndProvider, resolveProviderConfig } from '../../models/defaults.js'; const log = new PrefixLogger('TrackRouting'); @@ -37,15 +34,9 @@ Rules: - For each candidate, return BOTH trackId and filePath exactly as given. trackIds are not globally unique.`; async function resolveModel() { - const repo = container.resolve('modelConfigRepo'); - const config = await repo.getConfig(); - const signedIn = await isSignedIn(); - const provider = signedIn - ? await getGatewayProvider() - : createProvider(config.provider); - const modelId = config.knowledgeGraphModel - || (signedIn ? 
'gpt-5.4' : config.model); - return provider.languageModel(modelId); + const { model, provider } = await getDefaultModelAndProvider(); + const config = await resolveProviderConfig(provider); + return createProvider(config).languageModel(model); } function buildRoutingPrompt(event: KnowledgeEvent, batch: ParsedTrack[]): string { diff --git a/apps/x/packages/core/src/models/defaults.ts b/apps/x/packages/core/src/models/defaults.ts new file mode 100644 index 00000000..b9df52da --- /dev/null +++ b/apps/x/packages/core/src/models/defaults.ts @@ -0,0 +1,53 @@ +import z from "zod"; +import { LlmProvider } from "@x/shared/dist/models.js"; +import { IModelConfigRepo } from "./repo.js"; +import { isSignedIn } from "../account/account.js"; +import container from "../di/container.js"; + +const SIGNED_IN_DEFAULT_MODEL = "gpt-5.4"; +const SIGNED_IN_DEFAULT_PROVIDER = "rowboat"; + +/** + * The single source of truth for "what model+provider should we use when + * the caller didn't specify and the agent didn't declare". Returns names only. + * This is the only place that branches on signed-in state. + */ +export async function getDefaultModelAndProvider(): Promise<{ model: string; provider: string }> { + if (await isSignedIn()) { + return { model: SIGNED_IN_DEFAULT_MODEL, provider: SIGNED_IN_DEFAULT_PROVIDER }; + } + const repo = container.resolve("modelConfigRepo"); + const cfg = await repo.getConfig(); + return { model: cfg.model, provider: cfg.provider.flavor }; +} + +/** + * Resolve a provider name (as stored on a run, an agent, or returned by + * getDefaultModelAndProvider) into the full LlmProvider config that + * createProvider expects (apiKey/baseURL/headers). + * + * - "rowboat" → gateway provider (auth via OAuth bearer; no creds field). + * - other names → look up models.json's `providers[name]` map. + * - fallback: if the name matches the active default's flavor (legacy + * single-provider configs that didn't write to the providers map yet). 
+ */ +export async function resolveProviderConfig(name: string): Promise> { + if (name === "rowboat") { + return { flavor: "rowboat" }; + } + const repo = container.resolve("modelConfigRepo"); + const cfg = await repo.getConfig(); + const entry = cfg.providers?.[name]; + if (entry) { + return LlmProvider.parse({ + flavor: name, + apiKey: entry.apiKey, + baseURL: entry.baseURL, + headers: entry.headers, + }); + } + if (cfg.provider.flavor === name) { + return cfg.provider; + } + throw new Error(`Provider '${name}' is referenced but not configured`); +} diff --git a/apps/x/packages/core/src/models/gateway.ts b/apps/x/packages/core/src/models/gateway.ts index df9b413c..6f613704 100644 --- a/apps/x/packages/core/src/models/gateway.ts +++ b/apps/x/packages/core/src/models/gateway.ts @@ -10,7 +10,7 @@ const authedFetch: typeof fetch = async (input, init) => { return fetch(input, { ...init, headers }); }; -export async function getGatewayProvider(): Promise { +export function getGatewayProvider(): ProviderV2 { return createOpenRouter({ baseURL: `${API_URL}/v1/llm`, apiKey: 'managed-by-rowboat', diff --git a/apps/x/packages/core/src/models/models.ts b/apps/x/packages/core/src/models/models.ts index 38b6801f..92353f0a 100644 --- a/apps/x/packages/core/src/models/models.ts +++ b/apps/x/packages/core/src/models/models.ts @@ -8,7 +8,6 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider'; import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { LlmModelConfig, LlmProvider } from "@x/shared/dist/models.js"; import z from "zod"; -import { isSignedIn } from "../account/account.js"; import { getGatewayProvider } from "./gateway.js"; export const Provider = LlmProvider; @@ -65,6 +64,8 @@ export function createProvider(config: z.infer): ProviderV2 { baseURL, headers, }) as unknown as ProviderV2; + case "rowboat": + return getGatewayProvider(); default: throw new Error(`Unsupported provider flavor: ${config.flavor}`); } @@ -80,9 +81,7 @@ export async 
function testModelConnection( const controller = new AbortController(); const timeout = setTimeout(() => controller.abort(), effectiveTimeout); try { - const provider = await isSignedIn() - ? await getGatewayProvider() - : createProvider(providerConfig); + const provider = createProvider(providerConfig); const languageModel = provider.languageModel(model); await generateText({ model: languageModel, diff --git a/apps/x/packages/core/src/pre_built/email-draft.md b/apps/x/packages/core/src/pre_built/email-draft.md index f863271b..7a353d26 100644 --- a/apps/x/packages/core/src/pre_built/email-draft.md +++ b/apps/x/packages/core/src/pre_built/email-draft.md @@ -1,5 +1,5 @@ --- -model: gpt-4.1 +model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/pre_built/meeting-prep.md b/apps/x/packages/core/src/pre_built/meeting-prep.md index ca6bb2fc..5dc46eda 100644 --- a/apps/x/packages/core/src/pre_built/meeting-prep.md +++ b/apps/x/packages/core/src/pre_built/meeting-prep.md @@ -1,5 +1,5 @@ --- -model: gpt-4.1 +model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/runs/repo.ts b/apps/x/packages/core/src/runs/repo.ts index 5d563f1f..502976e6 100644 --- a/apps/x/packages/core/src/runs/repo.ts +++ b/apps/x/packages/core/src/runs/repo.ts @@ -6,9 +6,28 @@ import fsp from "fs/promises"; import fs from "fs"; import readline from "readline"; import { Run, RunEvent, StartEvent, CreateRunOptions, ListRunsResponse, MessageEvent } from "@x/shared/dist/runs.js"; +import { getDefaultModelAndProvider } from "../models/defaults.js"; + +/** + * Reading-only schemas: extend the canonical `StartEvent` / `RunEvent` to + * accept legacy run files written before `model`/`provider` were required. 
+ * + * `RunEvent.or(LegacyStartEvent)` works because zod unions try left-to-right: + * for any non-start event RunEvent matches first; for a strict start event + * RunEvent still matches; only a legacy start event falls through and parses + * as LegacyStartEvent. New event types stay maintained in one place + * (`@x/shared/dist/runs.js`) — the lenient form just adds one fallback variant. + */ +const LegacyStartEvent = StartEvent.extend({ + model: z.string().optional(), + provider: z.string().optional(), +}); +const ReadRunEvent = RunEvent.or(LegacyStartEvent); + +export type CreateRunRepoOptions = Required<z.infer<typeof CreateRunOptions>>; export interface IRunsRepo { - create(options: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>>; + create(options: CreateRunRepoOptions): Promise<z.infer<typeof Run>>; fetch(id: string): Promise<z.infer<typeof Run>>; list(cursor?: string): Promise<z.infer<typeof ListRunsResponse>>; appendEvents(runId: string, events: z.infer<typeof RunEvent>[]): Promise<void>; @@ -69,16 +88,19 @@ export class FSRunsRepo implements IRunsRepo { /** * Read file line-by-line using streams, stopping early once we have * the start event and title (or determine there's no title). + * + * Parses the start event with `LegacyStartEvent` so runs written before + * `model`/`provider` were required still surface in the list view.
*/ private async readRunMetadata(filePath: string): Promise<{ - start: z.infer<typeof StartEvent>; + start: z.infer<typeof LegacyStartEvent>; title: string | undefined; } | null> { return new Promise((resolve) => { const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); - let start: z.infer<typeof StartEvent> | null = null; + let start: z.infer<typeof LegacyStartEvent> | null = null; let title: string | undefined; let lineIndex = 0; @@ -88,11 +110,10 @@ export class FSRunsRepo implements IRunsRepo { try { if (lineIndex === 0) { - // First line should be the start event - start = StartEvent.parse(JSON.parse(trimmed)); + start = LegacyStartEvent.parse(JSON.parse(trimmed)); } else { // Subsequent lines - look for first user message or assistant response - const event = RunEvent.parse(JSON.parse(trimmed)); + const event = ReadRunEvent.parse(JSON.parse(trimmed)); if (event.type === 'message') { const msg = event.message; if (msg.role === 'user') { @@ -157,13 +178,15 @@ export class FSRunsRepo implements IRunsRepo { ); } - async create(options: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>> { + async create(options: CreateRunRepoOptions): Promise<z.infer<typeof Run>> { const runId = await this.idGenerator.next(); const ts = new Date().toISOString(); const start: z.infer<typeof StartEvent> = { type: "start", runId, agentName: options.agentId, + model: options.model, + provider: options.provider, subflow: [], ts, }; @@ -172,24 +195,41 @@ id: runId, createdAt: ts, agentId: options.agentId, + model: options.model, + provider: options.provider, log: [start], }; } async fetch(id: string): Promise<z.infer<typeof Run>> { const contents = await fsp.readFile(path.join(WorkDir, 'runs', `${id}.jsonl`), 'utf8'); - const events = contents.split('\n') + // Parse with the lenient schema so legacy start events (no model/provider) load.
+ const rawEvents = contents.split('\n') .filter(line => line.trim() !== '') - .map(line => RunEvent.parse(JSON.parse(line))); - if (events.length === 0 || events[0].type !== 'start') { + .map(line => ReadRunEvent.parse(JSON.parse(line))); + if (rawEvents.length === 0 || rawEvents[0].type !== 'start') { throw new Error('Corrupt run data'); } + // Backfill model/provider on the start event from current defaults if missing, + // then promote to the canonical strict types for callers. + const rawStart = rawEvents[0]; + const defaults = (!rawStart.model || !rawStart.provider) + ? await getDefaultModelAndProvider() + : null; + const start: z.infer = { + ...rawStart, + model: rawStart.model ?? defaults!.model, + provider: rawStart.provider ?? defaults!.provider, + }; + const events: z.infer[] = [start, ...rawEvents.slice(1) as z.infer[]]; const title = this.extractTitle(events); return { id, title, - createdAt: events[0].ts!, - agentId: events[0].agentName, + createdAt: start.ts!, + agentId: start.agentName, + model: start.model, + provider: start.provider, log: events, }; } diff --git a/apps/x/packages/core/src/runs/runs.ts b/apps/x/packages/core/src/runs/runs.ts index 8ea4688b..5b8395a9 100644 --- a/apps/x/packages/core/src/runs/runs.ts +++ b/apps/x/packages/core/src/runs/runs.ts @@ -10,11 +10,21 @@ import { IRunsLock } from "./lock.js"; import { forceCloseAllMcpClients } from "../mcp/mcp.js"; import { extractCommandNames } from "../application/lib/command-executor.js"; import { addToSecurityConfig } from "../config/security.js"; +import { loadAgent } from "../agents/runtime.js"; +import { getDefaultModelAndProvider } from "../models/defaults.js"; export async function createRun(opts: z.infer): Promise> { const repo = container.resolve('runsRepo'); const bus = container.resolve('bus'); - const run = await repo.create(opts); + + // Resolve model+provider once at creation: opts > agent declaration > defaults. 
+ // Both fields are plain strings (provider is a name, looked up at runtime). + const agent = await loadAgent(opts.agentId); + const defaults = await getDefaultModelAndProvider(); + const model = opts.model ?? agent.model ?? defaults.model; + const provider = opts.provider ?? agent.provider ?? defaults.provider; + + const run = await repo.create({ agentId: opts.agentId, model, provider }); await bus.publish(run.log[0]); return run; } @@ -110,4 +120,4 @@ export async function fetchRun(runId: string): Promise> { export async function listRuns(cursor?: string): Promise> { const repo = container.resolve('runsRepo'); return repo.list(cursor); -} \ No newline at end of file +} diff --git a/apps/x/packages/shared/src/models.ts b/apps/x/packages/shared/src/models.ts index 2c1588e8..feec148f 100644 --- a/apps/x/packages/shared/src/models.ts +++ b/apps/x/packages/shared/src/models.ts @@ -1,7 +1,7 @@ import { z } from "zod"; export const LlmProvider = z.object({ - flavor: z.enum(["openai", "anthropic", "google", "openrouter", "aigateway", "ollama", "openai-compatible"]), + flavor: z.enum(["openai", "anthropic", "google", "openrouter", "aigateway", "ollama", "openai-compatible", "rowboat"]), apiKey: z.string().optional(), baseURL: z.string().optional(), headers: z.record(z.string(), z.string()).optional(), @@ -11,6 +11,15 @@ export const LlmModelConfig = z.object({ provider: LlmProvider, model: z.string(), models: z.array(z.string()).optional(), + providers: z.record(z.string(), z.object({ + apiKey: z.string().optional(), + baseURL: z.string().optional(), + headers: z.record(z.string(), z.string()).optional(), + model: z.string().optional(), + models: z.array(z.string()).optional(), + })).optional(), + // Deprecated: per-run model+provider supersedes these. Kept on the schema so + // existing settings/onboarding UIs continue to compile until they're cleaned up. 
knowledgeGraphModel: z.string().optional(), meetingNotesModel: z.string().optional(), }); diff --git a/apps/x/packages/shared/src/runs.ts b/apps/x/packages/shared/src/runs.ts index 5f52f611..2c5bcc7a 100644 --- a/apps/x/packages/shared/src/runs.ts +++ b/apps/x/packages/shared/src/runs.ts @@ -19,6 +19,8 @@ export const RunProcessingEndEvent = BaseRunEvent.extend({ export const StartEvent = BaseRunEvent.extend({ type: z.literal("start"), agentName: z.string(), + model: z.string(), + provider: z.string(), }); export const SpawnSubFlowEvent = BaseRunEvent.extend({ @@ -121,6 +123,8 @@ export const Run = z.object({ title: z.string().optional(), createdAt: z.iso.datetime(), agentId: z.string(), + model: z.string(), + provider: z.string(), log: z.array(RunEvent), }); @@ -134,6 +138,8 @@ export const ListRunsResponse = z.object({ nextCursor: z.string().optional(), }); -export const CreateRunOptions = Run.pick({ - agentId: true, -}); \ No newline at end of file +export const CreateRunOptions = z.object({ + agentId: z.string(), + model: z.string().optional(), + provider: z.string().optional(), +}); From f4dbb58a7782a841895322691db06280641d6511 Mon Sep 17 00:00:00 2001 From: Arjun <6592213+arkml@users.noreply.github.com> Date: Thu, 23 Apr 2026 00:35:08 +0530 Subject: [PATCH 2/7] add rowboat meeting notes to graph --- apps/x/packages/core/src/knowledge/build_graph.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/x/packages/core/src/knowledge/build_graph.ts b/apps/x/packages/core/src/knowledge/build_graph.ts index 100af5d8..60c0572e 100644 --- a/apps/x/packages/core/src/knowledge/build_graph.ts +++ b/apps/x/packages/core/src/knowledge/build_graph.ts @@ -38,6 +38,7 @@ const SOURCE_FOLDERS = [ 'gmail_sync', path.join('knowledge', 'Meetings', 'fireflies'), path.join('knowledge', 'Meetings', 'granola'), + path.join('knowledge', 'Meetings', 'rowboat'), ]; // Voice memos are now created directly in knowledge/Voice Memos// From 75842fa06b1aa936eff45c03e07369fee92f8c86 Mon Sep 
17 00:00:00 2001 From: Arjun <6592213+arkml@users.noreply.github.com> Date: Thu, 23 Apr 2026 00:49:06 +0530 Subject: [PATCH 3/7] assistant chat ui shows the model name properly --- .../renderer/src/components/chat-input-with-mentions.tsx | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx b/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx index 0d2eb13d..e1fb950f 100644 --- a/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx +++ b/apps/x/apps/renderer/src/components/chat-input-with-mentions.tsx @@ -81,6 +81,10 @@ export interface SelectedModel { model: string } +function getSelectedModelDisplayName(model: string) { + return model.split('/').pop() || model +} + function getAttachmentIcon(kind: AttachmentIconKind) { switch (kind) { case 'audio': @@ -516,7 +520,7 @@ function ChatInputInner({ className="flex h-7 shrink-0 items-center gap-1 rounded-full px-2 text-xs text-muted-foreground" title={`${providerDisplayNames[lockedModel.provider] || lockedModel.provider} — fixed for this chat`} > - {lockedModel.model} + {getSelectedModelDisplayName(lockedModel.model)} ) : configuredModels.length > 0 ? 
( @@ -526,7 +530,7 @@ function ChatInputInner({ className="flex h-7 shrink-0 items-center gap-1 rounded-full px-2 text-xs text-muted-foreground transition-colors hover:bg-muted hover:text-foreground" > - {configuredModels.find((m) => `${m.provider}/${m.model}` === activeModelKey)?.model || configuredModels[0]?.model || 'Model'} + {getSelectedModelDisplayName(configuredModels.find((m) => `${m.provider}/${m.model}` === activeModelKey)?.model || configuredModels[0]?.model || 'Model')} From 0bb256879c756f2bed40cc783d10d2492ed4f7d8 Mon Sep 17 00:00:00 2001 From: Arjun <6592213+arkml@users.noreply.github.com> Date: Thu, 23 Apr 2026 21:29:51 +0530 Subject: [PATCH 4/7] preserve formatting in chat input text --- apps/x/apps/renderer/package.json | 1 + apps/x/apps/renderer/src/App.tsx | 23 +++++++++++++++++-- .../renderer/src/components/chat-sidebar.tsx | 23 +++++++++++++++++-- apps/x/pnpm-lock.yaml | 20 ++++++++++++++++ 4 files changed, 63 insertions(+), 4 deletions(-) diff --git a/apps/x/apps/renderer/package.json b/apps/x/apps/renderer/package.json index a8c67a43..d9216de1 100644 --- a/apps/x/apps/renderer/package.json +++ b/apps/x/apps/renderer/package.json @@ -49,6 +49,7 @@ "react": "^19.2.0", "react-dom": "^19.2.0", "recharts": "^3.8.0", + "remark-breaks": "^4.0.0", "sonner": "^2.0.7", "streamdown": "^1.6.10", "tailwind-merge": "^3.4.0", diff --git a/apps/x/apps/renderer/src/App.tsx b/apps/x/apps/renderer/src/App.tsx index de75fb4a..67f3f06a 100644 --- a/apps/x/apps/renderer/src/App.tsx +++ b/apps/x/apps/renderer/src/App.tsx @@ -62,6 +62,8 @@ import { BrowserPane } from '@/components/browser-pane/BrowserPane' import { VersionHistoryPanel } from '@/components/version-history-panel' import { FileCardProvider } from '@/contexts/file-card-context' import { MarkdownPreOverride } from '@/components/ai-elements/markdown-code-override' +import { defaultRemarkPlugins } from 'streamdown' +import remarkBreaks from 'remark-breaks' import { TabBar, type ChatTab, type FileTab } from 
'@/components/tab-bar' import { type ChatMessage, @@ -104,6 +106,11 @@ interface TreeNode extends DirEntry { const streamdownComponents = { pre: MarkdownPreOverride } +// Render user messages with markdown so bullets, bold, links, etc. survive the +// round-trip from the input textarea. `remarkBreaks` turns single newlines +// into
so typed line breaks are preserved without requiring blank lines. +const userMessageRemarkPlugins = [...Object.values(defaultRemarkPlugins), remarkBreaks] + function SmoothStreamingMessage({ text, components }: { text: string; components: typeof streamdownComponents }) { const smoothText = useSmoothedText(text) return {smoothText} @@ -3974,7 +3981,14 @@ function App() { {item.content && ( - {item.content} + + + {item.content} + + )} ) @@ -3995,7 +4009,12 @@ function App() { ))}
)} - {message} + + {message} + ) diff --git a/apps/x/apps/renderer/src/components/chat-sidebar.tsx b/apps/x/apps/renderer/src/components/chat-sidebar.tsx index 852993a2..0a407d5d 100644 --- a/apps/x/apps/renderer/src/components/chat-sidebar.tsx +++ b/apps/x/apps/renderer/src/components/chat-sidebar.tsx @@ -25,6 +25,8 @@ import { Suggestions } from '@/components/ai-elements/suggestions' import { type PromptInputMessage, type FileMention } from '@/components/ai-elements/prompt-input' import { FileCardProvider } from '@/contexts/file-card-context' import { MarkdownPreOverride } from '@/components/ai-elements/markdown-code-override' +import { defaultRemarkPlugins } from 'streamdown' +import remarkBreaks from 'remark-breaks' import { TabBar, type ChatTab } from '@/components/tab-bar' import { ChatInputWithMentions, type StagedAttachment, type SelectedModel } from '@/components/chat-input-with-mentions' import { ChatMessageAttachments } from '@/components/chat-message-attachments' @@ -49,6 +51,11 @@ import { const streamdownComponents = { pre: MarkdownPreOverride } +// Render user messages with markdown so bullets, bold, links, etc. survive the +// round-trip from the input textarea. `remarkBreaks` turns single newlines +// into
so typed line breaks are preserved without requiring blank lines. +const userMessageRemarkPlugins = [...Object.values(defaultRemarkPlugins), remarkBreaks] + /* ─── Billing error helpers ─── */ const BILLING_ERROR_PATTERNS = [ @@ -353,7 +360,14 @@ export function ChatSidebar({ {item.content && ( - {item.content} + + + {item.content} + + )} ) @@ -374,7 +388,12 @@ export function ChatSidebar({ ))}
)} - {message} + + {message} + ) diff --git a/apps/x/pnpm-lock.yaml b/apps/x/pnpm-lock.yaml index 51248fff..ac219371 100644 --- a/apps/x/pnpm-lock.yaml +++ b/apps/x/pnpm-lock.yaml @@ -247,6 +247,9 @@ importers: recharts: specifier: ^3.8.0 version: 3.8.1(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react-is@16.13.1)(react@19.2.3)(redux@5.0.1) + remark-breaks: + specifier: ^4.0.0 + version: 4.0.0 sonner: specifier: ^2.0.7 version: 2.0.7(react-dom@19.2.3(react@19.2.3))(react@19.2.3) @@ -5808,6 +5811,9 @@ packages: mdast-util-mdxjs-esm@2.0.1: resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==} + mdast-util-newline-to-break@2.0.0: + resolution: {integrity: sha512-MbgeFca0hLYIEx/2zGsszCSEJJ1JSCdiY5xQxRcLDDGa8EPvlLPupJ4DSajbMPAnC0je8jfb9TiUATnxxrHUog==} + mdast-util-phrasing@4.1.0: resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==} @@ -6768,6 +6774,9 @@ packages: rehype-raw@7.0.0: resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + remark-breaks@4.0.0: + resolution: {integrity: sha512-IjEjJOkH4FuJvHZVIW0QCDWxcG96kCq7An/KVH2NfJe6rKZU2AsHeB3OEjPNRxi4QC34Xdx7I2KGYn6IpT7gxQ==} + remark-cjk-friendly-gfm-strikethrough@1.2.3: resolution: {integrity: sha512-bXfMZtsaomK6ysNN/UGRIcasQAYkC10NtPmP0oOHOV8YOhA2TXmwRXCku4qOzjIFxAPfish5+XS0eIug2PzNZA==} engines: {node: '>=16'} @@ -14414,6 +14423,11 @@ snapshots: transitivePeerDependencies: - supports-color + mdast-util-newline-to-break@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-find-and-replace: 3.0.2 + mdast-util-phrasing@4.1.0: dependencies: '@types/mdast': 4.0.4 @@ -15608,6 +15622,12 @@ snapshots: hast-util-raw: 9.1.0 vfile: 6.0.3 + remark-breaks@4.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-newline-to-break: 2.0.0 + unified: 11.0.5 + 
remark-cjk-friendly-gfm-strikethrough@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): dependencies: micromark-extension-cjk-friendly-gfm-strikethrough: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) From bdf270b7a1d93111b0967964048dc2b427499bd0 Mon Sep 17 00:00:00 2001 From: Ramnique Singh <30795890+ramnique@users.noreply.github.com> Date: Fri, 24 Apr 2026 11:15:24 +0530 Subject: [PATCH 5/7] convert Today.md track blocks to event-driven and batch Gmail sync events Removes polling schedules from the up-next and calendar track blocks on Today.md so they refresh only on calendar.synced events, and rewrites the emails track instruction to consume a multi-thread digest payload. Batches Gmail sync so one email.synced event covers a whole sync run (capped at 10 threads per digest) instead of one event per thread, which collapses Pass 1 routing calls for multi-thread syncs. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../core/src/knowledge/ensure_daily_note.ts | 24 ++-- .../packages/core/src/knowledge/sync_gmail.ts | 127 ++++++++++++------ 2 files changed, 96 insertions(+), 55 deletions(-) diff --git a/apps/x/packages/core/src/knowledge/ensure_daily_note.ts b/apps/x/packages/core/src/knowledge/ensure_daily_note.ts index 4a6872f4..ac54d029 100644 --- a/apps/x/packages/core/src/knowledge/ensure_daily_note.ts +++ b/apps/x/packages/core/src/knowledge/ensure_daily_note.ts @@ -21,14 +21,14 @@ const SECTIONS: Section[] = [ instruction: `Write 1-3 sentences of plain markdown giving the user a shoulder-tap about what's next on their calendar today. -Data: read today's events from calendar_sync/ (workspace-readdir, then workspace-readFile each .json file). Filter to events whose start datetime is today and hasn't started yet. +This section refreshes on calendar changes, not on a clock tick — do NOT promise live minute countdowns. 
Frame urgency in buckets based on the event's start time relative to now: +- Start time is in the past or within roughly half an hour → imminent: name the meeting and say it's starting soon (e.g. "Standup is starting — join link in the Calendar section below."). +- Start time is later this morning or this afternoon → upcoming: name the meeting and roughly when (e.g. "Design review later this morning." / "1:1 with Sam this afternoon."). +- Start time is several hours out or nothing before then → focus block: frame the gap (e.g. "Next up is the all-hands at 3pm — good long focus block until then."). -Lead based on how soon the next event is: -- Under 15 minutes → urgent ("Standup starts in 10 minutes — join link in the Calendar section below.") -- Under 2 hours → lead with the event ("Design review in 40 minutes.") -- 2+ hours → frame the gap as focus time ("Next up is standup at noon — you've got a solid 3-hour focus block.") +Use the event's start time of day ("at 3pm", "this afternoon") rather than a countdown ("in 40 minutes"). Countdowns go stale between syncs. -Always compute minutes-to-start against the actual current local time — never say "nothing in the next X hours" if an event is in that window. +Data: read today's events from calendar_sync/ (workspace-readdir, then workspace-readFile each .json file). Filter to events whose start datetime is today and hasn't ended yet — for finding the next event, pick the earliest upcoming one; if all have passed, treat as clear. If you find quick context in knowledge/ that's genuinely useful, add one short clause ("Ramnique pushed the OAuth PR yesterday — might come up"). Use workspace-grep / workspace-readFile conservatively; don't stall on deep research. @@ -38,10 +38,6 @@ Plain markdown prose only — no calendar block, no email block, no headings.`, eventMatchCriteria: `Calendar event changes affecting today — new meetings, reschedules, cancellations, meetings starting soon. 
Skip changes to events on other days.`, active: true, - schedule: { - type: 'cron', - expression: '*/15 * * * *', - }, }, }, { @@ -53,16 +49,14 @@ Plain markdown prose only — no calendar block, no email block, no headings.`, Data: read calendar_sync/ via workspace-readdir, then workspace-readFile each .json event file. Filter to events occurring today. After 10am local time, drop meetings that have already ended — only include meetings that haven't ended yet. +This section refreshes on calendar changes, not on a clock tick — the "drop ended meetings" rule applies on each refresh, so an ended meeting disappears the next time any calendar event changes (not exactly on the clock hour). That's fine. + Always emit the calendar block, even when there are no remaining events (in that case use events: [] and showJoinButton: false). Set showJoinButton: true whenever any event has a conferenceLink. After the block, you MAY add one short markdown line per event giving useful prep context pulled from knowledge/ ("Design review: last week we agreed to revisit the type-picker UX."). Keep it tight — one line each, only when meaningful. Skip routine/recurring meetings.`, eventMatchCriteria: `Calendar event changes affecting today — additions, updates, cancellations, reschedules.`, active: true, - schedule: { - type: 'cron', - expression: '0 * * * *', - }, }, }, { @@ -72,7 +66,7 @@ After the block, you MAY add one short markdown line per event giving useful pre instruction: `Maintain a digest of email threads worth the user's attention today, rendered as zero or more email blocks (one per thread). -Event-driven path (primary): the agent message will include a freshly-synced thread's markdown as the event payload. Decide whether THIS thread warrants surfacing. If it's marketing, an auto-notification, a thread already closed out, or otherwise low-signal, skip the update — do NOT call update-track-content. 
If it's attention-worthy, integrate it into the digest: add a new email block, or update the existing one if the same threadId is already shown. +Event-driven path (primary): the agent message will include a "Gmail sync update" digest payload describing one or more freshly-synced threads from a single sync run. The digest lists each thread with its subject, sender, date, threadId, and body. Iterate over every thread in the payload and decide per thread whether it warrants surfacing. Skip marketing, auto-notifications, closed-out threads, and other low-signal mail. For threads that are attention-worthy, integrate them into the existing digest: add a new email block for a new threadId, or update the existing block if the threadId is already shown. If NONE of the threads in the payload are attention-worthy, skip the update — do NOT call update-track-content. Emit at most one update-track-content call that covers the full set of changes from this event. Manual path (fallback): with no event payload, scan gmail_sync/ via workspace-readdir (skip sync_state.json and attachments/). Read threads with workspace-readFile. Prioritize threads whose frontmatter action field is "reply" or "respond", plus other high-signal recent threads. 
diff --git a/apps/x/packages/core/src/knowledge/sync_gmail.ts b/apps/x/packages/core/src/knowledge/sync_gmail.ts index d00557a0..2aa48944 100644 --- a/apps/x/packages/core/src/knowledge/sync_gmail.ts +++ b/apps/x/packages/core/src/knowledge/sync_gmail.ts @@ -15,8 +15,52 @@ import { createEvent } from './track/events.js'; const SYNC_DIR = path.join(WorkDir, 'gmail_sync'); const SYNC_INTERVAL_MS = 5 * 60 * 1000; // Check every 5 minutes const REQUIRED_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'; +const MAX_THREADS_IN_DIGEST = 10; const nhm = new NodeHtmlMarkdown(); +interface SyncedThread { + threadId: string; + markdown: string; +} + +function summarizeGmailSync(threads: SyncedThread[]): string { + const lines: string[] = [ + `# Gmail sync update`, + ``, + `${threads.length} new/updated thread${threads.length === 1 ? '' : 's'}.`, + ``, + ]; + + const shown = threads.slice(0, MAX_THREADS_IN_DIGEST); + const hidden = threads.length - shown.length; + + if (shown.length > 0) { + lines.push(`## Threads`, ``); + for (const { markdown } of shown) { + lines.push(markdown.trimEnd(), ``, `---`, ``); + } + if (hidden > 0) { + lines.push(`_…and ${hidden} more thread(s) omitted from digest._`, ``); + } + } + + return lines.join('\n'); +} + +async function publishGmailSyncEvent(threads: SyncedThread[]): Promise { + if (threads.length === 0) return; + try { + await createEvent({ + source: 'gmail', + type: 'email.synced', + createdAt: new Date().toISOString(), + payload: summarizeGmailSync(threads), + }); + } catch (err) { + console.error('[Gmail] Failed to publish sync event:', err); + } +} + // --- Wake Signal for Immediate Sync Trigger --- let wakeResolve: (() => void) | null = null; @@ -113,14 +157,14 @@ async function saveAttachment(gmail: gmail.Gmail, userId: string, msgId: string, // --- Sync Logic --- -async function processThread(auth: OAuth2Client, threadId: string, syncDir: string, attachmentsDir: string) { +async function processThread(auth: OAuth2Client, 
threadId: string, syncDir: string, attachmentsDir: string): Promise { const gmail = google.gmail({ version: 'v1', auth }); try { const res = await gmail.users.threads.get({ userId: 'me', id: threadId }); const thread = res.data; const messages = thread.messages; - if (!messages || messages.length === 0) return; + if (!messages || messages.length === 0) return null; // Subject from first message const firstHeader = messages[0].payload?.headers; @@ -173,15 +217,11 @@ async function processThread(auth: OAuth2Client, threadId: string, syncDir: stri fs.writeFileSync(path.join(syncDir, `${threadId}.md`), mdContent); console.log(`Synced Thread: ${subject} (${threadId})`); - await createEvent({ - source: 'gmail', - type: 'email.synced', - createdAt: new Date().toISOString(), - payload: mdContent, - }); + return { threadId, markdown: mdContent }; } catch (error) { console.error(`Error processing thread ${threadId}:`, error); + return null; } } @@ -262,10 +302,14 @@ async function fullSync(auth: OAuth2Client, syncDir: string, attachmentsDir: str truncated: limitedThreads.truncated, }); + const synced: SyncedThread[] = []; for (const threadId of threadIds) { - await processThread(auth, threadId, syncDir, attachmentsDir); + const result = await processThread(auth, threadId, syncDir, attachmentsDir); + if (result) synced.push(result); } + await publishGmailSyncEvent(synced); + saveState(currentHistoryId, stateFile); await serviceLogger.log({ type: 'run_complete', @@ -365,10 +409,14 @@ async function partialSync(auth: OAuth2Client, startHistoryId: string, syncDir: truncated: limitedThreads.truncated, }); + const synced: SyncedThread[] = []; for (const tid of threadIdList) { - await processThread(auth, tid, syncDir, attachmentsDir); + const result = await processThread(auth, tid, syncDir, attachmentsDir); + if (result) synced.push(result); } + await publishGmailSyncEvent(synced); + const profile = await gmail.users.getProfile({ userId: 'me' }); saveState(profile.data.historyId!, 
stateFile); await serviceLogger.log({ @@ -565,7 +613,12 @@ function extractBodyFromPayload(payload: Record): string { return ''; } -async function processThreadComposio(connectedAccountId: string, threadId: string, syncDir: string): Promise { +interface ComposioThreadResult { + synced: SyncedThread | null; + newestIsoPlusOne: string | null; +} + +async function processThreadComposio(connectedAccountId: string, threadId: string, syncDir: string): Promise { let threadResult; try { threadResult = await executeAction( @@ -579,40 +632,34 @@ async function processThreadComposio(connectedAccountId: string, threadId: strin ); } catch (error) { console.warn(`[Gmail] Skipping thread ${threadId} (fetch failed):`, error instanceof Error ? error.message : error); - return null; + return { synced: null, newestIsoPlusOne: null }; } if (!threadResult.successful || !threadResult.data) { console.error(`[Gmail] Failed to fetch thread ${threadId}:`, threadResult.error); - return null; + return { synced: null, newestIsoPlusOne: null }; } const data = threadResult.data as Record; const messages = data.messages as Array> | undefined; let newestDate: Date | null = null; + let mdContent: string; + let subjectForLog: string; if (!messages || messages.length === 0) { const parsed = parseMessageData(data); - const mdContent = `# ${parsed.subject}\n\n` + + mdContent = `# ${parsed.subject}\n\n` + `**Thread ID:** ${threadId}\n` + `**Message Count:** 1\n\n---\n\n` + `### From: ${parsed.from}\n` + `**Date:** ${parsed.date}\n\n` + `${parsed.body}\n\n---\n\n`; - - fs.writeFileSync(path.join(syncDir, `${cleanFilename(threadId)}.md`), mdContent); - console.log(`[Gmail] Synced Thread: ${parsed.subject} (${threadId})`); - await createEvent({ - source: 'gmail', - type: 'email.synced', - createdAt: new Date().toISOString(), - payload: mdContent, - }); + subjectForLog = parsed.subject; newestDate = tryParseDate(parsed.date); } else { const firstParsed = parseMessageData(messages[0]); - let mdContent = `# 
${firstParsed.subject}\n\n`; + mdContent = `# ${firstParsed.subject}\n\n`; mdContent += `**Thread ID:** ${threadId}\n`; mdContent += `**Message Count:** ${messages.length}\n\n---\n\n`; @@ -628,19 +675,14 @@ async function processThreadComposio(connectedAccountId: string, threadId: strin newestDate = msgDate; } } - - fs.writeFileSync(path.join(syncDir, `${cleanFilename(threadId)}.md`), mdContent); - console.log(`[Gmail] Synced Thread: ${firstParsed.subject} (${threadId})`); - await createEvent({ - source: 'gmail', - type: 'email.synced', - createdAt: new Date().toISOString(), - payload: mdContent, - }); + subjectForLog = firstParsed.subject; } - if (!newestDate) return null; - return new Date(newestDate.getTime() + 1000).toISOString(); + fs.writeFileSync(path.join(syncDir, `${cleanFilename(threadId)}.md`), mdContent); + console.log(`[Gmail] Synced Thread: ${subjectForLog} (${threadId})`); + + const newestIsoPlusOne = newestDate ? new Date(newestDate.getTime() + 1000).toISOString() : null; + return { synced: { threadId, markdown: mdContent }, newestIsoPlusOne }; } async function performSyncComposio() { @@ -751,19 +793,22 @@ async function performSyncComposio() { let highWaterMark: string | null = state?.last_sync ?? null; let processedCount = 0; + const synced: SyncedThread[] = []; for (const threadId of allThreadIds) { // Re-check connection in case user disconnected mid-sync if (!composioAccountsRepo.isConnected('gmail')) { console.log('[Gmail] Account disconnected during sync. 
Stopping.'); - return; + break; } try { - const newestInThread = await processThreadComposio(connectedAccountId, threadId, SYNC_DIR); + const result = await processThreadComposio(connectedAccountId, threadId, SYNC_DIR); processedCount++; - if (newestInThread) { - if (!highWaterMark || new Date(newestInThread) > new Date(highWaterMark)) { - highWaterMark = newestInThread; + if (result.synced) synced.push(result.synced); + + if (result.newestIsoPlusOne) { + if (!highWaterMark || new Date(result.newestIsoPlusOne) > new Date(highWaterMark)) { + highWaterMark = result.newestIsoPlusOne; } saveComposioState(STATE_FILE, highWaterMark); } @@ -772,6 +817,8 @@ async function performSyncComposio() { } } + await publishGmailSyncEvent(synced); + await serviceLogger.log({ type: 'run_complete', service: run!.service, From caf00fae0c18684f3d7da34e996e3e775f769452 Mon Sep 17 00:00:00 2001 From: Ramnique Singh <30795890+ramnique@users.noreply.github.com> Date: Fri, 24 Apr 2026 16:44:02 +0530 Subject: [PATCH 6/7] configurable kg / meeting / track-block model overrides MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bring back per-category model selection that 5c4aa772 dropped, plus add a new track-block category. Each is a BYOK-only override on `LlmModelConfig` (`knowledgeGraphModel`, `meetingNotesModel`, `trackBlockModel`); signed-in users always get the curated gateway default and never hit the on-disk config. Three helpers in core/models/defaults.ts — `getKgModel`, `getTrackBlockModel`, `getMeetingNotesModel` — each check `isSignedIn` first (fast path) and fall through to `cfg. ?? cfg.model` for BYOK. The model is now picked at the invocation site rather than via runtime agent-name branching: each top-level `createRun` for a polling KG agent or a track-block update passes `model: await getXxxModel()`. The `model:` declarations on the affected agent YAMLs are dropped — they were dead code under the per-call override. 
Standalone (non-run) callers `track/routing` and `summarize_meeting` use the helpers inline. Settings dialog and the two onboarding flows surface the two new fields ("Meeting Notes Model", "Track Block Model") next to the existing "Knowledge Graph Model"; `repo.setConfig` persists all three per-provider. Note: the signed-in `RowboatModelSettings` panel still has its now-defunct kg selector; that's a UI cleanup for a later pass. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../src/components/onboarding-modal.tsx | 88 +++++++++++++++-- .../onboarding/steps/llm-setup-step.tsx | 70 +++++++++++++ .../onboarding/use-onboarding-state.ts | 24 +++-- .../src/components/settings-dialog.tsx | 98 +++++++++++++++++-- .../core/src/knowledge/agent_notes.ts | 3 +- .../core/src/knowledge/agent_notes_agent.ts | 1 - .../core/src/knowledge/inline_task_agent.ts | 1 - .../core/src/knowledge/inline_tasks.ts | 5 +- .../core/src/knowledge/label_emails.ts | 2 + .../core/src/knowledge/labeling_agent.ts | 1 - .../core/src/knowledge/note_creation.ts | 1 - .../core/src/knowledge/note_tagging_agent.ts | 1 - .../core/src/knowledge/summarize_meeting.ts | 5 +- .../packages/core/src/knowledge/tag_notes.ts | 2 + .../core/src/knowledge/track/routing.ts | 5 +- .../core/src/knowledge/track/runner.ts | 3 +- apps/x/packages/core/src/models/defaults.ts | 35 +++++++ apps/x/packages/core/src/models/repo.ts | 1 + .../core/src/pre_built/email-draft.md | 1 - .../core/src/pre_built/meeting-prep.md | 1 - apps/x/packages/core/src/pre_built/runner.ts | 2 + apps/x/packages/shared/src/models.ts | 5 +- 22 files changed, 309 insertions(+), 46 deletions(-) diff --git a/apps/x/apps/renderer/src/components/onboarding-modal.tsx b/apps/x/apps/renderer/src/components/onboarding-modal.tsx index c7f723ac..469ac35d 100644 --- a/apps/x/apps/renderer/src/components/onboarding-modal.tsx +++ b/apps/x/apps/renderer/src/components/onboarding-modal.tsx @@ -59,14 +59,14 @@ export function OnboardingModal({ open, onComplete }: 
OnboardingModalProps) { const [modelsCatalog, setModelsCatalog] = useState>({}) const [modelsLoading, setModelsLoading] = useState(false) const [modelsError, setModelsError] = useState(null) - const [providerConfigs, setProviderConfigs] = useState>({ - openai: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - anthropic: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - google: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - openrouter: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - aigateway: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - ollama: { apiKey: "", baseURL: "http://localhost:11434", model: "", knowledgeGraphModel: "" }, - "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", model: "", knowledgeGraphModel: "" }, + const [providerConfigs, setProviderConfigs] = useState>({ + openai: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + anthropic: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + google: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + openrouter: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + aigateway: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + ollama: { apiKey: "", baseURL: "http://localhost:11434", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, }) const [testState, setTestState] = useState<{ status: "idle" | "testing" | "success" | "error"; error?: string }>({ status: "idle", @@ -109,7 +109,7 @@ export function OnboardingModal({ 
open, onComplete }: OnboardingModalProps) { const [googleCalendarConnecting, setGoogleCalendarConnecting] = useState(false) const updateProviderConfig = useCallback( - (provider: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; model: string; knowledgeGraphModel: string }>) => { + (provider: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; model: string; knowledgeGraphModel: string; meetingNotesModel: string; trackBlockModel: string }>) => { setProviderConfigs(prev => ({ ...prev, [provider]: { ...prev[provider], ...updates }, @@ -458,6 +458,8 @@ export function OnboardingModal({ open, onComplete }: OnboardingModalProps) { const baseURL = activeConfig.baseURL.trim() || undefined const model = activeConfig.model.trim() const knowledgeGraphModel = activeConfig.knowledgeGraphModel.trim() || undefined + const meetingNotesModel = activeConfig.meetingNotesModel.trim() || undefined + const trackBlockModel = activeConfig.trackBlockModel.trim() || undefined const providerConfig = { provider: { flavor: llmProvider, @@ -466,6 +468,8 @@ export function OnboardingModal({ open, onComplete }: OnboardingModalProps) { }, model, knowledgeGraphModel, + meetingNotesModel, + trackBlockModel, } const result = await window.ipc.invoke("models:test", providerConfig) if (result.success) { @@ -1157,6 +1161,72 @@ export function OnboardingModal({ open, onComplete }: OnboardingModalProps) { )} + +
+ Meeting notes model + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateProviderConfig(llmProvider, { meetingNotesModel: e.target.value })} + placeholder={activeConfig.model || "Enter model"} + /> + ) : ( + + )} +
+ +
+ Track block model + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateProviderConfig(llmProvider, { trackBlockModel: e.target.value })} + placeholder={activeConfig.model || "Enter model"} + /> + ) : ( + + )} +
{showApiKey && ( diff --git a/apps/x/apps/renderer/src/components/onboarding/steps/llm-setup-step.tsx b/apps/x/apps/renderer/src/components/onboarding/steps/llm-setup-step.tsx index a9956245..a11b0d5f 100644 --- a/apps/x/apps/renderer/src/components/onboarding/steps/llm-setup-step.tsx +++ b/apps/x/apps/renderer/src/components/onboarding/steps/llm-setup-step.tsx @@ -221,6 +221,76 @@ export function LlmSetupStep({ state }: LlmSetupStepProps) { )} + +
+ + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateProviderConfig(llmProvider, { meetingNotesModel: e.target.value })} + placeholder={activeConfig.model || "Enter model"} + /> + ) : ( + + )} +
+ +
+ + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateProviderConfig(llmProvider, { trackBlockModel: e.target.value })} + placeholder={activeConfig.model || "Enter model"} + /> + ) : ( + + )} +
{showApiKey && ( diff --git a/apps/x/apps/renderer/src/components/onboarding/use-onboarding-state.ts b/apps/x/apps/renderer/src/components/onboarding/use-onboarding-state.ts index a55b23fe..edb3616b 100644 --- a/apps/x/apps/renderer/src/components/onboarding/use-onboarding-state.ts +++ b/apps/x/apps/renderer/src/components/onboarding/use-onboarding-state.ts @@ -29,14 +29,14 @@ export function useOnboardingState(open: boolean, onComplete: () => void) { const [modelsCatalog, setModelsCatalog] = useState>({}) const [modelsLoading, setModelsLoading] = useState(false) const [modelsError, setModelsError] = useState(null) - const [providerConfigs, setProviderConfigs] = useState>({ - openai: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - anthropic: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - google: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - openrouter: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - aigateway: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "" }, - ollama: { apiKey: "", baseURL: "http://localhost:11434", model: "", knowledgeGraphModel: "" }, - "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", model: "", knowledgeGraphModel: "" }, + const [providerConfigs, setProviderConfigs] = useState>({ + openai: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + anthropic: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + google: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + openrouter: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + aigateway: { apiKey: "", baseURL: "", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + ollama: { apiKey: "", baseURL: 
"http://localhost:11434", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", model: "", knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, }) const [testState, setTestState] = useState<{ status: "idle" | "testing" | "success" | "error"; error?: string }>({ status: "idle", @@ -81,7 +81,7 @@ export function useOnboardingState(open: boolean, onComplete: () => void) { const [googleCalendarConnecting, setGoogleCalendarConnecting] = useState(false) const updateProviderConfig = useCallback( - (provider: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; model: string; knowledgeGraphModel: string }>) => { + (provider: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; model: string; knowledgeGraphModel: string; meetingNotesModel: string; trackBlockModel: string }>) => { setProviderConfigs(prev => ({ ...prev, [provider]: { ...prev[provider], ...updates }, @@ -435,6 +435,8 @@ export function useOnboardingState(open: boolean, onComplete: () => void) { const baseURL = activeConfig.baseURL.trim() || undefined const model = activeConfig.model.trim() const knowledgeGraphModel = activeConfig.knowledgeGraphModel.trim() || undefined + const meetingNotesModel = activeConfig.meetingNotesModel.trim() || undefined + const trackBlockModel = activeConfig.trackBlockModel.trim() || undefined const providerConfig = { provider: { flavor: llmProvider, @@ -443,6 +445,8 @@ export function useOnboardingState(open: boolean, onComplete: () => void) { }, model, knowledgeGraphModel, + meetingNotesModel, + trackBlockModel, } const result = await window.ipc.invoke("models:test", providerConfig) if (result.success) { @@ -459,7 +463,7 @@ export function useOnboardingState(open: boolean, onComplete: () => void) { setTestState({ status: "error", error: "Connection test failed" }) toast.error("Connection test failed") } - }, 
[activeConfig.apiKey, activeConfig.baseURL, activeConfig.model, activeConfig.knowledgeGraphModel, canTest, llmProvider, handleNext]) + }, [activeConfig.apiKey, activeConfig.baseURL, activeConfig.model, activeConfig.knowledgeGraphModel, activeConfig.meetingNotesModel, activeConfig.trackBlockModel, canTest, llmProvider, handleNext]) // Check connection status for all providers const refreshAllStatuses = useCallback(async () => { diff --git a/apps/x/apps/renderer/src/components/settings-dialog.tsx b/apps/x/apps/renderer/src/components/settings-dialog.tsx index 143c6292..ddc506c9 100644 --- a/apps/x/apps/renderer/src/components/settings-dialog.tsx +++ b/apps/x/apps/renderer/src/components/settings-dialog.tsx @@ -196,14 +196,14 @@ const defaultBaseURLs: Partial> = { function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { const [provider, setProvider] = useState("openai") const [defaultProvider, setDefaultProvider] = useState(null) - const [providerConfigs, setProviderConfigs] = useState>({ - openai: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "" }, - anthropic: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "" }, - google: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "" }, - openrouter: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "" }, - aigateway: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "" }, - ollama: { apiKey: "", baseURL: "http://localhost:11434", models: [""], knowledgeGraphModel: "" }, - "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", models: [""], knowledgeGraphModel: "" }, + const [providerConfigs, setProviderConfigs] = useState>({ + openai: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + anthropic: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + google: { apiKey: "", baseURL: "", models: [""], 
knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + openrouter: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + aigateway: { apiKey: "", baseURL: "", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + ollama: { apiKey: "", baseURL: "http://localhost:11434", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, + "openai-compatible": { apiKey: "", baseURL: "http://localhost:1234/v1", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, }) const [modelsCatalog, setModelsCatalog] = useState>({}) const [modelsLoading, setModelsLoading] = useState(false) @@ -229,7 +229,7 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { (!requiresBaseURL || activeConfig.baseURL.trim().length > 0) const updateConfig = useCallback( - (prov: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; models: string[]; knowledgeGraphModel: string }>) => { + (prov: LlmProviderFlavor, updates: Partial<{ apiKey: string; baseURL: string; models: string[]; knowledgeGraphModel: string; meetingNotesModel: string; trackBlockModel: string }>) => { setProviderConfigs(prev => ({ ...prev, [prov]: { ...prev[prov], ...updates }, @@ -302,6 +302,8 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { baseURL: e.baseURL || (defaultBaseURLs[key as LlmProviderFlavor] || ""), models: savedModels, knowledgeGraphModel: e.knowledgeGraphModel || "", + meetingNotesModel: e.meetingNotesModel || "", + trackBlockModel: e.trackBlockModel || "", }; } } @@ -318,6 +320,8 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { baseURL: parsed.provider.baseURL || (defaultBaseURLs[flavor] || ""), models: activeModels.length > 0 ? 
activeModels : [""], knowledgeGraphModel: parsed.knowledgeGraphModel || "", + meetingNotesModel: parsed.meetingNotesModel || "", + trackBlockModel: parsed.trackBlockModel || "", }; } return next; @@ -391,6 +395,8 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { model: allModels[0] || "", models: allModels, knowledgeGraphModel: activeConfig.knowledgeGraphModel.trim() || undefined, + meetingNotesModel: activeConfig.meetingNotesModel.trim() || undefined, + trackBlockModel: activeConfig.trackBlockModel.trim() || undefined, } const result = await window.ipc.invoke("models:test", providerConfig) if (result.success) { @@ -423,6 +429,8 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { model: allModels[0], models: allModels, knowledgeGraphModel: config.knowledgeGraphModel.trim() || undefined, + meetingNotesModel: config.meetingNotesModel.trim() || undefined, + trackBlockModel: config.trackBlockModel.trim() || undefined, }) setDefaultProvider(prov) window.dispatchEvent(new Event('models-config-changed')) @@ -452,6 +460,8 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { parsed.model = defModels[0] || "" parsed.models = defModels parsed.knowledgeGraphModel = defConfig.knowledgeGraphModel.trim() || undefined + parsed.meetingNotesModel = defConfig.meetingNotesModel.trim() || undefined + parsed.trackBlockModel = defConfig.trackBlockModel.trim() || undefined } await window.ipc.invoke("workspace:writeFile", { path: "config/models.json", @@ -459,7 +469,7 @@ function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { }) setProviderConfigs(prev => ({ ...prev, - [prov]: { apiKey: "", baseURL: defaultBaseURLs[prov] || "", models: [""], knowledgeGraphModel: "" }, + [prov]: { apiKey: "", baseURL: defaultBaseURLs[prov] || "", models: [""], knowledgeGraphModel: "", meetingNotesModel: "", trackBlockModel: "" }, })) setTestState({ status: "idle" }) window.dispatchEvent(new Event('models-config-changed')) @@ -649,6 +659,74 @@ 
function ModelSettings({ dialogOpen }: { dialogOpen: boolean }) { )} + + {/* Meeting notes model */} +
+ Meeting notes model + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateConfig(provider, { meetingNotesModel: e.target.value })} + placeholder={primaryModel || "Enter model"} + /> + ) : ( + + )} +
+ + {/* Track block model */} +
+ Track block model + {modelsLoading ? ( +
+ + Loading... +
+ ) : showModelInput ? ( + updateConfig(provider, { trackBlockModel: e.target.value })} + placeholder={primaryModel || "Enter model"} + /> + ) : ( + + )} +
{/* API Key */} diff --git a/apps/x/packages/core/src/knowledge/agent_notes.ts b/apps/x/packages/core/src/knowledge/agent_notes.ts index 16307bb5..359976dd 100644 --- a/apps/x/packages/core/src/knowledge/agent_notes.ts +++ b/apps/x/packages/core/src/knowledge/agent_notes.ts @@ -3,6 +3,7 @@ import path from 'path'; import { google } from 'googleapis'; import { WorkDir } from '../config/config.js'; import { createRun, createMessage } from '../runs/runs.js'; +import { getKgModel } from '../models/defaults.js'; import { waitForRunCompletion } from '../agents/utils.js'; import { serviceLogger } from '../services/service_logger.js'; import { loadUserConfig, updateUserEmail } from '../config/user_config.js'; @@ -305,7 +306,7 @@ async function processAgentNotes(): Promise { const timestamp = new Date().toISOString(); const message = `Current timestamp: ${timestamp}\n\nProcess the following source material and update the Agent Notes folder accordingly.\n\n${messageParts.join('\n\n')}`; - const agentRun = await createRun({ agentId: AGENT_ID }); + const agentRun = await createRun({ agentId: AGENT_ID, model: await getKgModel() }); await createMessage(agentRun.id, message); await waitForRunCompletion(agentRun.id); diff --git a/apps/x/packages/core/src/knowledge/agent_notes_agent.ts b/apps/x/packages/core/src/knowledge/agent_notes_agent.ts index d7087405..58aa22a7 100644 --- a/apps/x/packages/core/src/knowledge/agent_notes_agent.ts +++ b/apps/x/packages/core/src/knowledge/agent_notes_agent.ts @@ -1,6 +1,5 @@ export function getRaw(): string { return `--- -model: anthropic/claude-haiku-4.5 tools: workspace-writeFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/inline_task_agent.ts b/apps/x/packages/core/src/knowledge/inline_task_agent.ts index 9c3e2568..fd90875b 100644 --- a/apps/x/packages/core/src/knowledge/inline_task_agent.ts +++ b/apps/x/packages/core/src/knowledge/inline_task_agent.ts @@ -13,7 +13,6 @@ export function getRaw(): string { const defaultEndISO 
= defaultEnd.toISOString(); return `--- -model: anthropic/claude-sonnet-4.6 tools: ${toolEntries} --- diff --git a/apps/x/packages/core/src/knowledge/inline_tasks.ts b/apps/x/packages/core/src/knowledge/inline_tasks.ts index 01d22352..953f86bd 100644 --- a/apps/x/packages/core/src/knowledge/inline_tasks.ts +++ b/apps/x/packages/core/src/knowledge/inline_tasks.ts @@ -4,6 +4,7 @@ import { CronExpressionParser } from 'cron-parser'; import { generateText } from 'ai'; import { WorkDir } from '../config/config.js'; import { createRun, createMessage, fetchRun } from '../runs/runs.js'; +import { getKgModel } from '../models/defaults.js'; import container from '../di/container.js'; import type { IModelConfigRepo } from '../models/repo.js'; import { createProvider } from '../models/models.js'; @@ -467,7 +468,7 @@ async function processInlineTasks(): Promise { console.log(`[InlineTasks] Running task: "${task.instruction.slice(0, 80)}..."`); try { - const run = await createRun({ agentId: INLINE_TASK_AGENT }); + const run = await createRun({ agentId: INLINE_TASK_AGENT, model: await getKgModel() }); const message = [ `Execute the following instruction from the note "${relativePath}":`, @@ -547,7 +548,7 @@ export async function processRowboatInstruction( scheduleLabel: string | null; response: string | null; }> { - const run = await createRun({ agentId: INLINE_TASK_AGENT }); + const run = await createRun({ agentId: INLINE_TASK_AGENT, model: await getKgModel() }); const message = [ `Process the following @rowboat instruction from the note "${notePath}":`, diff --git a/apps/x/packages/core/src/knowledge/label_emails.ts b/apps/x/packages/core/src/knowledge/label_emails.ts index 98b10c2f..95b6217b 100644 --- a/apps/x/packages/core/src/knowledge/label_emails.ts +++ b/apps/x/packages/core/src/knowledge/label_emails.ts @@ -2,6 +2,7 @@ import fs from 'fs'; import path from 'path'; import { WorkDir } from '../config/config.js'; import { createRun, createMessage } from '../runs/runs.js'; 
+import { getKgModel } from '../models/defaults.js'; import { bus } from '../runs/bus.js'; import { waitForRunCompletion } from '../agents/utils.js'; import { serviceLogger } from '../services/service_logger.js'; @@ -71,6 +72,7 @@ async function labelEmailBatch( ): Promise<{ runId: string; filesEdited: Set }> { const run = await createRun({ agentId: LABELING_AGENT, + model: await getKgModel(), }); let message = `Label the following ${files.length} email files by prepending YAML frontmatter.\n\n`; diff --git a/apps/x/packages/core/src/knowledge/labeling_agent.ts b/apps/x/packages/core/src/knowledge/labeling_agent.ts index bb4a6efe..8842891a 100644 --- a/apps/x/packages/core/src/knowledge/labeling_agent.ts +++ b/apps/x/packages/core/src/knowledge/labeling_agent.ts @@ -2,7 +2,6 @@ import { renderTagSystemForEmails } from './tag_system.js'; export function getRaw(): string { return `--- -model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/note_creation.ts b/apps/x/packages/core/src/knowledge/note_creation.ts index 283c77ec..0a4d8981 100644 --- a/apps/x/packages/core/src/knowledge/note_creation.ts +++ b/apps/x/packages/core/src/knowledge/note_creation.ts @@ -3,7 +3,6 @@ import { renderNoteEffectRules } from './tag_system.js'; export function getRaw(): string { return `--- -model: anthropic/claude-haiku-4.5 tools: workspace-writeFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/note_tagging_agent.ts b/apps/x/packages/core/src/knowledge/note_tagging_agent.ts index 71b10910..8e9e3320 100644 --- a/apps/x/packages/core/src/knowledge/note_tagging_agent.ts +++ b/apps/x/packages/core/src/knowledge/note_tagging_agent.ts @@ -2,7 +2,6 @@ import { renderTagSystemForNotes } from './tag_system.js'; export function getRaw(): string { return `--- -model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/knowledge/summarize_meeting.ts 
b/apps/x/packages/core/src/knowledge/summarize_meeting.ts index a10aac28..c7e7a71f 100644 --- a/apps/x/packages/core/src/knowledge/summarize_meeting.ts +++ b/apps/x/packages/core/src/knowledge/summarize_meeting.ts @@ -2,7 +2,7 @@ import fs from 'fs'; import path from 'path'; import { generateText } from 'ai'; import { createProvider } from '../models/models.js'; -import { getDefaultModelAndProvider, resolveProviderConfig } from '../models/defaults.js'; +import { getDefaultModelAndProvider, getMeetingNotesModel, resolveProviderConfig } from '../models/defaults.js'; import { WorkDir } from '../config/config.js'; const CALENDAR_SYNC_DIR = path.join(WorkDir, 'calendar_sync'); @@ -135,7 +135,8 @@ function loadCalendarEventContext(calendarEventJson: string): string { } export async function summarizeMeeting(transcript: string, meetingStartTime?: string, calendarEventJson?: string): Promise { - const { model: modelId, provider: providerName } = await getDefaultModelAndProvider(); + const modelId = await getMeetingNotesModel(); + const { provider: providerName } = await getDefaultModelAndProvider(); const providerConfig = await resolveProviderConfig(providerName); const model = createProvider(providerConfig).languageModel(modelId); diff --git a/apps/x/packages/core/src/knowledge/tag_notes.ts b/apps/x/packages/core/src/knowledge/tag_notes.ts index 8fdabb86..2d074ab7 100644 --- a/apps/x/packages/core/src/knowledge/tag_notes.ts +++ b/apps/x/packages/core/src/knowledge/tag_notes.ts @@ -2,6 +2,7 @@ import fs from 'fs'; import path from 'path'; import { WorkDir } from '../config/config.js'; import { createRun, createMessage } from '../runs/runs.js'; +import { getKgModel } from '../models/defaults.js'; import { bus } from '../runs/bus.js'; import { waitForRunCompletion } from '../agents/utils.js'; import { serviceLogger } from '../services/service_logger.js'; @@ -84,6 +85,7 @@ async function tagNoteBatch( ): Promise<{ runId: string; filesEdited: Set }> { const run = await 
createRun({ agentId: NOTE_TAGGING_AGENT, + model: await getKgModel(), }); let message = `Tag the following ${files.length} knowledge notes by prepending YAML frontmatter with appropriate tags.\n\n`; diff --git a/apps/x/packages/core/src/knowledge/track/routing.ts b/apps/x/packages/core/src/knowledge/track/routing.ts index 53e6f7b3..6f8f3824 100644 --- a/apps/x/packages/core/src/knowledge/track/routing.ts +++ b/apps/x/packages/core/src/knowledge/track/routing.ts @@ -2,7 +2,7 @@ import { generateObject } from 'ai'; import { trackBlock, PrefixLogger } from '@x/shared'; import type { KnowledgeEvent } from '@x/shared/dist/track-block.js'; import { createProvider } from '../../models/models.js'; -import { getDefaultModelAndProvider, resolveProviderConfig } from '../../models/defaults.js'; +import { getDefaultModelAndProvider, getTrackBlockModel, resolveProviderConfig } from '../../models/defaults.js'; const log = new PrefixLogger('TrackRouting'); @@ -34,7 +34,8 @@ Rules: - For each candidate, return BOTH trackId and filePath exactly as given. 
trackIds are not globally unique.`; async function resolveModel() { - const { model, provider } = await getDefaultModelAndProvider(); + const model = await getTrackBlockModel(); + const { provider } = await getDefaultModelAndProvider(); const config = await resolveProviderConfig(provider); return createProvider(config).languageModel(model); } diff --git a/apps/x/packages/core/src/knowledge/track/runner.ts b/apps/x/packages/core/src/knowledge/track/runner.ts index 5ee90024..35f7e7ac 100644 --- a/apps/x/packages/core/src/knowledge/track/runner.ts +++ b/apps/x/packages/core/src/knowledge/track/runner.ts @@ -1,6 +1,7 @@ import z from 'zod'; import { fetchAll, updateTrackBlock } from './fileops.js'; import { createRun, createMessage } from '../../runs/runs.js'; +import { getTrackBlockModel } from '../../models/defaults.js'; import { extractAgentResponse, waitForRunCompletion } from '../../agents/utils.js'; import { trackBus } from './bus.js'; import type { TrackStateSchema } from './types.js'; @@ -102,7 +103,7 @@ export async function triggerTrackUpdate( const contentBefore = track.content; // Emit start event — runId is set after agent run is created - const agentRun = await createRun({ agentId: 'track-run' }); + const agentRun = await createRun({ agentId: 'track-run', model: await getTrackBlockModel() }); // Set lastRunAt and lastRunId immediately (before agent executes) so // the scheduler's next poll won't re-trigger this track. 
diff --git a/apps/x/packages/core/src/models/defaults.ts b/apps/x/packages/core/src/models/defaults.ts index b9df52da..66dda9e0 100644 --- a/apps/x/packages/core/src/models/defaults.ts +++ b/apps/x/packages/core/src/models/defaults.ts @@ -6,6 +6,8 @@ import container from "../di/container.js"; const SIGNED_IN_DEFAULT_MODEL = "gpt-5.4"; const SIGNED_IN_DEFAULT_PROVIDER = "rowboat"; +const SIGNED_IN_KG_MODEL = "anthropic/claude-haiku-4.5"; +const SIGNED_IN_TRACK_BLOCK_MODEL = "anthropic/claude-haiku-4.5"; /** * The single source of truth for "what model+provider should we use when @@ -51,3 +53,36 @@ export async function resolveProviderConfig(name: string): Promise { + if (await isSignedIn()) return SIGNED_IN_KG_MODEL; + const cfg = await container.resolve("modelConfigRepo").getConfig(); + return cfg.knowledgeGraphModel ?? cfg.model; +} + +/** + * Model used by track-block runner + routing classifier. + * Signed-in: curated default. BYOK: user override (`trackBlockModel`) or + * assistant model. + */ +export async function getTrackBlockModel(): Promise { + if (await isSignedIn()) return SIGNED_IN_TRACK_BLOCK_MODEL; + const cfg = await container.resolve("modelConfigRepo").getConfig(); + return cfg.trackBlockModel ?? cfg.model; +} + +/** + * Model used by the meeting-notes summarizer. No special signed-in default — + * historically meetings used the assistant model. BYOK: user override + * (`meetingNotesModel`) or assistant model. + */ +export async function getMeetingNotesModel(): Promise { + if (await isSignedIn()) return SIGNED_IN_DEFAULT_MODEL; + const cfg = await container.resolve("modelConfigRepo").getConfig(); + return cfg.meetingNotesModel ?? 
cfg.model; +} diff --git a/apps/x/packages/core/src/models/repo.ts b/apps/x/packages/core/src/models/repo.ts index 44a9d475..8f8fb158 100644 --- a/apps/x/packages/core/src/models/repo.ts +++ b/apps/x/packages/core/src/models/repo.ts @@ -52,6 +52,7 @@ export class FSModelConfigRepo implements IModelConfigRepo { models: config.models, knowledgeGraphModel: config.knowledgeGraphModel, meetingNotesModel: config.meetingNotesModel, + trackBlockModel: config.trackBlockModel, }; const toWrite = { ...config, providers: existingProviders }; diff --git a/apps/x/packages/core/src/pre_built/email-draft.md b/apps/x/packages/core/src/pre_built/email-draft.md index 7a353d26..7ddd6ffb 100644 --- a/apps/x/packages/core/src/pre_built/email-draft.md +++ b/apps/x/packages/core/src/pre_built/email-draft.md @@ -1,5 +1,4 @@ --- -model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/pre_built/meeting-prep.md b/apps/x/packages/core/src/pre_built/meeting-prep.md index 5dc46eda..3391fc47 100644 --- a/apps/x/packages/core/src/pre_built/meeting-prep.md +++ b/apps/x/packages/core/src/pre_built/meeting-prep.md @@ -1,5 +1,4 @@ --- -model: anthropic/claude-haiku-4.5 tools: workspace-readFile: type: builtin diff --git a/apps/x/packages/core/src/pre_built/runner.ts b/apps/x/packages/core/src/pre_built/runner.ts index c1985380..51dae3a0 100644 --- a/apps/x/packages/core/src/pre_built/runner.ts +++ b/apps/x/packages/core/src/pre_built/runner.ts @@ -2,6 +2,7 @@ import fs from 'fs'; import path from 'path'; import { WorkDir } from '../config/config.js'; import { createRun, createMessage } from '../runs/runs.js'; +import { getKgModel } from '../models/defaults.js'; import { waitForRunCompletion } from '../agents/utils.js'; import { loadConfig, @@ -41,6 +42,7 @@ async function runAgent(agentName: string): Promise { // The agent file is expected to be in the agents directory with the same name const run = await createRun({ agentId: agentName, + 
model: await getKgModel(), }); // Build trigger message with user context diff --git a/apps/x/packages/shared/src/models.ts b/apps/x/packages/shared/src/models.ts index feec148f..e5b0e82f 100644 --- a/apps/x/packages/shared/src/models.ts +++ b/apps/x/packages/shared/src/models.ts @@ -18,8 +18,9 @@ export const LlmModelConfig = z.object({ model: z.string().optional(), models: z.array(z.string()).optional(), })).optional(), - // Deprecated: per-run model+provider supersedes these. Kept on the schema so - // existing settings/onboarding UIs continue to compile until they're cleaned up. + // Per-category model overrides (BYOK only — signed-in users always get + // the curated gateway defaults). Read by helpers in core/models/defaults.ts. knowledgeGraphModel: z.string().optional(), meetingNotesModel: z.string().optional(), + trackBlockModel: z.string().optional(), }); From d42fb26bcc5ba4dde1de49c4df7054ebb9cd5575 Mon Sep 17 00:00:00 2001 From: Ramnique Singh <30795890+ramnique@users.noreply.github.com> Date: Fri, 24 Apr 2026 16:58:18 +0530 Subject: [PATCH 7/7] allow per-track model + provider overrides Track block YAML gains optional `model` and `provider` fields. When set, the track runner passes them through to `createRun` so this specific track runs on the chosen model/provider; when unset the global default flows through (`getTrackBlockModel()` + the resolved provider). The track skill picks up the new fields automatically via the embedded `z.toJSONSchema(TrackBlockSchema)` and adds an explicit "Do Not Set" section: copilot leaves them omitted unless the user named a specific model or provider for the track. Common bad reasons ("might be faster", "in case it matters", complex instruction) are called out so the defaults stay the path of least resistance. Track modal Details tab shows the values when set, in the same conditional `
<dt>`/`<dd>
` style as the lastRun fields. Co-Authored-By: Claude Opus 4.7 (1M context) --- .../renderer/src/components/track-modal.tsx | 8 ++++++++ .../assistant/skills/tracks/skill.ts | 17 +++++++++++++++++ .../packages/core/src/knowledge/track/runner.ts | 11 +++++++++-- apps/x/packages/shared/src/track-block.ts | 2 ++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/apps/x/apps/renderer/src/components/track-modal.tsx b/apps/x/apps/renderer/src/components/track-modal.tsx index 8e261977..a4c0b512 100644 --- a/apps/x/apps/renderer/src/components/track-modal.tsx +++ b/apps/x/apps/renderer/src/components/track-modal.tsx @@ -156,6 +156,8 @@ export function TrackModal() { const lastRunAt = track?.lastRunAt ?? '' const lastRunId = track?.lastRunId ?? '' const lastRunSummary = track?.lastRunSummary ?? '' + const model = track?.model ?? '' + const provider = track?.provider ?? '' const scheduleSummary = useMemo(() => summarizeSchedule(schedule), [schedule]) const triggerType: 'scheduled' | 'event' | 'manual' = schedule ? 'scheduled' : eventMatchCriteria ? 'event' : 'manual' @@ -393,6 +395,12 @@ export function TrackModal() {
Track ID
{trackId}
File
{detail.filePath}
Status
{active ? 'Active' : 'Paused'}
+ {model && (<> +
Model
{model}
+ )} + {provider && (<> +
Provider
{provider}
+ )} {lastRunAt && (<>
Last run
{formatDateTime(lastRunAt)}
)} diff --git a/apps/x/packages/core/src/application/assistant/skills/tracks/skill.ts b/apps/x/packages/core/src/application/assistant/skills/tracks/skill.ts index ff345acf..17521806 100644 --- a/apps/x/packages/core/src/application/assistant/skills/tracks/skill.ts +++ b/apps/x/packages/core/src/application/assistant/skills/tracks/skill.ts @@ -87,6 +87,23 @@ ${schemaYaml} **Runtime-managed fields — never write these yourself:** ` + "`" + `lastRunAt` + "`" + `, ` + "`" + `lastRunId` + "`" + `, ` + "`" + `lastRunSummary` + "`" + `. +## Do Not Set ` + "`" + `model` + "`" + ` or ` + "`" + `provider` + "`" + ` (almost always) + +The schema includes optional ` + "`" + `model` + "`" + ` and ` + "`" + `provider` + "`" + ` fields. **Omit them.** A user-configurable global default already picks the right model and provider for tracks; setting per-track values bypasses that and is almost always wrong. + +The only time these belong on a track: + +- The user **explicitly** named a model or provider for *this specific track* in their request ("use Claude Opus for this one", "force this track onto OpenAI"). Quote the user's wording back when confirming. + +Things that are **not** reasons to set these: + +- "Tracks should be fast" / "I want a small model" — that's a global preference, not a per-track one. Leave it; the global default exists. +- "This track is complex" — write a clearer instruction; don't reach for a different model. +- "Just to be safe" / "in case it matters" — this is the antipattern. Leave them out. +- The user changed their main chat model — that has nothing to do with tracks. Leave them out. + +When in doubt: omit both fields. Never volunteer them. Never include them in a starter template you suggest. If you find yourself adding them as a sensible default, stop — you're wrong. 
+ ## Choosing a trackId - Kebab-case, short, descriptive: ` + "`" + `chicago-time` + "`" + `, ` + "`" + `sfo-weather` + "`" + `, ` + "`" + `hn-top5` + "`" + `, ` + "`" + `btc-usd` + "`" + `. diff --git a/apps/x/packages/core/src/knowledge/track/runner.ts b/apps/x/packages/core/src/knowledge/track/runner.ts index 35f7e7ac..1eec3da1 100644 --- a/apps/x/packages/core/src/knowledge/track/runner.ts +++ b/apps/x/packages/core/src/knowledge/track/runner.ts @@ -102,8 +102,15 @@ export async function triggerTrackUpdate( const contentBefore = track.content; - // Emit start event — runId is set after agent run is created - const agentRun = await createRun({ agentId: 'track-run', model: await getTrackBlockModel() }); + // Per-track model/provider overrides win when set; otherwise fall back + // to the configured trackBlockModel default and the run-creation + // provider default (signed-in: rowboat; BYOK: active provider). + const model = track.track.model ?? await getTrackBlockModel(); + const agentRun = await createRun({ + agentId: 'track-run', + model, + ...(track.track.provider ? { provider: track.track.provider } : {}), + }); // Set lastRunAt and lastRunId immediately (before agent executes) so // the scheduler's next poll won't re-trigger this track. diff --git a/apps/x/packages/shared/src/track-block.ts b/apps/x/packages/shared/src/track-block.ts index c9e738b7..6d9ce3af 100644 --- a/apps/x/packages/shared/src/track-block.ts +++ b/apps/x/packages/shared/src/track-block.ts @@ -25,6 +25,8 @@ export const TrackBlockSchema = z.object({ eventMatchCriteria: z.string().optional().describe('When set, this track participates in event-based triggering. Describe what kinds of events should consider this track for an update (e.g. "Emails about Q3 planning"). 
Omit to disable event triggers — the track will only run on schedule or manually.'), active: z.boolean().default(true).describe('Set false to pause without deleting'), schedule: TrackScheduleSchema.optional(), + model: z.string().optional().describe('ADVANCED — leave unset. Per-track LLM model override (e.g. "anthropic/claude-sonnet-4.6"). Only set when the user explicitly asked for a specific model for THIS track. The global default already picks a tuned model for tracks; overriding usually makes things worse, not better.'), + provider: z.string().optional().describe('ADVANCED — leave unset. Per-track provider name override (e.g. "openai", "anthropic"). Only set when the user explicitly asked for a specific provider for THIS track. Almost always omitted; the global default flows through correctly.'), lastRunAt: z.string().optional().describe('Runtime-managed — never write this yourself'), lastRunId: z.string().optional().describe('Runtime-managed — never write this yourself'), lastRunSummary: z.string().optional().describe('Runtime-managed — never write this yourself'),