freeze model + provider per run at creation time

The model dropdown was broken in two ways: it wrote to ~/.rowboat/config/models.json
(the BYOK creds file), stamping a fake `flavor: 'openrouter'` onto the entry just to
satisfy zod when signed in — and the runtime ignored that write entirely for signed-in
users anyway, because `streamAgent` hard-coded `gpt-5.4`. Model selection was also
globally scoped, so every chat shared one brain.

This change moves model + provider out of the global config and onto the run
itself, resolved once at runs:create and frozen for the run's lifetime.

## Resolution

`runsCore.createRun` resolves per-field, falling through:

  run.model    = opts.model    ?? agent.model    ?? defaults.model
  run.provider = opts.provider ?? agent.provider ?? defaults.provider

A new `core/models/defaults.ts` is the only place in the codebase that branches
on signed-in state. `getDefaultModelAndProvider()` returns name strings;
`resolveProviderConfig(name)` does the name → full LlmProvider lookup at
runtime. `createProvider` learns about `flavor: 'rowboat'` so the gateway is
just another flavor.

`provider` is stored as a name (e.g. `"rowboat"`, `"openai"`), not a full
LlmProvider object. API keys never get written into the JSONL log, and rotating a
key in models.json applies to existing runs without re-creation. The trade-off:
deleting a provider from settings breaks runs that referenced it (a clear error is
surfaced via `resolveProviderConfig`).

## Runtime

`streamAgent` no longer resolves anything — it reads `state.runModel` /
`state.runProvider`, looks up the provider config, instantiates. Subflows
inherit the parent run's pair, so KG / inline-task subagents run on whatever
the main run resolved to at creation. The `knowledgeGraphAgents` array,
`isKgAgent`, and the per-agent default constants are gone.

KG / inline-task / pre-built agents declare their preferred model in YAML
frontmatter (claude-haiku-4.5 / claude-sonnet-4.6) — used at resolution time
when those agents are themselves the top-level agent of a run (background
triggers, scheduled tasks, etc.).

## Standalone callers

Non-run LLM call sites (summarize_meeting, track/routing, builtin-tools
parseFile) and `agent-schedule/runner` were each branching on signed-in state
independently. They all route through `getDefaultModelAndProvider` +
`resolveProviderConfig` + `createProvider` now; `agent-schedule/runner`
switched from raw `runsRepo.create` to `runsCore.createRun` so resolution
applies to scheduled-agent runs too.

## UI

`chat-input-with-mentions` stops calling `models:saveConfig`. The dropdown
notifies the parent via `onSelectedModelChange` ({provider, model} as names);
App.tsx stashes selection per-tab and passes it to the next `runs:create`.
When a run already exists, the input fetches it and renders a static label —
model can't change mid-run.

## Legacy runs

A lenient zod schema in `repo.ts` (`StartEvent.extend(...optional)` plus
`RunEvent.or(LegacyStartEvent)`) parses pre-existing runs. `repo.fetch` fills
missing model/provider from current defaults and returns the strict canonical
`Run` type. No file-rewriting migration; no impact on the canonical schema in
`@x/shared`.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Ramnique Singh 2026-04-22 12:26:01 +05:30
parent 51f2ad6e8a
commit 5c4aa77255
22 changed files with 256 additions and 179 deletions

View file

@ -817,6 +817,7 @@ function App() {
const chatTabIdCounterRef = useRef(0) const chatTabIdCounterRef = useRef(0)
const newChatTabId = () => `chat-tab-${++chatTabIdCounterRef.current}` const newChatTabId = () => `chat-tab-${++chatTabIdCounterRef.current}`
const chatDraftsRef = useRef(new Map<string, string>()) const chatDraftsRef = useRef(new Map<string, string>())
const selectedModelByTabRef = useRef(new Map<string, { provider: string; model: string }>())
const chatScrollTopByTabRef = useRef(new Map<string, number>()) const chatScrollTopByTabRef = useRef(new Map<string, number>())
const [toolOpenByTab, setToolOpenByTab] = useState<Record<string, Record<string, boolean>>>({}) const [toolOpenByTab, setToolOpenByTab] = useState<Record<string, Record<string, boolean>>>({})
const [chatViewportAnchorByTab, setChatViewportAnchorByTab] = useState<Record<string, ChatViewportAnchorState>>({}) const [chatViewportAnchorByTab, setChatViewportAnchorByTab] = useState<Record<string, ChatViewportAnchorState>>({})
@ -2165,8 +2166,10 @@ function App() {
let isNewRun = false let isNewRun = false
let newRunCreatedAt: string | null = null let newRunCreatedAt: string | null = null
if (!currentRunId) { if (!currentRunId) {
const selected = selectedModelByTabRef.current.get(submitTabId)
const run = await window.ipc.invoke('runs:create', { const run = await window.ipc.invoke('runs:create', {
agentId, agentId,
...(selected ? { model: selected.model, provider: selected.provider } : {}),
}) })
currentRunId = run.id currentRunId = run.id
newRunCreatedAt = run.createdAt newRunCreatedAt = run.createdAt
@ -2471,6 +2474,7 @@ function App() {
return next return next
}) })
chatDraftsRef.current.delete(tabId) chatDraftsRef.current.delete(tabId)
selectedModelByTabRef.current.delete(tabId)
chatScrollTopByTabRef.current.delete(tabId) chatScrollTopByTabRef.current.delete(tabId)
setToolOpenByTab((prev) => { setToolOpenByTab((prev) => {
if (!(tabId in prev)) return prev if (!(tabId in prev)) return prev
@ -4644,6 +4648,13 @@ function App() {
runId={tabState.runId} runId={tabState.runId}
initialDraft={chatDraftsRef.current.get(tab.id)} initialDraft={chatDraftsRef.current.get(tab.id)}
onDraftChange={(text) => setChatDraftForTab(tab.id, text)} onDraftChange={(text) => setChatDraftForTab(tab.id, text)}
onSelectedModelChange={(m) => {
if (m) {
selectedModelByTabRef.current.set(tab.id, m)
} else {
selectedModelByTabRef.current.delete(tab.id)
}
}}
isRecording={isActive && isRecording} isRecording={isActive && isRecording}
recordingText={isActive ? voice.interimText : undefined} recordingText={isActive ? voice.interimText : undefined}
recordingState={isActive ? (voice.state === 'connecting' ? 'connecting' : 'listening') : undefined} recordingState={isActive ? (voice.state === 'connecting' ? 'connecting' : 'listening') : undefined}
@ -4697,6 +4708,13 @@ function App() {
onPresetMessageConsumed={() => setPresetMessage(undefined)} onPresetMessageConsumed={() => setPresetMessage(undefined)}
getInitialDraft={(tabId) => chatDraftsRef.current.get(tabId)} getInitialDraft={(tabId) => chatDraftsRef.current.get(tabId)}
onDraftChangeForTab={setChatDraftForTab} onDraftChangeForTab={setChatDraftForTab}
onSelectedModelChangeForTab={(tabId, m) => {
if (m) {
selectedModelByTabRef.current.set(tabId, m)
} else {
selectedModelByTabRef.current.delete(tabId)
}
}}
pendingAskHumanRequests={pendingAskHumanRequests} pendingAskHumanRequests={pendingAskHumanRequests}
allPermissionRequests={allPermissionRequests} allPermissionRequests={allPermissionRequests}
permissionResponses={permissionResponses} permissionResponses={permissionResponses}

View file

@ -69,13 +69,16 @@ const providerDisplayNames: Record<string, string> = {
rowboat: 'Rowboat', rowboat: 'Rowboat',
} }
type ProviderName = "openai" | "anthropic" | "google" | "openrouter" | "aigateway" | "ollama" | "openai-compatible" | "rowboat"
interface ConfiguredModel { interface ConfiguredModel {
flavor: "openai" | "anthropic" | "google" | "openrouter" | "aigateway" | "ollama" | "openai-compatible" | "rowboat" provider: ProviderName
model: string
}
export interface SelectedModel {
provider: string
model: string model: string
apiKey?: string
baseURL?: string
headers?: Record<string, string>
knowledgeGraphModel?: string
} }
function getAttachmentIcon(kind: AttachmentIconKind) { function getAttachmentIcon(kind: AttachmentIconKind) {
@ -120,6 +123,8 @@ interface ChatInputInnerProps {
ttsMode?: 'summary' | 'full' ttsMode?: 'summary' | 'full'
onToggleTts?: () => void onToggleTts?: () => void
onTtsModeChange?: (mode: 'summary' | 'full') => void onTtsModeChange?: (mode: 'summary' | 'full') => void
/** Fired when the user picks a different model in the dropdown (only when no run exists yet). */
onSelectedModelChange?: (model: SelectedModel | null) => void
} }
function ChatInputInner({ function ChatInputInner({
@ -145,6 +150,7 @@ function ChatInputInner({
ttsMode, ttsMode,
onToggleTts, onToggleTts,
onTtsModeChange, onTtsModeChange,
onSelectedModelChange,
}: ChatInputInnerProps) { }: ChatInputInnerProps) {
const controller = usePromptInputController() const controller = usePromptInputController()
const message = controller.textInput.value const message = controller.textInput.value
@ -155,10 +161,27 @@ function ChatInputInner({
const [configuredModels, setConfiguredModels] = useState<ConfiguredModel[]>([]) const [configuredModels, setConfiguredModels] = useState<ConfiguredModel[]>([])
const [activeModelKey, setActiveModelKey] = useState('') const [activeModelKey, setActiveModelKey] = useState('')
const [lockedModel, setLockedModel] = useState<SelectedModel | null>(null)
const [searchEnabled, setSearchEnabled] = useState(false) const [searchEnabled, setSearchEnabled] = useState(false)
const [searchAvailable, setSearchAvailable] = useState(false) const [searchAvailable, setSearchAvailable] = useState(false)
const [isRowboatConnected, setIsRowboatConnected] = useState(false) const [isRowboatConnected, setIsRowboatConnected] = useState(false)
// When a run exists, freeze the dropdown to the run's resolved model+provider.
useEffect(() => {
if (!runId) {
setLockedModel(null)
return
}
let cancelled = false
window.ipc.invoke('runs:fetch', { runId }).then((run) => {
if (cancelled) return
if (run.provider && run.model) {
setLockedModel({ provider: run.provider, model: run.model })
}
}).catch(() => { /* legacy run or fetch failure — leave unlocked */ })
return () => { cancelled = true }
}, [runId])
// Check Rowboat sign-in state // Check Rowboat sign-in state
useEffect(() => { useEffect(() => {
window.ipc.invoke('oauth:getState', null).then((result) => { window.ipc.invoke('oauth:getState', null).then((result) => {
@ -176,42 +199,20 @@ function ChatInputInner({
return cleanup return cleanup
}, []) }, [])
// Load model config (gateway when signed in, local config when BYOK) // Load the list of models the user can choose from.
// Signed-in: gateway model list. Signed-out: providers configured in models.json.
const loadModelConfig = useCallback(async () => { const loadModelConfig = useCallback(async () => {
try { try {
if (isRowboatConnected) { if (isRowboatConnected) {
// Fetch gateway models
const listResult = await window.ipc.invoke('models:list', null) const listResult = await window.ipc.invoke('models:list', null)
const rowboatProvider = listResult.providers?.find( const rowboatProvider = listResult.providers?.find(
(p: { id: string }) => p.id === 'rowboat' (p: { id: string }) => p.id === 'rowboat'
) )
const models: ConfiguredModel[] = (rowboatProvider?.models || []).map( const models: ConfiguredModel[] = (rowboatProvider?.models || []).map(
(m: { id: string }) => ({ flavor: 'rowboat', model: m.id }) (m: { id: string }) => ({ provider: 'rowboat', model: m.id })
) )
// Read current default from config
let defaultModel = ''
try {
const result = await window.ipc.invoke('workspace:readFile', { path: 'config/models.json' })
const parsed = JSON.parse(result.data)
defaultModel = parsed?.model || ''
} catch { /* no config yet */ }
if (defaultModel) {
models.sort((a, b) => {
if (a.model === defaultModel) return -1
if (b.model === defaultModel) return 1
return 0
})
}
setConfiguredModels(models) setConfiguredModels(models)
const activeKey = defaultModel
? `rowboat/${defaultModel}`
: models[0] ? `rowboat/${models[0].model}` : ''
if (activeKey) setActiveModelKey(activeKey)
} else { } else {
// BYOK: read from local models.json
const result = await window.ipc.invoke('workspace:readFile', { path: 'config/models.json' }) const result = await window.ipc.invoke('workspace:readFile', { path: 'config/models.json' })
const parsed = JSON.parse(result.data) const parsed = JSON.parse(result.data)
const models: ConfiguredModel[] = [] const models: ConfiguredModel[] = []
@ -223,32 +224,12 @@ function ChatInputInner({
const allModels = modelList.length > 0 ? modelList : singleModel ? [singleModel] : [] const allModels = modelList.length > 0 ? modelList : singleModel ? [singleModel] : []
for (const model of allModels) { for (const model of allModels) {
if (model) { if (model) {
models.push({ models.push({ provider: flavor as ProviderName, model })
flavor: flavor as ConfiguredModel['flavor'],
model,
apiKey: (e.apiKey as string) || undefined,
baseURL: (e.baseURL as string) || undefined,
headers: (e.headers as Record<string, string>) || undefined,
knowledgeGraphModel: (e.knowledgeGraphModel as string) || undefined,
})
} }
} }
} }
} }
const defaultKey = parsed?.provider?.flavor && parsed?.model
? `${parsed.provider.flavor}/${parsed.model}`
: ''
models.sort((a, b) => {
const aKey = `${a.flavor}/${a.model}`
const bKey = `${b.flavor}/${b.model}`
if (aKey === defaultKey) return -1
if (bKey === defaultKey) return 1
return 0
})
setConfiguredModels(models) setConfiguredModels(models)
if (defaultKey) {
setActiveModelKey(defaultKey)
}
} }
} catch { } catch {
// No config yet // No config yet
@ -284,40 +265,15 @@ function ChatInputInner({
checkSearch() checkSearch()
}, [isActive, isRowboatConnected]) }, [isActive, isRowboatConnected])
const handleModelChange = useCallback(async (key: string) => { // Selecting a model affects only the *next* run created from this tab.
const entry = configuredModels.find((m) => `${m.flavor}/${m.model}` === key) // Once a run exists, model is frozen on the run and the dropdown is read-only.
const handleModelChange = useCallback((key: string) => {
if (lockedModel) return
const entry = configuredModels.find((m) => `${m.provider}/${m.model}` === key)
if (!entry) return if (!entry) return
setActiveModelKey(key) setActiveModelKey(key)
onSelectedModelChange?.({ provider: entry.provider, model: entry.model })
try { }, [configuredModels, lockedModel, onSelectedModelChange])
if (entry.flavor === 'rowboat') {
// Gateway model — save with valid Zod flavor, no credentials
await window.ipc.invoke('models:saveConfig', {
provider: { flavor: 'openrouter' as const },
model: entry.model,
knowledgeGraphModel: entry.knowledgeGraphModel,
})
} else {
// BYOK — preserve full provider config
const providerModels = configuredModels
.filter((m) => m.flavor === entry.flavor)
.map((m) => m.model)
await window.ipc.invoke('models:saveConfig', {
provider: {
flavor: entry.flavor,
apiKey: entry.apiKey,
baseURL: entry.baseURL,
headers: entry.headers,
},
model: entry.model,
models: providerModels,
knowledgeGraphModel: entry.knowledgeGraphModel,
})
}
} catch {
toast.error('Failed to switch model')
}
}, [configuredModels])
// Restore the tab draft when this input mounts. // Restore the tab draft when this input mounts.
useEffect(() => { useEffect(() => {
@ -555,7 +511,14 @@ function ChatInputInner({
) )
)} )}
<div className="flex-1" /> <div className="flex-1" />
{configuredModels.length > 0 && ( {lockedModel ? (
<span
className="flex h-7 shrink-0 items-center gap-1 rounded-full px-2 text-xs text-muted-foreground"
title={`${providerDisplayNames[lockedModel.provider] || lockedModel.provider} — fixed for this chat`}
>
<span className="max-w-[150px] truncate">{lockedModel.model}</span>
</span>
) : configuredModels.length > 0 ? (
<DropdownMenu> <DropdownMenu>
<DropdownMenuTrigger asChild> <DropdownMenuTrigger asChild>
<button <button
@ -563,7 +526,7 @@ function ChatInputInner({
className="flex h-7 shrink-0 items-center gap-1 rounded-full px-2 text-xs text-muted-foreground transition-colors hover:bg-muted hover:text-foreground" className="flex h-7 shrink-0 items-center gap-1 rounded-full px-2 text-xs text-muted-foreground transition-colors hover:bg-muted hover:text-foreground"
> >
<span className="max-w-[150px] truncate"> <span className="max-w-[150px] truncate">
{configuredModels.find((m) => `${m.flavor}/${m.model}` === activeModelKey)?.model || configuredModels[0]?.model || 'Model'} {configuredModels.find((m) => `${m.provider}/${m.model}` === activeModelKey)?.model || configuredModels[0]?.model || 'Model'}
</span> </span>
<ChevronDown className="h-3 w-3" /> <ChevronDown className="h-3 w-3" />
</button> </button>
@ -571,18 +534,18 @@ function ChatInputInner({
<DropdownMenuContent align="end"> <DropdownMenuContent align="end">
<DropdownMenuRadioGroup value={activeModelKey} onValueChange={handleModelChange}> <DropdownMenuRadioGroup value={activeModelKey} onValueChange={handleModelChange}>
{configuredModels.map((m) => { {configuredModels.map((m) => {
const key = `${m.flavor}/${m.model}` const key = `${m.provider}/${m.model}`
return ( return (
<DropdownMenuRadioItem key={key} value={key}> <DropdownMenuRadioItem key={key} value={key}>
<span className="truncate">{m.model}</span> <span className="truncate">{m.model}</span>
<span className="ml-2 text-xs text-muted-foreground">{providerDisplayNames[m.flavor] || m.flavor}</span> <span className="ml-2 text-xs text-muted-foreground">{providerDisplayNames[m.provider] || m.provider}</span>
</DropdownMenuRadioItem> </DropdownMenuRadioItem>
) )
})} })}
</DropdownMenuRadioGroup> </DropdownMenuRadioGroup>
</DropdownMenuContent> </DropdownMenuContent>
</DropdownMenu> </DropdownMenu>
)} ) : null}
{onToggleTts && ttsAvailable && ( {onToggleTts && ttsAvailable && (
<div className="flex shrink-0 items-center"> <div className="flex shrink-0 items-center">
<Tooltip> <Tooltip>
@ -729,6 +692,7 @@ export interface ChatInputWithMentionsProps {
ttsMode?: 'summary' | 'full' ttsMode?: 'summary' | 'full'
onToggleTts?: () => void onToggleTts?: () => void
onTtsModeChange?: (mode: 'summary' | 'full') => void onTtsModeChange?: (mode: 'summary' | 'full') => void
onSelectedModelChange?: (model: SelectedModel | null) => void
} }
export function ChatInputWithMentions({ export function ChatInputWithMentions({
@ -757,6 +721,7 @@ export function ChatInputWithMentions({
ttsMode, ttsMode,
onToggleTts, onToggleTts,
onTtsModeChange, onTtsModeChange,
onSelectedModelChange,
}: ChatInputWithMentionsProps) { }: ChatInputWithMentionsProps) {
return ( return (
<PromptInputProvider knowledgeFiles={knowledgeFiles} recentFiles={recentFiles} visibleFiles={visibleFiles}> <PromptInputProvider knowledgeFiles={knowledgeFiles} recentFiles={recentFiles} visibleFiles={visibleFiles}>
@ -783,6 +748,7 @@ export function ChatInputWithMentions({
ttsMode={ttsMode} ttsMode={ttsMode}
onToggleTts={onToggleTts} onToggleTts={onToggleTts}
onTtsModeChange={onTtsModeChange} onTtsModeChange={onTtsModeChange}
onSelectedModelChange={onSelectedModelChange}
/> />
</PromptInputProvider> </PromptInputProvider>
) )

View file

@ -26,7 +26,7 @@ import { type PromptInputMessage, type FileMention } from '@/components/ai-eleme
import { FileCardProvider } from '@/contexts/file-card-context' import { FileCardProvider } from '@/contexts/file-card-context'
import { MarkdownPreOverride } from '@/components/ai-elements/markdown-code-override' import { MarkdownPreOverride } from '@/components/ai-elements/markdown-code-override'
import { TabBar, type ChatTab } from '@/components/tab-bar' import { TabBar, type ChatTab } from '@/components/tab-bar'
import { ChatInputWithMentions, type StagedAttachment } from '@/components/chat-input-with-mentions' import { ChatInputWithMentions, type StagedAttachment, type SelectedModel } from '@/components/chat-input-with-mentions'
import { ChatMessageAttachments } from '@/components/chat-message-attachments' import { ChatMessageAttachments } from '@/components/chat-message-attachments'
import { wikiLabel } from '@/lib/wiki-links' import { wikiLabel } from '@/lib/wiki-links'
import { import {
@ -158,6 +158,7 @@ interface ChatSidebarProps {
onPresetMessageConsumed?: () => void onPresetMessageConsumed?: () => void
getInitialDraft?: (tabId: string) => string | undefined getInitialDraft?: (tabId: string) => string | undefined
onDraftChangeForTab?: (tabId: string, text: string) => void onDraftChangeForTab?: (tabId: string, text: string) => void
onSelectedModelChangeForTab?: (tabId: string, model: SelectedModel | null) => void
pendingAskHumanRequests?: ChatTabViewState['pendingAskHumanRequests'] pendingAskHumanRequests?: ChatTabViewState['pendingAskHumanRequests']
allPermissionRequests?: ChatTabViewState['allPermissionRequests'] allPermissionRequests?: ChatTabViewState['allPermissionRequests']
permissionResponses?: ChatTabViewState['permissionResponses'] permissionResponses?: ChatTabViewState['permissionResponses']
@ -211,6 +212,7 @@ export function ChatSidebar({
onPresetMessageConsumed, onPresetMessageConsumed,
getInitialDraft, getInitialDraft,
onDraftChangeForTab, onDraftChangeForTab,
onSelectedModelChangeForTab,
pendingAskHumanRequests = new Map(), pendingAskHumanRequests = new Map(),
allPermissionRequests = new Map(), allPermissionRequests = new Map(),
permissionResponses = new Map(), permissionResponses = new Map(),
@ -662,6 +664,7 @@ export function ChatSidebar({
runId={tabState.runId} runId={tabState.runId}
initialDraft={getInitialDraft?.(tab.id)} initialDraft={getInitialDraft?.(tab.id)}
onDraftChange={onDraftChangeForTab ? (text) => onDraftChangeForTab(tab.id, text) : undefined} onDraftChange={onDraftChangeForTab ? (text) => onDraftChangeForTab(tab.id, text) : undefined}
onSelectedModelChange={onSelectedModelChangeForTab ? (m) => onSelectedModelChangeForTab(tab.id, m) : undefined}
isRecording={isActive && isRecording} isRecording={isActive && isRecording}
recordingText={isActive ? recordingText : undefined} recordingText={isActive ? recordingText : undefined}
recordingState={isActive ? recordingState : undefined} recordingState={isActive ? recordingState : undefined}

View file

@ -8,6 +8,7 @@ import { IMonotonicallyIncreasingIdGenerator } from "../application/lib/id-gen.j
import { AgentScheduleConfig, AgentScheduleEntry } from "@x/shared/dist/agent-schedule.js"; import { AgentScheduleConfig, AgentScheduleEntry } from "@x/shared/dist/agent-schedule.js";
import { AgentScheduleState, AgentScheduleStateEntry } from "@x/shared/dist/agent-schedule-state.js"; import { AgentScheduleState, AgentScheduleStateEntry } from "@x/shared/dist/agent-schedule-state.js";
import { MessageEvent } from "@x/shared/dist/runs.js"; import { MessageEvent } from "@x/shared/dist/runs.js";
import { createRun } from "../runs/runs.js";
import z from "zod"; import z from "zod";
const DEFAULT_STARTING_MESSAGE = "go"; const DEFAULT_STARTING_MESSAGE = "go";
@ -162,8 +163,8 @@ async function runAgent(
}); });
try { try {
// Create a new run // Create a new run via core (resolves agent + default model+provider).
const run = await runsRepo.create({ agentId: agentName }); const run = await createRun({ agentId: agentName });
console.log(`[AgentRunner] Created run ${run.id} for agent ${agentName}`); console.log(`[AgentRunner] Created run ${run.id} for agent ${agentName}`);
// Add the starting message as a user message // Add the starting message as a user message

View file

@ -16,8 +16,7 @@ import { isBlocked, extractCommandNames } from "../application/lib/command-execu
import container from "../di/container.js"; import container from "../di/container.js";
import { IModelConfigRepo } from "../models/repo.js"; import { IModelConfigRepo } from "../models/repo.js";
import { createProvider } from "../models/models.js"; import { createProvider } from "../models/models.js";
import { isSignedIn } from "../account/account.js"; import { resolveProviderConfig } from "../models/defaults.js";
import { getGatewayProvider } from "../models/gateway.js";
import { IAgentsRepo } from "./repo.js"; import { IAgentsRepo } from "./repo.js";
import { IMonotonicallyIncreasingIdGenerator } from "../application/lib/id-gen.js"; import { IMonotonicallyIncreasingIdGenerator } from "../application/lib/id-gen.js";
import { IBus } from "../application/lib/bus.js"; import { IBus } from "../application/lib/bus.js";
@ -649,6 +648,8 @@ export class AgentState {
runId: string | null = null; runId: string | null = null;
agent: z.infer<typeof Agent> | null = null; agent: z.infer<typeof Agent> | null = null;
agentName: string | null = null; agentName: string | null = null;
runModel: string | null = null;
runProvider: string | null = null;
messages: z.infer<typeof MessageList> = []; messages: z.infer<typeof MessageList> = [];
lastAssistantMsg: z.infer<typeof AssistantMessage> | null = null; lastAssistantMsg: z.infer<typeof AssistantMessage> | null = null;
subflowStates: Record<string, AgentState> = {}; subflowStates: Record<string, AgentState> = {};
@ -762,13 +763,18 @@ export class AgentState {
case "start": case "start":
this.runId = event.runId; this.runId = event.runId;
this.agentName = event.agentName; this.agentName = event.agentName;
this.runModel = event.model;
this.runProvider = event.provider;
break; break;
case "spawn-subflow": case "spawn-subflow":
// Seed the subflow state with its agent so downstream loadAgent works. // Seed the subflow state with its agent so downstream loadAgent works.
// Subflows inherit the parent run's model+provider — there's one pair per run.
if (!this.subflowStates[event.toolCallId]) { if (!this.subflowStates[event.toolCallId]) {
this.subflowStates[event.toolCallId] = new AgentState(); this.subflowStates[event.toolCallId] = new AgentState();
} }
this.subflowStates[event.toolCallId].agentName = event.agentName; this.subflowStates[event.toolCallId].agentName = event.agentName;
this.subflowStates[event.toolCallId].runModel = this.runModel;
this.subflowStates[event.toolCallId].runProvider = this.runProvider;
break; break;
case "message": case "message":
this.messages.push(event.message); this.messages.push(event.message);
@ -857,35 +863,23 @@ export async function* streamAgent({
yield event; yield event;
} }
const modelConfig = await modelConfigRepo.getConfig();
if (!modelConfig) {
throw new Error("Model config not found");
}
// set up agent // set up agent
const agent = await loadAgent(state.agentName!); const agent = await loadAgent(state.agentName!);
// set up tools // set up tools
const tools = await buildTools(agent); const tools = await buildTools(agent);
// set up provider + model // model+provider were resolved and frozen on the run at runs:create time.
const signedIn = await isSignedIn(); // Look up the named provider's current credentials from models.json and
const provider = signedIn // instantiate the LLM client. No selection happens here.
? await getGatewayProvider() if (!state.runModel || !state.runProvider) {
: createProvider(modelConfig.provider); throw new Error(`Run ${runId} is missing model/provider on its start event`);
const knowledgeGraphAgents = ["note_creation", "email-draft", "meeting-prep", "labeling_agent", "note_tagging_agent", "agent_notes_agent"]; }
const isKgAgent = knowledgeGraphAgents.includes(state.agentName!); const modelId = state.runModel;
const isInlineTaskAgent = state.agentName === "inline_task_agent"; const providerConfig = await resolveProviderConfig(state.runProvider);
const defaultModel = signedIn ? "gpt-5.4" : modelConfig.model; const provider = createProvider(providerConfig);
const defaultKgModel = signedIn ? "anthropic/claude-haiku-4.5" : defaultModel;
const defaultInlineTaskModel = signedIn ? "anthropic/claude-sonnet-4.6" : defaultModel;
const modelId = isInlineTaskAgent
? defaultInlineTaskModel
: (isKgAgent && modelConfig.knowledgeGraphModel)
? modelConfig.knowledgeGraphModel
: isKgAgent ? defaultKgModel : defaultModel;
const model = provider.languageModel(modelId); const model = provider.languageModel(modelId);
logger.log(`using model: ${modelId}`); logger.log(`using model: ${modelId} (provider: ${state.runProvider})`);
let loopCounter = 0; let loopCounter = 0;
let voiceInput = false; let voiceInput = false;

View file

@ -21,9 +21,8 @@ import { BrowserControlInputSchema, type BrowserControlInput } from "@x/shared/d
import type { ToolContext } from "./exec-tool.js"; import type { ToolContext } from "./exec-tool.js";
import { generateText } from "ai"; import { generateText } from "ai";
import { createProvider } from "../../models/models.js"; import { createProvider } from "../../models/models.js";
import { IModelConfigRepo } from "../../models/repo.js"; import { getDefaultModelAndProvider, resolveProviderConfig } from "../../models/defaults.js";
import { isSignedIn } from "../../account/account.js"; import { isSignedIn } from "../../account/account.js";
import { getGatewayProvider } from "../../models/gateway.js";
import { getAccessToken } from "../../auth/tokens.js"; import { getAccessToken } from "../../auth/tokens.js";
import { API_URL } from "../../config/env.js"; import { API_URL } from "../../config/env.js";
import { updateContent, updateTrackBlock } from "../../knowledge/track/fileops.js"; import { updateContent, updateTrackBlock } from "../../knowledge/track/fileops.js";
@ -746,13 +745,9 @@ export const BuiltinTools: z.infer<typeof BuiltinToolsSchema> = {
const base64 = buffer.toString('base64'); const base64 = buffer.toString('base64');
// Resolve model config from DI container const { model: modelId, provider: providerName } = await getDefaultModelAndProvider();
const modelConfigRepo = container.resolve<IModelConfigRepo>('modelConfigRepo'); const providerConfig = await resolveProviderConfig(providerName);
const modelConfig = await modelConfigRepo.getConfig(); const model = createProvider(providerConfig).languageModel(modelId);
const provider = await isSignedIn()
? await getGatewayProvider()
: createProvider(modelConfig.provider);
const model = provider.languageModel(modelConfig.model);
const userPrompt = prompt || 'Convert this file to well-structured markdown.'; const userPrompt = prompt || 'Convert this file to well-structured markdown.';

View file

@ -1,5 +1,6 @@
export function getRaw(): string { export function getRaw(): string {
return `--- return `---
model: anthropic/claude-haiku-4.5
tools: tools:
workspace-writeFile: workspace-writeFile:
type: builtin type: builtin

View file

@ -13,7 +13,7 @@ export function getRaw(): string {
const defaultEndISO = defaultEnd.toISOString(); const defaultEndISO = defaultEnd.toISOString();
return `--- return `---
model: gpt-5.2 model: anthropic/claude-sonnet-4.6
tools: tools:
${toolEntries} ${toolEntries}
--- ---

View file

@ -2,7 +2,7 @@ import { renderTagSystemForEmails } from './tag_system.js';
export function getRaw(): string { export function getRaw(): string {
return `--- return `---
model: gpt-5.2 model: anthropic/claude-haiku-4.5
tools: tools:
workspace-readFile: workspace-readFile:
type: builtin type: builtin

View file

@ -3,7 +3,7 @@ import { renderNoteEffectRules } from './tag_system.js';
export function getRaw(): string { export function getRaw(): string {
return `--- return `---
model: gpt-5.2 model: anthropic/claude-haiku-4.5
tools: tools:
workspace-writeFile: workspace-writeFile:
type: builtin type: builtin

View file

@ -2,7 +2,7 @@ import { renderTagSystemForNotes } from './tag_system.js';
export function getRaw(): string { export function getRaw(): string {
return `--- return `---
model: gpt-5.2 model: anthropic/claude-haiku-4.5
tools: tools:
workspace-readFile: workspace-readFile:
type: builtin type: builtin

View file

@ -1,11 +1,8 @@
import fs from 'fs'; import fs from 'fs';
import path from 'path'; import path from 'path';
import { generateText } from 'ai'; import { generateText } from 'ai';
import container from '../di/container.js';
import type { IModelConfigRepo } from '../models/repo.js';
import { createProvider } from '../models/models.js'; import { createProvider } from '../models/models.js';
import { isSignedIn } from '../account/account.js'; import { getDefaultModelAndProvider, resolveProviderConfig } from '../models/defaults.js';
import { getGatewayProvider } from '../models/gateway.js';
import { WorkDir } from '../config/config.js'; import { WorkDir } from '../config/config.js';
const CALENDAR_SYNC_DIR = path.join(WorkDir, 'calendar_sync'); const CALENDAR_SYNC_DIR = path.join(WorkDir, 'calendar_sync');
@ -138,15 +135,9 @@ function loadCalendarEventContext(calendarEventJson: string): string {
} }
export async function summarizeMeeting(transcript: string, meetingStartTime?: string, calendarEventJson?: string): Promise<string> { export async function summarizeMeeting(transcript: string, meetingStartTime?: string, calendarEventJson?: string): Promise<string> {
const repo = container.resolve<IModelConfigRepo>('modelConfigRepo'); const { model: modelId, provider: providerName } = await getDefaultModelAndProvider();
const config = await repo.getConfig(); const providerConfig = await resolveProviderConfig(providerName);
const signedIn = await isSignedIn(); const model = createProvider(providerConfig).languageModel(modelId);
const provider = signedIn
? await getGatewayProvider()
: createProvider(config.provider);
const modelId = config.meetingNotesModel
|| (signedIn ? "gpt-5.4" : config.model);
const model = provider.languageModel(modelId);
// If a specific calendar event was linked, use it directly. // If a specific calendar event was linked, use it directly.
// Otherwise fall back to scanning events within ±3 hours. // Otherwise fall back to scanning events within ±3 hours.

View file

@ -1,11 +1,8 @@
import { generateObject } from 'ai'; import { generateObject } from 'ai';
import { trackBlock, PrefixLogger } from '@x/shared'; import { trackBlock, PrefixLogger } from '@x/shared';
import type { KnowledgeEvent } from '@x/shared/dist/track-block.js'; import type { KnowledgeEvent } from '@x/shared/dist/track-block.js';
import container from '../../di/container.js';
import type { IModelConfigRepo } from '../../models/repo.js';
import { createProvider } from '../../models/models.js'; import { createProvider } from '../../models/models.js';
import { isSignedIn } from '../../account/account.js'; import { getDefaultModelAndProvider, resolveProviderConfig } from '../../models/defaults.js';
import { getGatewayProvider } from '../../models/gateway.js';
const log = new PrefixLogger('TrackRouting'); const log = new PrefixLogger('TrackRouting');
@ -37,15 +34,9 @@ Rules:
- For each candidate, return BOTH trackId and filePath exactly as given. trackIds are not globally unique.`; - For each candidate, return BOTH trackId and filePath exactly as given. trackIds are not globally unique.`;
async function resolveModel() { async function resolveModel() {
const repo = container.resolve<IModelConfigRepo>('modelConfigRepo'); const { model, provider } = await getDefaultModelAndProvider();
const config = await repo.getConfig(); const config = await resolveProviderConfig(provider);
const signedIn = await isSignedIn(); return createProvider(config).languageModel(model);
const provider = signedIn
? await getGatewayProvider()
: createProvider(config.provider);
const modelId = config.knowledgeGraphModel
|| (signedIn ? 'gpt-5.4' : config.model);
return provider.languageModel(modelId);
} }
function buildRoutingPrompt(event: KnowledgeEvent, batch: ParsedTrack[]): string { function buildRoutingPrompt(event: KnowledgeEvent, batch: ParsedTrack[]): string {

View file

@ -0,0 +1,53 @@
import z from "zod";
import { LlmProvider } from "@x/shared/dist/models.js";
import { IModelConfigRepo } from "./repo.js";
import { isSignedIn } from "../account/account.js";
import container from "../di/container.js";
const SIGNED_IN_DEFAULT_MODEL = "gpt-5.4";
const SIGNED_IN_DEFAULT_PROVIDER = "rowboat";
/**
* The single source of truth for "what model+provider should we use when
* the caller didn't specify and the agent didn't declare". Returns names only.
* This is the only place that branches on signed-in state.
*/
export async function getDefaultModelAndProvider(): Promise<{ model: string; provider: string }> {
if (await isSignedIn()) {
return { model: SIGNED_IN_DEFAULT_MODEL, provider: SIGNED_IN_DEFAULT_PROVIDER };
}
const repo = container.resolve<IModelConfigRepo>("modelConfigRepo");
const cfg = await repo.getConfig();
return { model: cfg.model, provider: cfg.provider.flavor };
}
/**
* Resolve a provider name (as stored on a run, an agent, or returned by
* getDefaultModelAndProvider) into the full LlmProvider config that
* createProvider expects (apiKey/baseURL/headers).
*
* - "rowboat" gateway provider (auth via OAuth bearer; no creds field).
* - other names look up models.json's `providers[name]` map.
* - fallback: if the name matches the active default's flavor (legacy
* single-provider configs that didn't write to the providers map yet).
*/
export async function resolveProviderConfig(name: string): Promise<z.infer<typeof LlmProvider>> {
  // The managed gateway carries no stored credentials (auth is an OAuth bearer
  // added at request time), so it needs no lookup.
  if (name === "rowboat") {
    return { flavor: "rowboat" };
  }
  const repo = container.resolve<IModelConfigRepo>("modelConfigRepo");
  const cfg = await repo.getConfig();
  // Preferred path: an explicit entry in models.json's `providers` map.
  const byName = cfg.providers?.[name];
  if (byName) {
    return LlmProvider.parse({
      flavor: name,
      apiKey: byName.apiKey,
      baseURL: byName.baseURL,
      headers: byName.headers,
    });
  }
  // Legacy fallback: single-provider configs that predate the providers map
  // only wrote the active default; accept a match on its flavor.
  if (cfg.provider.flavor === name) {
    return cfg.provider;
  }
  throw new Error(`Provider '${name}' is referenced but not configured`);
}

View file

@ -10,7 +10,7 @@ const authedFetch: typeof fetch = async (input, init) => {
return fetch(input, { ...init, headers }); return fetch(input, { ...init, headers });
}; };
export async function getGatewayProvider(): Promise<ProviderV2> { export function getGatewayProvider(): ProviderV2 {
return createOpenRouter({ return createOpenRouter({
baseURL: `${API_URL}/v1/llm`, baseURL: `${API_URL}/v1/llm`,
apiKey: 'managed-by-rowboat', apiKey: 'managed-by-rowboat',

View file

@ -8,7 +8,6 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { createOpenAICompatible } from '@ai-sdk/openai-compatible'; import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
import { LlmModelConfig, LlmProvider } from "@x/shared/dist/models.js"; import { LlmModelConfig, LlmProvider } from "@x/shared/dist/models.js";
import z from "zod"; import z from "zod";
import { isSignedIn } from "../account/account.js";
import { getGatewayProvider } from "./gateway.js"; import { getGatewayProvider } from "./gateway.js";
export const Provider = LlmProvider; export const Provider = LlmProvider;
@ -65,6 +64,8 @@ export function createProvider(config: z.infer<typeof Provider>): ProviderV2 {
baseURL, baseURL,
headers, headers,
}) as unknown as ProviderV2; }) as unknown as ProviderV2;
case "rowboat":
return getGatewayProvider();
default: default:
throw new Error(`Unsupported provider flavor: ${config.flavor}`); throw new Error(`Unsupported provider flavor: ${config.flavor}`);
} }
@ -80,9 +81,7 @@ export async function testModelConnection(
const controller = new AbortController(); const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), effectiveTimeout); const timeout = setTimeout(() => controller.abort(), effectiveTimeout);
try { try {
const provider = await isSignedIn() const provider = createProvider(providerConfig);
? await getGatewayProvider()
: createProvider(providerConfig);
const languageModel = provider.languageModel(model); const languageModel = provider.languageModel(model);
await generateText({ await generateText({
model: languageModel, model: languageModel,

View file

@ -1,5 +1,5 @@
--- ---
model: gpt-4.1 model: anthropic/claude-haiku-4.5
tools: tools:
workspace-readFile: workspace-readFile:
type: builtin type: builtin

View file

@ -1,5 +1,5 @@
--- ---
model: gpt-4.1 model: anthropic/claude-haiku-4.5
tools: tools:
workspace-readFile: workspace-readFile:
type: builtin type: builtin

View file

@ -6,9 +6,28 @@ import fsp from "fs/promises";
import fs from "fs"; import fs from "fs";
import readline from "readline"; import readline from "readline";
import { Run, RunEvent, StartEvent, CreateRunOptions, ListRunsResponse, MessageEvent } from "@x/shared/dist/runs.js"; import { Run, RunEvent, StartEvent, CreateRunOptions, ListRunsResponse, MessageEvent } from "@x/shared/dist/runs.js";
import { getDefaultModelAndProvider } from "../models/defaults.js";
/**
* Reading-only schemas: extend the canonical `StartEvent` / `RunEvent` to
* accept legacy run files written before `model`/`provider` were required.
*
* `RunEvent.or(LegacyStartEvent)` works because zod unions try left-to-right:
* for any non-start event RunEvent matches first; for a strict start event
* RunEvent still matches; only a legacy start event falls through and parses
* as LegacyStartEvent. New event types stay maintained in one place
(`@x/shared/dist/runs.js`); the lenient form just adds one fallback variant.
*/
// Lenient start event: `model`/`provider` become optional so run files
// written before the per-run model migration still parse on read.
const LegacyStartEvent = StartEvent.extend({
model: z.string().optional(),
provider: z.string().optional(),
});
// Read-path schema: strict RunEvent is tried first; only a legacy start
// event falls through to the lenient variant (zod unions match left-to-right).
const ReadRunEvent = RunEvent.or(LegacyStartEvent);
// Repo-layer create options: model/provider are required by the time the
// repo sees them — runsCore.createRun resolves defaults before calling create.
export type CreateRunRepoOptions = Required<z.infer<typeof CreateRunOptions>>;
export interface IRunsRepo { export interface IRunsRepo {
create(options: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>>; create(options: CreateRunRepoOptions): Promise<z.infer<typeof Run>>;
fetch(id: string): Promise<z.infer<typeof Run>>; fetch(id: string): Promise<z.infer<typeof Run>>;
list(cursor?: string): Promise<z.infer<typeof ListRunsResponse>>; list(cursor?: string): Promise<z.infer<typeof ListRunsResponse>>;
appendEvents(runId: string, events: z.infer<typeof RunEvent>[]): Promise<void>; appendEvents(runId: string, events: z.infer<typeof RunEvent>[]): Promise<void>;
@ -69,16 +88,19 @@ export class FSRunsRepo implements IRunsRepo {
/** /**
* Read file line-by-line using streams, stopping early once we have * Read file line-by-line using streams, stopping early once we have
* the start event and title (or determine there's no title). * the start event and title (or determine there's no title).
*
* Parses the start event with `LegacyStartEvent` so runs written before
* `model`/`provider` were required still surface in the list view.
*/ */
private async readRunMetadata(filePath: string): Promise<{ private async readRunMetadata(filePath: string): Promise<{
start: z.infer<typeof StartEvent>; start: z.infer<typeof LegacyStartEvent>;
title: string | undefined; title: string | undefined;
} | null> { } | null> {
return new Promise((resolve) => { return new Promise((resolve) => {
const stream = fs.createReadStream(filePath, { encoding: 'utf8' }); const stream = fs.createReadStream(filePath, { encoding: 'utf8' });
const rl = readline.createInterface({ input: stream, crlfDelay: Infinity }); const rl = readline.createInterface({ input: stream, crlfDelay: Infinity });
let start: z.infer<typeof StartEvent> | null = null; let start: z.infer<typeof LegacyStartEvent> | null = null;
let title: string | undefined; let title: string | undefined;
let lineIndex = 0; let lineIndex = 0;
@ -88,11 +110,10 @@ export class FSRunsRepo implements IRunsRepo {
try { try {
if (lineIndex === 0) { if (lineIndex === 0) {
// First line should be the start event start = LegacyStartEvent.parse(JSON.parse(trimmed));
start = StartEvent.parse(JSON.parse(trimmed));
} else { } else {
// Subsequent lines - look for first user message or assistant response // Subsequent lines - look for first user message or assistant response
const event = RunEvent.parse(JSON.parse(trimmed)); const event = ReadRunEvent.parse(JSON.parse(trimmed));
if (event.type === 'message') { if (event.type === 'message') {
const msg = event.message; const msg = event.message;
if (msg.role === 'user') { if (msg.role === 'user') {
@ -157,13 +178,15 @@ export class FSRunsRepo implements IRunsRepo {
); );
} }
async create(options: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>> { async create(options: CreateRunRepoOptions): Promise<z.infer<typeof Run>> {
const runId = await this.idGenerator.next(); const runId = await this.idGenerator.next();
const ts = new Date().toISOString(); const ts = new Date().toISOString();
const start: z.infer<typeof StartEvent> = { const start: z.infer<typeof StartEvent> = {
type: "start", type: "start",
runId, runId,
agentName: options.agentId, agentName: options.agentId,
model: options.model,
provider: options.provider,
subflow: [], subflow: [],
ts, ts,
}; };
@ -172,24 +195,41 @@ export class FSRunsRepo implements IRunsRepo {
id: runId, id: runId,
createdAt: ts, createdAt: ts,
agentId: options.agentId, agentId: options.agentId,
model: options.model,
provider: options.provider,
log: [start], log: [start],
}; };
} }
async fetch(id: string): Promise<z.infer<typeof Run>> { async fetch(id: string): Promise<z.infer<typeof Run>> {
const contents = await fsp.readFile(path.join(WorkDir, 'runs', `${id}.jsonl`), 'utf8'); const contents = await fsp.readFile(path.join(WorkDir, 'runs', `${id}.jsonl`), 'utf8');
const events = contents.split('\n') // Parse with the lenient schema so legacy start events (no model/provider) load.
const rawEvents = contents.split('\n')
.filter(line => line.trim() !== '') .filter(line => line.trim() !== '')
.map(line => RunEvent.parse(JSON.parse(line))); .map(line => ReadRunEvent.parse(JSON.parse(line)));
if (events.length === 0 || events[0].type !== 'start') { if (rawEvents.length === 0 || rawEvents[0].type !== 'start') {
throw new Error('Corrupt run data'); throw new Error('Corrupt run data');
} }
// Backfill model/provider on the start event from current defaults if missing,
// then promote to the canonical strict types for callers.
const rawStart = rawEvents[0];
const defaults = (!rawStart.model || !rawStart.provider)
? await getDefaultModelAndProvider()
: null;
const start: z.infer<typeof StartEvent> = {
...rawStart,
model: rawStart.model ?? defaults!.model,
provider: rawStart.provider ?? defaults!.provider,
};
const events: z.infer<typeof RunEvent>[] = [start, ...rawEvents.slice(1) as z.infer<typeof RunEvent>[]];
const title = this.extractTitle(events); const title = this.extractTitle(events);
return { return {
id, id,
title, title,
createdAt: events[0].ts!, createdAt: start.ts!,
agentId: events[0].agentName, agentId: start.agentName,
model: start.model,
provider: start.provider,
log: events, log: events,
}; };
} }

View file

@ -10,11 +10,21 @@ import { IRunsLock } from "./lock.js";
import { forceCloseAllMcpClients } from "../mcp/mcp.js"; import { forceCloseAllMcpClients } from "../mcp/mcp.js";
import { extractCommandNames } from "../application/lib/command-executor.js"; import { extractCommandNames } from "../application/lib/command-executor.js";
import { addToSecurityConfig } from "../config/security.js"; import { addToSecurityConfig } from "../config/security.js";
import { loadAgent } from "../agents/runtime.js";
import { getDefaultModelAndProvider } from "../models/defaults.js";
export async function createRun(opts: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>> { export async function createRun(opts: z.infer<typeof CreateRunOptions>): Promise<z.infer<typeof Run>> {
const repo = container.resolve<IRunsRepo>('runsRepo'); const repo = container.resolve<IRunsRepo>('runsRepo');
const bus = container.resolve<IBus>('bus'); const bus = container.resolve<IBus>('bus');
const run = await repo.create(opts);
// Resolve model+provider once at creation: opts > agent declaration > defaults.
// Both fields are plain strings (provider is a name, looked up at runtime).
const agent = await loadAgent(opts.agentId);
const defaults = await getDefaultModelAndProvider();
const model = opts.model ?? agent.model ?? defaults.model;
const provider = opts.provider ?? agent.provider ?? defaults.provider;
const run = await repo.create({ agentId: opts.agentId, model, provider });
await bus.publish(run.log[0]); await bus.publish(run.log[0]);
return run; return run;
} }

View file

@ -1,7 +1,7 @@
import { z } from "zod"; import { z } from "zod";
export const LlmProvider = z.object({ export const LlmProvider = z.object({
flavor: z.enum(["openai", "anthropic", "google", "openrouter", "aigateway", "ollama", "openai-compatible"]), flavor: z.enum(["openai", "anthropic", "google", "openrouter", "aigateway", "ollama", "openai-compatible", "rowboat"]),
apiKey: z.string().optional(), apiKey: z.string().optional(),
baseURL: z.string().optional(), baseURL: z.string().optional(),
headers: z.record(z.string(), z.string()).optional(), headers: z.record(z.string(), z.string()).optional(),
@ -11,6 +11,15 @@ export const LlmModelConfig = z.object({
provider: LlmProvider, provider: LlmProvider,
model: z.string(), model: z.string(),
models: z.array(z.string()).optional(), models: z.array(z.string()).optional(),
providers: z.record(z.string(), z.object({
apiKey: z.string().optional(),
baseURL: z.string().optional(),
headers: z.record(z.string(), z.string()).optional(),
model: z.string().optional(),
models: z.array(z.string()).optional(),
})).optional(),
// Deprecated: per-run model+provider supersedes these. Kept on the schema so
// existing settings/onboarding UIs continue to compile until they're cleaned up.
knowledgeGraphModel: z.string().optional(), knowledgeGraphModel: z.string().optional(),
meetingNotesModel: z.string().optional(), meetingNotesModel: z.string().optional(),
}); });

View file

@ -19,6 +19,8 @@ export const RunProcessingEndEvent = BaseRunEvent.extend({
export const StartEvent = BaseRunEvent.extend({ export const StartEvent = BaseRunEvent.extend({
type: z.literal("start"), type: z.literal("start"),
agentName: z.string(), agentName: z.string(),
model: z.string(),
provider: z.string(),
}); });
export const SpawnSubFlowEvent = BaseRunEvent.extend({ export const SpawnSubFlowEvent = BaseRunEvent.extend({
@ -121,6 +123,8 @@ export const Run = z.object({
title: z.string().optional(), title: z.string().optional(),
createdAt: z.iso.datetime(), createdAt: z.iso.datetime(),
agentId: z.string(), agentId: z.string(),
model: z.string(),
provider: z.string(),
log: z.array(RunEvent), log: z.array(RunEvent),
}); });
@ -134,6 +138,8 @@ export const ListRunsResponse = z.object({
nextCursor: z.string().optional(), nextCursor: z.string().optional(),
}); });
export const CreateRunOptions = Run.pick({ export const CreateRunOptions = z.object({
agentId: true, agentId: z.string(),
model: z.string().optional(),
provider: z.string().optional(),
}); });