added search button to chat input

This commit is contained in:
Arjun 2026-03-13 13:58:30 +05:30
parent c845a7c40d
commit 604d521ac2
7 changed files with 71 additions and 9 deletions

View file

@ -352,7 +352,7 @@ export function setupIpcHandlers() {
return runsCore.createRun(args);
},
'runs:createMessage': async (_event, args) => {
- return { messageId: await runsCore.createMessage(args.runId, args.message, args.voiceInput, args.voiceOutput) };
+ return { messageId: await runsCore.createMessage(args.runId, args.message, args.voiceInput, args.voiceOutput, args.searchEnabled) };
},
'runs:authorizePermission': async (_event, args) => {
await runsCore.authorizePermission(args.runId, args.authorization);

View file

@ -1896,7 +1896,8 @@ function App() {
const handlePromptSubmit = async (
message: PromptInputMessage,
mentions?: FileMention[],
- stagedAttachments: StagedAttachment[] = []
+ stagedAttachments: StagedAttachment[] = [],
+ searchEnabled?: boolean,
) => {
if (isProcessing) return
@ -1994,6 +1995,7 @@ function App() {
message: attachmentPayload,
voiceInput: pendingVoiceInputRef.current || undefined,
voiceOutput: ttsEnabledRef.current ? ttsModeRef.current : undefined,
searchEnabled: searchEnabled || undefined,
})
} else {
// Legacy path: plain string with optional XML-formatted @mentions.
@ -2024,6 +2026,7 @@ function App() {
message: formattedMessage,
voiceInput: pendingVoiceInputRef.current || undefined,
voiceOutput: ttsEnabledRef.current ? ttsModeRef.current : undefined,
searchEnabled: searchEnabled || undefined,
})
titleSource = formattedMessage

View file

@ -10,6 +10,7 @@ import {
FileSpreadsheet,
FileText,
FileVideo,
Globe,
Headphones,
LoaderIcon,
Mic,
@ -56,6 +57,7 @@ export type StagedAttachment = {
const MAX_ATTACHMENT_SIZE = 10 * 1024 * 1024 // 10MB
const providerDisplayNames: Record<string, string> = {
openai: 'OpenAI',
anthropic: 'Anthropic',
@ -95,7 +97,7 @@ function getAttachmentIcon(kind: AttachmentIconKind) {
}
interface ChatInputInnerProps {
- onSubmit: (message: PromptInputMessage, mentions?: FileMention[], attachments?: StagedAttachment[]) => void
+ onSubmit: (message: PromptInputMessage, mentions?: FileMention[], attachments?: StagedAttachment[], searchEnabled?: boolean) => void
onStop?: () => void
isProcessing: boolean
isStopping?: boolean
@ -152,6 +154,8 @@ function ChatInputInner({
const [configuredModels, setConfiguredModels] = useState<ConfiguredModel[]>([])
const [activeModelKey, setActiveModelKey] = useState('')
const [searchEnabled, setSearchEnabled] = useState(false)
const [searchAvailable, setSearchAvailable] = useState(false)
// Load model config from disk (on mount and whenever tab becomes active)
const loadModelConfig = useCallback(async () => {
@ -209,6 +213,27 @@ function ChatInputInner({
return () => window.removeEventListener('models-config-changed', handler)
}, [loadModelConfig])
// Check search tool availability (brave or exa)
useEffect(() => {
const checkSearch = async () => {
let available = false
try {
const raw = await window.ipc.invoke('workspace:readFile', { path: 'config/brave-search.json' })
const config = JSON.parse(raw.data)
if (config.apiKey) available = true
} catch { /* not configured */ }
if (!available) {
try {
const raw = await window.ipc.invoke('workspace:readFile', { path: 'config/exa-search.json' })
const config = JSON.parse(raw.data)
if (config.apiKey) available = true
} catch { /* not configured */ }
}
setSearchAvailable(available)
}
checkSearch()
}, [isActive])
const handleModelChange = useCallback(async (key: string) => {
const entry = configuredModels.find((m) => `${m.flavor}/${m.model}` === key)
if (!entry) return
@ -290,11 +315,12 @@ function ChatInputInner({
const handleSubmit = useCallback(() => {
if (!canSubmit) return
- onSubmit({ text: message.trim(), files: [] }, controller.mentions.mentions, attachments)
+ onSubmit({ text: message.trim(), files: [] }, controller.mentions.mentions, attachments, searchEnabled || undefined)
controller.textInput.clear()
controller.mentions.clearMentions()
setAttachments([])
- }, [attachments, canSubmit, controller, message, onSubmit])
+ setSearchEnabled(false)
+ }, [attachments, canSubmit, controller, message, onSubmit, searchEnabled])
const handleKeyDown = useCallback((e: React.KeyboardEvent) => {
if (e.key === 'Enter' && !e.shiftKey) {
@ -446,6 +472,28 @@ function ChatInputInner({
>
<Plus className="h-4 w-4" />
</button>
{searchAvailable && (
searchEnabled ? (
<button
type="button"
onClick={() => setSearchEnabled(false)}
className="flex h-7 shrink-0 items-center gap-1.5 rounded-full border border-blue-200 bg-blue-50 px-2.5 text-blue-600 transition-colors hover:bg-blue-100 dark:border-blue-800 dark:bg-blue-950 dark:text-blue-400 dark:hover:bg-blue-900"
>
<Globe className="h-3.5 w-3.5" />
<span className="text-xs font-medium">Search</span>
<X className="h-3 w-3" />
</button>
) : (
<button
type="button"
onClick={() => setSearchEnabled(true)}
className="flex h-7 w-7 shrink-0 items-center justify-center rounded-full text-muted-foreground transition-colors hover:bg-muted hover:text-foreground"
aria-label="Search"
>
<Globe className="h-4 w-4" />
</button>
)
)}
<div className="flex-1" />
{configuredModels.length > 0 && (
<DropdownMenu>

View file

@ -896,6 +896,7 @@ export async function* streamAgent({
// get any queued user messages
let voiceInput = false;
let voiceOutput: 'summary' | 'full' | null = null;
let searchEnabled = false;
while (true) {
const msg = await messageQueue.dequeue(runId);
if (!msg) {
@ -904,6 +905,9 @@ export async function* streamAgent({
if (msg.voiceInput) {
voiceInput = true;
}
if (msg.searchEnabled) {
searchEnabled = true;
}
if (msg.voiceOutput) {
voiceOutput = msg.voiceOutput;
}
@ -958,6 +962,10 @@ export async function* streamAgent({
loopLogger.log('voice output enabled (full mode), injecting voice output prompt');
instructionsWithDateTime += `\n\n# Voice Output — Full Read-Aloud (MANDATORY)\nThe user wants your ENTIRE response spoken aloud. You MUST wrap your full response in <voice></voice> tags. This is NOT optional.\n\nRules:\n1. Wrap EACH sentence in its own separate <voice> tag so it can be spoken incrementally.\n2. Write your response in a natural, conversational style suitable for listening — no markdown headings, bullet points, or formatting symbols. Use plain spoken language.\n3. Structure the content as if you are speaking to the user directly. Use transitions like "first", "also", "one more thing" instead of visual formatting.\n4. Every sentence MUST be inside a <voice> tag. Do not leave any content outside <voice> tags.\n\nExample:\n<voice>Your meeting with Sarah covered three main things.</voice>\n<voice>First, you discussed the Q2 roadmap timeline and agreed to push the launch to April.</voice>\n<voice>Second, you talked about hiring for the backend role — Sarah will send over two candidates by Friday.</voice>\n<voice>And lastly, the client demo is next week on Thursday at 2pm, and you're handling the intro slides.</voice>`;
}
if (searchEnabled) {
loopLogger.log('search enabled, injecting search prompt');
instructionsWithDateTime += `\n\n# Search\nThe user has requested a search. Load the search skill and use web search or research search as needed to answer their query.`;
}
let streamError: string | null = null;
for await (const event of streamLlm(
model,

View file

@ -10,10 +10,11 @@ type EnqueuedMessage = {
message: UserMessageContentType;
voiceInput?: boolean;
voiceOutput?: VoiceOutputMode;
searchEnabled?: boolean;
};
export interface IMessageQueue {
- enqueue(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode): Promise<string>;
+ enqueue(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode, searchEnabled?: boolean): Promise<string>;
dequeue(runId: string): Promise<EnqueuedMessage | null>;
}
@ -29,7 +30,7 @@ export class InMemoryMessageQueue implements IMessageQueue {
this.idGenerator = idGenerator;
}
- async enqueue(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode): Promise<string> {
+ async enqueue(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode, searchEnabled?: boolean): Promise<string> {
if (!this.store[runId]) {
this.store[runId] = [];
}
@ -39,6 +40,7 @@ export class InMemoryMessageQueue implements IMessageQueue {
message,
voiceInput,
voiceOutput,
searchEnabled,
});
return id;
}

View file

@ -19,9 +19,9 @@ export async function createRun(opts: z.infer<typeof CreateRunOptions>): Promise
return run;
}
- export async function createMessage(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode): Promise<string> {
+ export async function createMessage(runId: string, message: UserMessageContentType, voiceInput?: boolean, voiceOutput?: VoiceOutputMode, searchEnabled?: boolean): Promise<string> {
const queue = container.resolve<IMessageQueue>('messageQueue');
- const id = await queue.enqueue(runId, message, voiceInput, voiceOutput);
+ const id = await queue.enqueue(runId, message, voiceInput, voiceOutput, searchEnabled);
const runtime = container.resolve<IAgentRuntime>('agentRuntime');
runtime.trigger(runId);
return id;

View file

@ -132,6 +132,7 @@ const ipcSchemas = {
message: UserMessageContent,
voiceInput: z.boolean().optional(),
voiceOutput: z.enum(['summary', 'full']).optional(),
searchEnabled: z.boolean().optional(),
}),
res: z.object({
messageId: z.string(),