refactor file locations

This commit is contained in:
Ramnique Singh 2025-08-18 07:08:26 +05:30
parent 5675cda52b
commit 53219974d3
24 changed files with 39 additions and 72 deletions

View file

@@ -0,0 +1,290 @@
// Agent handoffs using OpenAI Agents SDK native capabilities
import { Agent, handoff, Handoff } from "@openai/agents";
import { z } from "zod";
import { PrefixLogger } from "@/app/lib/utils";
import { WorkflowAgent } from "@/app/lib/types/workflow_types";
import {
HandoffContext,
PipelineContext,
TaskContext,
PipelineExecutionState
} from "./agents";
// Types for handoff input data (from SDK)

/**
 * Shape of the conversation data the SDK hands to an input filter
 * during a handoff. Mirrors the OpenAI Agents SDK structure.
 */
export interface HandoffInputData {
    // Conversation history prior to the current run (raw string or item list).
    inputHistory: string | any[];
    // Items produced before the handoff tool call was made.
    preHandoffItems: any[];
    // Items produced by the handoff itself (filters may append to this).
    newItems: any[];
    // Optional SDK run context.
    runContext?: any;
}

// Selects which default schema / input filter a handoff uses
// (see getDefaultSchemaForContext / createDefaultInputFilter below).
export type HandoffContextType = 'pipeline' | 'task' | 'direct';

/** Optional per-handoff overrides: schema, callback, filter, and logging. */
export interface AgentHandoffConfig {
    inputSchema?: z.ZodObject<any>;
    onHandoff?: (context: any, input: any) => void;
    inputFilter?: (data: HandoffInputData) => HandoffInputData;
    logger?: PrefixLogger;
}
/**
 * Resolve the default zod schema for a handoff context type.
 * 'direct' (and any unrecognized value) falls back to the generic
 * HandoffContext schema, matching the original switch default.
 */
function getDefaultSchemaForContext(contextType: HandoffContextType): z.ZodObject<any> {
    if (contextType === 'pipeline') {
        return PipelineContext;
    }
    if (contextType === 'task') {
        return TaskContext;
    }
    return HandoffContext;
}
/**
 * Build the default input filter for a context type. Pipeline and task
 * handoffs trim/shape the conversation; direct handoffs (and any
 * unrecognized type) pass everything through untouched.
 */
function createDefaultInputFilter(contextType: HandoffContextType) {
    return (data: HandoffInputData): HandoffInputData => {
        if (contextType === 'pipeline') {
            return filterForPipeline(data);
        }
        if (contextType === 'task') {
            return filterForTask(data);
        }
        // 'direct' and fallback: keep the full context
        return data;
    };
}
/**
 * Trim conversation context for pipeline execution: cap the history at the
 * 10 most recent items, and keep only pre-handoff items that are untyped,
 * messages, or pipeline-related tool calls.
 */
function filterForPipeline(data: HandoffInputData): HandoffInputData {
    const HISTORY_LIMIT = 10; // configurable limit on retained history items
    const trimmedHistory = Array.isArray(data.inputHistory)
        ? data.inputHistory.slice(-HISTORY_LIMIT)
        : data.inputHistory;
    const keepItem = (item: any): boolean => {
        if (!item.type || item.type === 'message') {
            return true;
        }
        // tool calls survive only when they look pipeline-related
        return item.type === 'tool_call' && !!item.name?.includes('pipeline');
    };
    return {
        ...data,
        inputHistory: trimmedHistory,
        preHandoffItems: data.preHandoffItems.filter(keepItem),
    };
}
/**
 * Shape context for task delegation: tasks get a deeper history window
 * (20 items) and every pre-handoff item is preserved as-is.
 */
function filterForTask(data: HandoffInputData): HandoffInputData {
    const HISTORY_LIMIT = 20; // tasks may need more context than pipelines
    const trimmedHistory = Array.isArray(data.inputHistory)
        ? data.inputHistory.slice(-HISTORY_LIMIT)
        : data.inputHistory;
    return {
        ...data,
        inputHistory: trimmedHistory,
        preHandoffItems: data.preHandoffItems,
    };
}
/**
 * Create an SDK-native handoff tool targeting `targetAgent`.
 *
 * Derives an OpenAI-API-compliant tool name from the agent name, validates
 * the handoff payload against the context schema (falling back to schema
 * defaults when validation fails), runs any caller-supplied onHandoff hook,
 * and applies a context-type-appropriate input filter.
 */
export function createAgentHandoff(
    targetAgent: Agent,
    contextType: HandoffContextType,
    config: AgentHandoffConfig = {}
): Handoff {
    const logger = config.logger;
    const inputSchema = config.inputSchema || getDefaultSchemaForContext(contextType);
    logger?.log(`Creating handoff to ${targetAgent.name} with context type: ${contextType}`);

    // Build an OpenAI-API-compliant tool name: strip illegal characters,
    // collapse and trim underscores, cap the length at 50 chars.
    let safeAgentName = targetAgent.name.replace(/[^a-zA-Z0-9_-]/g, '_');
    safeAgentName = safeAgentName.replace(/_+/g, '_');
    safeAgentName = safeAgentName.replace(/^_+|_+$/g, '');
    safeAgentName = safeAgentName.substring(0, 50);
    const toolName = `handoff_to_${safeAgentName}`;
    logger?.log(`Creating handoff tool: ${toolName} -> ${targetAgent.name}`);

    const handleHandoff = async (runContext: any, inputString: any) => {
        try {
            const raw = typeof inputString === 'string' ? inputString : '{}';
            let input = JSON.parse(raw || '{}');
            // Validate the parsed payload; on failure fall back to the
            // schema's defaults rather than rejecting the handoff.
            const schema = config.inputSchema || getDefaultSchemaForContext(contextType);
            const outcome = schema.safeParse(input);
            if (outcome.success) {
                logger?.log(`Handoff input validation succeeded for ${targetAgent.name}`);
                input = outcome.data;
            } else {
                logger?.log(`Handoff input validation failed for ${targetAgent.name}, enriching with defaults:`, outcome.error.issues.map(i => i.path.join('.') + ': ' + i.message));
                input = schema.parse({});
                logger?.log(`Using default context for handoff to ${targetAgent.name}`);
            }
            logger?.log(`Handoff to ${targetAgent.name} with input:`, input);
            // Caller-supplied hook first, then a debug event for tracing.
            config.onHandoff?.(runContext, input);
            logHandoffEvent(targetAgent.name, contextType, input, logger);
        } catch (error) {
            logger?.log(`Error in handoff to ${targetAgent.name}:`, error);
            throw error;
        }
    };

    return handoff(targetAgent, {
        inputType: inputSchema,
        toolNameOverride: toolName,
        toolDescriptionOverride: `Transfer control to ${targetAgent.name} with structured context data`,
        onHandoff: handleHandoff,
        inputFilter: config.inputFilter || createDefaultInputFilter(contextType)
    });
}
// Create handoff for pipeline execution
export function createPipelineHandoff(
targetAgent: Agent,
pipelineState: z.infer<typeof PipelineExecutionState>,
logger?: PrefixLogger
): Handoff {
const pipelineContext = {
reason: 'pipeline_execution' as const,
parentAgent: pipelineState.callingAgent,
transferCount: 0,
pipelineName: pipelineState.pipelineName,
currentStep: pipelineState.currentStep,
totalSteps: pipelineState.totalSteps,
isLastStep: pipelineState.currentStep >= pipelineState.totalSteps - 1,
pipelineData: pipelineState.pipelineData || null,
stepResults: pipelineState.stepResults || null
};
return createAgentHandoff(targetAgent, 'pipeline', {
inputSchema: PipelineContext,
onHandoff: (context, input) => {
logger?.log(`Pipeline step ${pipelineState.currentStep + 1}/${pipelineState.totalSteps} - handing off to ${targetAgent.name}`);
// Store pipeline state for the target agent
storePipelineStateForAgent(targetAgent.name, pipelineState);
},
inputFilter: (data) => {
// Inject pipeline context into the conversation
const contextMessage = createPipelineContextMessage(pipelineContext);
return {
...data,
newItems: [
...data.newItems,
{
type: 'message',
role: 'system',
content: contextMessage
}
]
};
},
logger
});
}
/**
 * Create a handoff for delegating a task to a specialist agent.
 * Logs the task type/priority at handoff time; context shaping is the
 * standard 'task' filter provided by createAgentHandoff.
 */
export function createTaskHandoff(
    targetAgent: Agent,
    taskContext: {
        taskType: string;
        priority: 'low' | 'medium' | 'high';
        parentAgent: string;
        requirements?: string[];
        resources?: Record<string, any>;
    },
    logger?: PrefixLogger
): Handoff {
    const onHandoff = (context: any, input: any) => {
        logger?.log(`Task delegation to ${targetAgent.name}:`, {
            taskType: taskContext.taskType,
            priority: taskContext.priority
        });
    };
    return createAgentHandoff(targetAgent, 'task', {
        inputSchema: TaskContext,
        onHandoff,
        logger
    });
}
/**
 * Schema for general agent-to-agent handoffs: always the basic
 * HandoffContext. The richer PipelineContext/TaskContext schemas are
 * applied only by createPipelineHandoff() / createTaskHandoff().
 */
export function getSchemaForAgent(agentConfig: z.infer<typeof WorkflowAgent>): z.ZodObject<any> {
    return HandoffContext;
}
/**
 * Context filter for general agent handoffs: a pure passthrough.
 * Pipeline/task-specific filtering lives in createPipelineHandoff and
 * createTaskHandoff instead.
 */
export function createContextFilterForAgent(agentConfig: z.infer<typeof WorkflowAgent>) {
    return (data: HandoffInputData): HandoffInputData => data;
}
// Helper functions

// Emit a structured debug entry for a handoff (no-op when logger is absent).
function logHandoffEvent(
    targetAgent: string,
    contextType: string,
    input: any,
    logger?: PrefixLogger
) {
    const hasContext = !!input && Object.keys(input).length > 0;
    logger?.log(`🔄 SDK HANDOFF: -> ${targetAgent} (${contextType})`, {
        targetAgent,
        contextType,
        hasContext
    });
}
// Simple in-memory store mapping agent name -> pipeline execution state.
// (In production, replace with proper state management.)
const pipelineStates = new Map<string, z.infer<typeof PipelineExecutionState>>();

// Record the pipeline state the named agent should resume from.
function storePipelineStateForAgent(
    agentName: string,
    state: z.infer<typeof PipelineExecutionState>
) {
    pipelineStates.set(agentName, state);
}

/** Fetch the stored pipeline state for an agent, or null when none exists. */
export function getPipelineStateForAgent(
    agentName: string
): z.infer<typeof PipelineExecutionState> | null {
    return pipelineStates.get(agentName) ?? null;
}
/**
 * Render pipeline progress as a markdown system message for the next agent.
 * Expects the context object built in createPipelineHandoff (pipelineName,
 * currentStep, totalSteps, isLastStep, stepResults, pipelineData).
 * Note: the template content is emitted verbatim, so it is intentionally
 * not indented.
 */
function createPipelineContextMessage(context: any): string {
    return `## Pipeline Execution Context
Pipeline: ${context.pipelineName}
Step: ${context.currentStep + 1}/${context.totalSteps}
${context.isLastStep ? '**Final Step**: Provide complete results.' : '**Continue**: Pass results to next step.'}
${context.stepResults && context.stepResults.length > 0
? `Previous Results:\n${JSON.stringify(context.stepResults, null, 2)}`
: 'No previous results.'
}
${context.pipelineData
? `Pipeline Data:\n${JSON.stringify(context.pipelineData, null, 2)}`
: ''
}`;
}

View file

@@ -0,0 +1,566 @@
// External dependencies
import { tool, Tool } from "@openai/agents";
import { createOpenAI } from "@ai-sdk/openai";
import { embed, generateText } from "ai";
import { z } from "zod";
import { composio } from "@/src/application/lib/composio/composio";
import { SignJWT } from "jose";
import crypto from "crypto";
// Internal dependencies
import { embeddingModel } from "@/app/lib/embedding";
import { getMcpClient } from "@/app/lib/mcp";
import { qdrantClient } from "@/app/lib/qdrant";
import { EmbeddingRecord } from "@/app/lib/types/datasource_types";
import { WorkflowAgent, WorkflowTool } from "@/app/lib/types/workflow_types";
import { PrefixLogger } from "@/app/lib/utils";
import { UsageTracker } from "@/app/lib/billing";
import { DataSource } from "@/src/entities/models/data-source";
import { IDataSourcesRepository } from "@/src/application/repositories/data-sources.repository.interface";
import { IDataSourceDocsRepository } from "@/src/application/repositories/data-source-docs.repository.interface";
import { container } from "@/di/container";
import { IProjectsRepository } from "@/src/application/repositories/projects.repository.interface";
// Provider configuration
// Resolved from env: PROVIDER_API_KEY falls back to OPENAI_API_KEY; base URL
// and default model are optional overrides (model defaults to gpt-4o).
const PROVIDER_API_KEY = process.env.PROVIDER_API_KEY || process.env.OPENAI_API_KEY || '';
const PROVIDER_BASE_URL = process.env.PROVIDER_BASE_URL || undefined;
const MODEL = process.env.PROVIDER_DEFAULT_MODEL || 'gpt-4o';
// Shared OpenAI-compatible client used by the mock-tool text generation below.
const openai = createOpenAI({
    apiKey: PROVIDER_API_KEY,
    baseURL: PROVIDER_BASE_URL,
});
/**
 * Simulate execution of a mock tool: asks the configured LLM to produce a
 * plausible tool response for the given arguments, and records the token
 * usage against the usage tracker.
 */
export async function invokeMockTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    toolName: string,
    args: string,
    description: string,
    mockInstructions: string
): Promise<string> {
    logger = logger.child(`invokeMockTool`);
    logger.log(`toolName: ${toolName}`);
    logger.log(`args: ${args}`);
    logger.log(`description: ${description}`);
    logger.log(`mockInstructions: ${mockInstructions}`);
    const systemPrompt = `You are simulating the execution of a tool called '${toolName}'. Here is the description of the tool: ${description}. Here are the instructions for the mock tool: ${mockInstructions}. Generate a realistic response as if the tool was actually executed with the given parameters.`;
    const userPrompt = `Generate a realistic response for the tool '${toolName}' with these parameters: ${args}. The response should be concise and focused on what the tool would actually return.`;
    const { text, usage } = await generateText({
        model: openai(MODEL),
        messages: [
            { role: "system" as const, content: systemPrompt },
            { role: "user" as const, content: userPrompt },
        ],
    });
    logger.log(`generated text: ${text}`);
    // Record LLM token consumption for billing.
    usageTracker.track({
        type: "LLM_USAGE",
        modelName: MODEL,
        inputTokens: usage.promptTokens,
        outputTokens: usage.completionTokens,
        context: "agents_runtime.mock_tool",
    });
    return text;
}
// Helper to handle RAG tool calls
/**
 * Retrieval-augmented search over a project's data sources.
 *
 * Embeds `query`, runs a vector search in Qdrant restricted to the requested
 * (and currently active) sources, and returns up to `k` hits. With returnType
 * 'chunks' the matched chunk text is returned; with 'content' each hit's full
 * document content is fetched from the docs repository instead.
 * Returns [] when none of the requested sources are active for the project.
 */
export async function invokeRagTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    projectId: string,
    query: string,
    sourceIds: string[],
    returnType: 'chunks' | 'content',
    k: number
): Promise<{
    title: string;
    name: string;
    content: string;
    docId: string;
    sourceId: string;
}[]> {
    logger = logger.child(`invokeRagTool`);
    logger.log(`projectId: ${projectId}`);
    logger.log(`query: ${query}`);
    logger.log(`sourceIds: ${sourceIds.join(', ')}`);
    logger.log(`returnType: ${returnType}`);
    logger.log(`k: ${k}`);
    const dataSourcesRepository = container.resolve<IDataSourcesRepository>('dataSourcesRepository');
    const dataSourceDocsRepository = container.resolve<IDataSourceDocsRepository>('dataSourceDocsRepository');
    // Create embedding for question
    const { embedding, usage } = await embed({
        model: embeddingModel,
        value: query,
    });
    // track embedding token usage for billing
    usageTracker.track({
        type: "EMBEDDING_MODEL_USAGE",
        modelName: embeddingModel.modelId,
        tokens: usage.tokens,
        context: "agents_runtime.rag_tool.embedding_usage",
    });
    // Fetch all active data sources for this project (paginated via cursor)
    const sources: z.infer<typeof DataSource>[] = [];
    let cursor = undefined;
    do {
        const resp = await dataSourcesRepository.list(projectId, {
            active: true,
        }, cursor);
        sources.push(...resp.items);
        cursor = resp.nextCursor;
    } while(cursor);
    // Only search sources that were both requested and are active
    const validSourceIds = sources
        .filter(s => sourceIds.includes(s.id)) // id should be in sourceIds
        .map(s => s.id);
    logger.log(`valid source ids: ${validSourceIds.join(', ')}`);
    // if no sources found, return empty response
    if (validSourceIds.length === 0) {
        logger.log(`no valid source ids found, returning empty response`);
        return [];
    }
    // Perform vector search scoped to this project and the valid sources
    const qdrantResults = await qdrantClient.query("embeddings", {
        query: embedding,
        filter: {
            must: [
                { key: "projectId", match: { value: projectId } },
                { key: "sourceId", match: { any: validSourceIds } },
            ],
        },
        limit: k,
        with_payload: true,
    });
    logger.log(`found ${qdrantResults.points.length} results`);
    // Project the stored payload fields for each hit
    let results = qdrantResults.points.map((point) => {
        const { title, name, content, docId, sourceId } = point.payload as z.infer<typeof EmbeddingRecord>['payload'];
        return {
            title,
            name,
            content,
            docId,
            sourceId,
        };
    });
    if (returnType === 'chunks') {
        logger.log(`returning chunks`);
        return results;
    }
    // otherwise, fetch the full doc contents from mongodb
    const docs = await dataSourceDocsRepository.bulkFetch(results.map(r => r.docId));
    logger.log(`fetched docs: ${docs.length}`);
    // Replace each chunk's content with its full document content
    // (empty string when the doc is missing from the repository)
    results = results.map(r => {
        const doc = docs.find(d => d.id === r.docId);
        return {
            ...r,
            content: doc?.content || '',
        };
    });
    return results;
}
/**
 * Forward a tool call to the project's configured webhook endpoint.
 *
 * The request body (a serialized tool call) is hashed, and the hash is
 * embedded in a short-lived (5 min) HS256 JWT signed with the project
 * secret, so the receiver can verify both origin and payload integrity.
 *
 * Throws when the project or its webhook URL is missing, or when the
 * webhook responds with a non-2xx status.
 * NOTE(review): usageTracker is currently unused here — kept for signature
 * parity with the other invoke* helpers.
 */
export async function invokeWebhookTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    projectId: string,
    name: string,
    input: any,
): Promise<unknown> {
    logger = logger.child(`invokeWebhookTool`);
    logger.log(`projectId: ${projectId}`);
    logger.log(`name: ${name}`);
    logger.log(`input: ${JSON.stringify(input)}`);
    const projectsRepository = container.resolve<IProjectsRepository>('projectsRepository');
    const project = await projectsRepository.fetch(projectId);
    if (!project) {
        throw new Error('Project not found');
    }
    if (!project.webhookUrl) {
        throw new Error('Webhook URL not found');
    }
    // prepare request body: an OpenAI-style function tool call
    const toolCall = {
        id: crypto.randomUUID(),
        type: "function" as const,
        function: {
            name,
            arguments: JSON.stringify(input),
        },
    }
    const content = JSON.stringify({
        toolCall,
    });
    const requestId = crypto.randomUUID();
    // hash of the payload, bound into the JWT below for integrity
    const bodyHash = crypto
        .createHash('sha256')
        .update(content, 'utf8')
        .digest('hex');
    // sign request: short-lived JWT keyed by the project secret;
    // audience is the webhook URL, subject identifies this tool call
    const jwt = await new SignJWT({
        requestId,
        projectId,
        bodyHash,
    })
        .setProtectedHeader({
            alg: 'HS256',
            typ: 'JWT',
        })
        .setIssuer('rowboat')
        .setAudience(project.webhookUrl)
        .setSubject(`tool-call-${toolCall.id}`)
        .setJti(requestId)
        .setIssuedAt()
        .setExpirationTime("5 minutes")
        .sign(new TextEncoder().encode(project.secret));
    // make request; the signature travels in a dedicated header
    const request = {
        requestId,
        content,
    };
    const response = await fetch(project.webhookUrl, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            'x-signature-jwt': jwt,
        },
        body: JSON.stringify(request),
    });
    if (!response.ok) {
        throw new Error(`Failed to call webhook: ${response.status}: ${response.statusText}`);
    }
    const responseBody = await response.json();
    return responseBody;
}
// Helper to handle MCP tool calls
/**
 * Invoke a tool on a project-configured custom MCP server.
 *
 * Resolves the server URL from the project's customMcpServers config,
 * opens a client, calls the tool, and always closes the client — even
 * when the call throws (the previous version leaked the connection on
 * error because close() only ran on the success path).
 *
 * Throws when the project or the server URL for `mcpServerName` is missing.
 * NOTE(review): usageTracker is currently unused here — kept for signature
 * parity with the other invoke* helpers.
 */
export async function invokeMcpTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    projectId: string,
    name: string,
    input: any,
    mcpServerName: string
) {
    logger = logger.child(`invokeMcpTool`);
    logger.log(`projectId: ${projectId}`);
    logger.log(`name: ${name}`);
    logger.log(`input: ${JSON.stringify(input)}`);
    logger.log(`mcpServerName: ${mcpServerName}`);
    // Get project configuration
    const projectsRepository = container.resolve<IProjectsRepository>('projectsRepository');
    const project = await projectsRepository.fetch(projectId);
    if (!project) {
        throw new Error(`project ${projectId} not found`);
    }
    // get server url from project data
    const mcpServerURL = project.customMcpServers?.[mcpServerName]?.serverUrl;
    if (!mcpServerURL) {
        throw new Error(`mcp server url not found for project ${projectId} and server ${mcpServerName}`);
    }
    const client = await getMcpClient(mcpServerURL, mcpServerName);
    try {
        const result = await client.callTool({
            name,
            arguments: input,
        });
        logger.log(`mcp tool result: ${JSON.stringify(result)}`);
        return result;
    } finally {
        // Release the client connection regardless of call outcome.
        await client.close();
    }
}
/**
 * Execute a Composio tool for a project. Toolkits that require auth
 * resolve the project's connected account id first; no-auth toolkits run
 * without one. Tracks Composio usage for billing and returns the tool's
 * result data.
 */
export async function invokeComposioTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    projectId: string,
    name: string,
    composioData: z.infer<typeof WorkflowTool>['composioData'] & {},
    input: any,
) {
    logger = logger.child(`invokeComposioTool`);
    logger.log(`projectId: ${projectId}`);
    logger.log(`name: ${name}`);
    logger.log(`input: ${JSON.stringify(input)}`);
    const { slug, toolkitSlug, noAuth } = composioData;
    // Authenticated toolkits need the project's connected account id.
    let connectedAccountId: string | undefined = undefined;
    if (!noAuth) {
        const projectsRepository = container.resolve<IProjectsRepository>('projectsRepository');
        const project = await projectsRepository.fetch(projectId);
        if (!project) {
            throw new Error(`project ${projectId} not found`);
        }
        connectedAccountId = project.composioConnectedAccounts?.[toolkitSlug]?.id;
        if (!connectedAccountId) {
            throw new Error(`connected account id not found for project ${projectId} and toolkit ${toolkitSlug}`);
        }
    }
    const result = await composio.tools.execute(slug, {
        userId: projectId,
        arguments: input,
        connectedAccountId,
    });
    logger.log(`composio tool result: ${JSON.stringify(result)}`);
    // Record tool invocation for billing.
    usageTracker.track({
        type: "COMPOSIO_TOOL_USAGE",
        toolSlug: slug,
        context: "agents_runtime.composio_tool",
    });
    return result.data;
}
/**
 * Build the `rag_search` Tool for an agent configured with RAG data
 * sources. Throws at creation time when the agent has no sources.
 */
export function createRagTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    config: z.infer<typeof WorkflowAgent>,
    projectId: string
): Tool {
    if (!config.ragDataSources?.length) {
        throw new Error(`data sources not found for agent ${config.name}`);
    }
    return tool({
        name: "rag_search",
        description: config.description,
        parameters: z.object({
            query: z.string().describe("The query to search for")
        }),
        async execute(input: { query: string }) {
            // Delegate to the shared RAG helper; defaults: chunks, k=3.
            const results = await invokeRagTool(
                logger,
                usageTracker,
                projectId,
                input.query,
                config.ragDataSources || [],
                config.ragReturnType || 'chunks',
                config.ragK || 3
            );
            return JSON.stringify({ results });
        }
    });
}
/**
 * Wrap a mock tool config as an executable Tool. Failures are reported to
 * the model as a JSON error payload rather than thrown.
 */
export function createMockTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    config: z.infer<typeof WorkflowTool>,
): Tool {
    const { name, description, parameters, mockInstructions } = config;
    return tool({
        name,
        description,
        strict: false,
        // Permissive JSON-schema parameters taken straight from config.
        parameters: {
            type: 'object' as const,
            properties: parameters.properties,
            required: parameters.required || [],
            additionalProperties: true,
        },
        async execute(input: any) {
            try {
                const result = await invokeMockTool(
                    logger,
                    usageTracker,
                    name,
                    JSON.stringify(input),
                    description,
                    mockInstructions || ''
                );
                return JSON.stringify({ result });
            } catch (error) {
                logger.log(`Error executing mock tool ${name}:`, error);
                return JSON.stringify({
                    error: `Mock tool execution failed: ${error}`,
                });
            }
        }
    });
}
/**
 * Wrap a webhook-backed tool config as an executable Tool. Failures are
 * reported to the model as a JSON error payload rather than thrown.
 */
export function createWebhookTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    config: z.infer<typeof WorkflowTool>,
    projectId: string,
): Tool {
    const { name, description, parameters } = config;
    return tool({
        name,
        description,
        strict: false,
        // Permissive JSON-schema parameters taken straight from config.
        parameters: {
            type: 'object' as const,
            properties: parameters.properties,
            required: parameters.required || [],
            additionalProperties: true,
        },
        async execute(input: any) {
            try {
                const result = await invokeWebhookTool(logger, usageTracker, projectId, name, input);
                return JSON.stringify({ result });
            } catch (error) {
                logger.log(`Error executing webhook tool ${name}:`, error);
                return JSON.stringify({
                    error: `Tool execution failed: ${error}`,
                });
            }
        }
    });
}
/**
 * Wrap an MCP-backed tool config as an executable Tool. Failures are
 * reported to the model as a JSON error payload rather than thrown.
 */
export function createMcpTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    config: z.infer<typeof WorkflowTool>,
    projectId: string
): Tool {
    const { name, description, parameters, mcpServerName } = config;
    return tool({
        name,
        description,
        strict: false,
        // Permissive JSON-schema parameters taken straight from config.
        parameters: {
            type: 'object' as const,
            properties: parameters.properties,
            required: parameters.required || [],
            additionalProperties: true,
        },
        async execute(input: any) {
            try {
                const result = await invokeMcpTool(logger, usageTracker, projectId, name, input, mcpServerName || '');
                return JSON.stringify({ result });
            } catch (error) {
                logger.log(`Error executing mcp tool ${name}:`, error);
                return JSON.stringify({
                    error: `Tool execution failed: ${error}`,
                });
            }
        }
    });
}
/**
 * Wrap a Composio-backed tool config as an executable Tool. Throws at
 * creation time when composioData is missing; runtime failures are
 * reported to the model as a JSON error payload rather than thrown.
 */
export function createComposioTool(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    config: z.infer<typeof WorkflowTool>,
    projectId: string
): Tool {
    const { name, description, parameters, composioData } = config;
    if (!composioData) {
        throw new Error(`composio data not found for tool ${name}`);
    }
    return tool({
        name,
        description,
        strict: false,
        // Permissive JSON-schema parameters taken straight from config.
        parameters: {
            type: 'object' as const,
            properties: parameters.properties,
            required: parameters.required || [],
            additionalProperties: true,
        },
        async execute(input: any) {
            try {
                const result = await invokeComposioTool(logger, usageTracker, projectId, name, composioData, input);
                return JSON.stringify({ result });
            } catch (error) {
                logger.log(`Error executing composio tool ${name}:`, error);
                return JSON.stringify({
                    error: `Tool execution failed: ${error}`,
                });
            }
        }
    });
}
/**
 * Instantiate every workflow tool from its config map.
 * Dispatch order: mock -> mcp -> composio -> webhook; anything unmatched
 * (placeholder tools with no concrete backend) falls back to a mock.
 */
export function createTools(
    logger: PrefixLogger,
    usageTracker: UsageTracker,
    projectId: string,
    workflow: { tools: z.infer<typeof WorkflowTool>[] },
    toolConfig: Record<string, z.infer<typeof WorkflowTool>>,
): Record<string, Tool> {
    const toolLogger = logger.child('createTools');
    const entries = Object.entries(toolConfig);
    toolLogger.log(`=== CREATING ${entries.length} TOOLS ===`);
    const tools: Record<string, Tool> = {};
    for (const [toolName, config] of entries) {
        const kind = config.mockTool ? 'mock' : config.isMcp ? 'mcp' : config.isComposio ? 'composio' : 'webhook';
        toolLogger.log(`creating tool: ${toolName} (type: ${kind})`);
        if (config.mockTool) {
            tools[toolName] = createMockTool(logger, usageTracker, config);
            toolLogger.log(`✓ created mock tool: ${toolName}`);
        } else if (config.isMcp) {
            tools[toolName] = createMcpTool(logger, usageTracker, config, projectId);
            toolLogger.log(`✓ created mcp tool: ${toolName} (server: ${config.mcpServerName || 'unknown'})`);
        } else if (config.isComposio) {
            tools[toolName] = createComposioTool(logger, usageTracker, config, projectId);
            toolLogger.log(`✓ created composio tool: ${toolName}`);
        } else if (config.isWebhook) {
            tools[toolName] = createWebhookTool(logger, usageTracker, config, projectId);
            toolLogger.log(`✓ created webhook tool: ${toolName} (fallback)`);
        } else {
            // Placeholder tools (no concrete backend) are mocked.
            tools[toolName] = createMockTool(logger, usageTracker, config);
            toolLogger.log(`✓ created mock tool: ${toolName}`);
        }
    }
    toolLogger.log(`=== TOOL CREATION COMPLETE ===`);
    return tools;
}

View file

@@ -0,0 +1,146 @@
/**
 * Instructions for agents that use RAG (Retrieval Augmented Generation)
 *
 * @param ragToolName - name of the article retrieval tool exposed to the agent
 * @returns prompt text telling the agent to fetch articles before answering
 */
export const RAG_INSTRUCTIONS = (ragToolName: string): string => `
# Instructions about using the article retrieval tool
- Where relevant, use the articles tool: ${ragToolName} to fetch articles with knowledge relevant to the query and use its contents to respond to the user.
- Do not send a separate message first asking the user to wait while you look up information. Immediately fetch the articles and respond to the user with the answer to their query.
- Do not make up information. If the article's contents do not have the answer, give up control of the chat (or transfer to your parent agent, as per your transfer instructions). Do not say anything to the user.
`;
/**
 * Instructions for child agents that are aware of parent agents
 * These instructions guide agents that can transfer control to parent agents
 *
 * @param candidateParentsNameDescriptionTools - rendered list of candidate
 *        parent agents (names, descriptions, transfer tools)
 */
export const TRANSFER_PARENT_AWARE_INSTRUCTIONS = (candidateParentsNameDescriptionTools: string): string => `
# Instructions about using your parent agents
You have the following candidate parent agents that you can transfer the chat to, using the appropriate tool calls for the transfer:
${candidateParentsNameDescriptionTools}.
## Notes:
- During runtime, you will be provided with a tool call for exactly one of these parent agents that you can use. Use that tool call to transfer the chat to the parent agent in case you are unable to handle the chat (e.g. if it is not in your scope of instructions).
- Transfer the chat to the appropriate agent, based on the chat history and / or the user's request.
- When you transfer the chat to another agent, you should not provide any response to the user. For example, do not say 'Transferring chat to X agent' or anything like that. Just invoke the tool call to transfer to the other agent.
- Do NOT ever mention the existence of other agents. For example, do not say 'Please check with X agent for details regarding processing times.' or anything like that.
- If any other agent transfers the chat to you without responding to the user, it means that they don't know how to help. Do not transfer the chat back to the same agent in this case. In such cases, you should transfer to the escalation agent using the appropriate tool call. Never ask the user to contact support.
`;
/**
 * Instructions for child agents that give up control to parent agents
 * These instructions guide agents that need to relinquish control to parent agents
 *
 * @param candidateParentsNameDescriptionTools - rendered name/tool text for
 *        the parent the agent should yield control to
 */
export const TRANSFER_GIVE_UP_CONTROL_INSTRUCTIONS = (candidateParentsNameDescriptionTools: string): string => `
# Instructions about giving up chat control
- If you are unable to handle the chat (e.g. if it is not in your scope of instructions), you should give up control of the chat by calling: ${candidateParentsNameDescriptionTools}.
- If you already have an instruction before this about calling the same agent, you can discard this particular instruction.
## Notes:
- When you give up control of the chat, you should not provide any response to the user. Just invoke the tool call to give up control.
`;
/**
 * Instructions for parent agents that need to transfer the chat to other specialized (children) agents
 * These instructions guide parent agents in delegating tasks to specialized child agents
 *
 * @param otherAgentNameDescriptionsTools - rendered list of child agents
 *        (names, descriptions, transfer tools)
 */
export const TRANSFER_CHILDREN_INSTRUCTIONS = (otherAgentNameDescriptionsTools: string): string => `
# Instructions about using other specialized agents
You have the following specialized agents that you can transfer the chat to, using the appropriate tool calls for the transfer:
${otherAgentNameDescriptionsTools}
## Notes:
- Transfer the chat to the appropriate agent, based on the chat history and / or the user's request.
- When you transfer the chat to another agent, you should not provide any response to the user. For example, do not say 'Transferring chat to X agent' or anything like that. Just invoke the tool call to transfer to the other agent.
- Do NOT ever mention the existence of other agents. For example, do not say 'Please check with X agent for details regarding processing times.' or anything like that.
- If any other agent transfers the chat to you without responding to the user, it means that they don't know how to help. Do not transfer the chat back to the same agent in this case. In such cases, you should transfer to the escalation agent using the appropriate tool call. Never ask the user to contact support.
`;
/**
 * Additional instruction for escalation agent when called due to an error
 * These instructions are used when other agents are unable to handle the chat
 * (appended to the escalation agent's own instructions).
 */
export const ERROR_ESCALATION_AGENT_INSTRUCTIONS = `
# Context
The rest of the parts of the chatbot were unable to handle the chat. Hence, the chat has been escalated to you. In addition to your other instructions, tell the user that you are having trouble handling the chat - say "I'm having trouble helping with your request. Sorry about that.". Remember you are a part of the chatbot as well.
`;
/**
 * Universal system message formatting
 * Template for system-wide context and instructions
 *
 * @param systemMessage - caller-supplied system-wide context to embed
 */
export const SYSTEM_MESSAGE = (systemMessage: string): string => `
# Additional System-Wide Context or Instructions:
${systemMessage}
`;
/**
 * Instructions for non-repeat child transfer
 * Critical rules for handling agent transfers and handoffs to prevent circular
 * transfers and parallel handoff attempts (the runtime supports at most one
 * transfer tool call per output).
 */
export const CHILD_TRANSFER_RELATED_INSTRUCTIONS = `
# Critical Rules for Agent Transfers and Handoffs
- SEQUENTIAL TRANSFERS AND RESPONSES:
1. BEFORE transferring to any agent:
- Plan your complete sequence of needed transfers
- Document which responses you need to collect
2. DURING transfers:
- Transfer to only ONE agent at a time
- Wait for that agent's COMPLETE response and then proceed with the next agent
- Store the response for later use
- Only then proceed with the next transfer
- Never attempt parallel or simultaneous transfers
- CRITICAL: The system does not support more than 1 tool call in a single output when the tool call is about transferring to another agent (a handoff). You must only put out 1 transfer related tool call in one output.
3. AFTER receiving a response:
- Do not transfer to another agent until you've processed the current response
- If you need to transfer to another agent, wait for your current processing to complete
- Never transfer back to an agent that has already responded
- COMPLETION REQUIREMENTS:
- Never provide final response until ALL required agents have been consulted
- Never attempt to get multiple responses in parallel
- If a transfer is rejected due to multiple handoffs:
1. Complete current response processing
2. Then retry the transfer as next in sequence
3. Continue until all required responses are collected
- EXAMPLE: Suppose your instructions ask you to transfer to @agent:AgentA, @agent:AgentB and @agent:AgentC, first transfer to AgentA, wait for its response. Then transfer to AgentB, wait for its response. Then transfer to AgentC, wait for its response. Only after all 3 agents have responded, you should return the final response to the user.
`;
/**
 * Base instructions for conversation-type agents: these agents talk to the
 * user directly and must emit plain text (never JSON).
 * Fixes the garbled double negative "Do not *NOT* put out a JSON" in the
 * original prompt, which contradicted the intended rule.
 */
export const CONVERSATION_TYPE_INSTRUCTIONS = (): string => `
- You are an agent that is part of a workflow of (one or more) interconnected agents that work together to be an assistant.
- You will be directly interacting with the user.
- It is possible that some other agent might have invoked you to talk to the user.
- Reading the messages in the chat history will give you context about the conversation. But importantly, your response should simply be the direct text to the user.
- IMPORTANT: Do *NOT* put out a JSON - other agents might do so but that is because they are internal agents. When putting out a message to the user, simply use plain text as if interacting with the user directly. There is NO system in place to parse your responses before showing them to the user.
- Seeing the tool calls that transfer / handoff control will help you understand the flow of the conversation and which agent produced each message.
- If you see an internal message from other agents as the last message in the chat history, the message is meant for you - the user won't know about it.
- When using internal messages that other agents have put out, make sure to write it in a way that is suitable to be shown to the user and in accordance with further instructions below.
- These are high level instructions only. The user will provide more specific instructions which will be below.
`;
/**
 * High-level system instructions for "task" agents: internal workers whose
 * output is consumed by other agents in the workflow rather than shown to the
 * end user. The returned text is prepended to the agent's own instructions.
 */
export function TASK_TYPE_INSTRUCTIONS(): string {
    return `
- You are an agent that is part of a workflow of (one or more) interconnected agents that work together to be an assistant.
- Your response will not be shown directly to the user. Instead, your response will be used by the agent that might have invoked you and (possibly) other agents in the workflow. Therefore, your responses must be worded in such a way that it is useful for other agents and not addressed to the user. Add a prefix 'Internal message' to your response.
- Provide clear, direct responses that other agents can easily understand and act upon.
- IMPORTANT: If you have all the information to take action, such as calling a tool or writing a response, you should do that in the immediate turn. Do not delay action unnecessarily.
- Reading the messages in the chat history will give you context about the conversation.
- Seeing the tool calls that transfer / handoff control will help you understand the flow of the conversation and which agent produced each message.
- These are high level instructions only. The user will provide more specific instructions which will be below.
`;
}
/**
 * High-level system instructions for "pipeline" agents: one step in a
 * sequential chain. Pipeline agents may only use tools — they are explicitly
 * barred from transferring to other agents.
 */
export const PIPELINE_TYPE_INSTRUCTIONS = (): string => {
    const instructions = `
- You are a pipeline agent that is part of a sequential execution chain within a larger workflow.
- You are executing as one step in a multi-step pipeline process.
- Your input comes from the previous step in the pipeline (or the initial input if you're the first step).
- Your output will be passed to the next step in the pipeline (or returned as the final result if you're the last step).
- CRITICAL: You CANNOT transfer to other agents or pipelines. You can only use tools to complete your specific task.
- Focus ONLY on your designated role in the pipeline. Process the input, perform your specific task, and provide clear output.
- Provide clear, actionable output that the next pipeline step can easily understand and work with.
- Do NOT attempt to handle tasks outside your specific pipeline role.
- Do NOT mention other agents or the pipeline structure to users.
- Your response should be self-contained and ready to be consumed by the next pipeline step.
- Reading the message history will show you the pipeline execution flow up to your step.
- These are high level instructions only. The user will provide more specific instructions which will be below.
`;
    return instructions;
};

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,322 @@
// Pipeline State Manager for handling complex pipeline execution flow
import { Agent } from "@openai/agents";
import { z } from "zod";
import { WorkflowPipeline, WorkflowAgent } from "@/app/lib/types/workflow_types";
import { PipelineExecutionState } from "./agents";
import { PrefixLogger } from "@/app/lib/utils";
import { createPipelineHandoff } from "./agent-handoffs";
/**
 * Outcome of processing one pipeline step in PipelineStateManager.
 * Exactly one of the optional fields groups is populated depending on `action`.
 */
export interface PipelineExecutionResult {
    /** What the caller should do next: continue, finish, or fail. */
    action: 'handoff' | 'complete' | 'error';
    /** Name of the agent to hand off to (set when action === 'handoff'). */
    nextAgent?: string;
    handoff?: any; // SDK Handoff object built by createPipelineHandoff (action === 'handoff')
    /** Context payload attached to a handoff (step counters, pipeline data, …). */
    context?: any;
    /** Aggregated pipeline results (set when action === 'complete'). */
    results?: any;
    /** Agent that originally invoked the pipeline; control returns here on completion. */
    returnToAgent?: string;
    /** Human-readable failure description (set when action === 'error'). */
    error?: string;
}
/**
 * Manages per-agent execution state for sequential pipelines.
 *
 * State is keyed by the name of the agent responsible for the current step, so
 * each agent can look up the pipeline context it is executing under. After each
 * step the manager decides whether to hand off to the next agent, return
 * aggregated results to the calling agent, or surface an error.
 */
export class PipelineStateManager {
    // agent name -> execution state for the step assigned to that agent
    private pipelineStates = new Map<string, z.infer<typeof PipelineExecutionState>>();
    private logger: PrefixLogger;
    constructor(logger: PrefixLogger) {
        this.logger = logger.child('PipelineStateManager');
    }
    /**
     * Start a new pipeline run: build the step-0 state and store it under the
     * pipeline's first agent.
     *
     * @param pipelineName - Name of the pipeline being executed.
     * @param callingAgent - Agent to return control to when the pipeline ends.
     * @param pipelineConfig - Pipeline definition (ordered agent list, description).
     * @param initialData - Optional seed data made available to the first step.
     * @returns The freshly created execution state.
     */
    initializePipelineExecution(
        pipelineName: string,
        callingAgent: string,
        pipelineConfig: z.infer<typeof WorkflowPipeline>,
        initialData?: Record<string, any>
    ): z.infer<typeof PipelineExecutionState> {
        const state: z.infer<typeof PipelineExecutionState> = {
            pipelineName,
            currentStep: 0,
            totalSteps: pipelineConfig.agents.length,
            callingAgent,
            pipelineData: initialData || null,
            stepResults: null,
            currentStepResult: null,
            startTime: new Date().toISOString(),
            metadata: {
                pipelineDescription: pipelineConfig.description
            }
        };
        // Store initial state for the first agent
        const firstAgent = pipelineConfig.agents[0];
        this.storePipelineState(firstAgent, state);
        this.logger.log(`🚀 Initialized pipeline "${pipelineName}" with ${state.totalSteps} steps`);
        this.logger.log(`First agent: ${firstAgent}, called by: ${callingAgent}`);
        return state;
    }
    /**
     * Record the result of the step just executed by `currentAgentName` and
     * decide the next action: hand off to the next agent, complete the
     * pipeline (returning results to the caller), or report an error.
     *
     * NOTE(review): on a handoff the completing agent's state entry is NOT
     * removed from the map (it is only cleared on completion/error) — confirm
     * stale entries are harmless for `isAgentInPipeline` checks.
     *
     * @param currentAgentName - Agent that just finished its step.
     * @param pipelineConfig - All pipeline definitions, keyed by pipeline name.
     * @param agents - All instantiated SDK agents, keyed by agent name.
     * @param stepResult - Optional result payload from the completed step; its
     *   `pipelineData` key (if present) is merged into the shared pipeline data.
     */
    async handlePipelineExecution(
        currentAgentName: string,
        pipelineConfig: Record<string, z.infer<typeof WorkflowPipeline>>,
        agents: Record<string, Agent>,
        stepResult?: Record<string, any>
    ): Promise<PipelineExecutionResult> {
        const state = this.getPipelineState(currentAgentName);
        if (!state) {
            return {
                action: 'error',
                error: `No pipeline state found for agent ${currentAgentName}`
            };
        }
        const pipeline = pipelineConfig[state.pipelineName];
        if (!pipeline) {
            return {
                action: 'error',
                error: `Pipeline ${state.pipelineName} not found in configuration`
            };
        }
        // Store current step result (mutates the state object held in the map)
        if (stepResult) {
            // Safely handle stepResults as flexible union type
            const existingResults = Array.isArray(state.stepResults) ? state.stepResults : [];
            state.stepResults = [...existingResults, stepResult];
            state.currentStepResult = stepResult;
            // Update pipeline data if result contains data to pass forward
            if (stepResult.pipelineData) {
                // Safely handle pipelineData as flexible union type
                const existingData = (typeof state.pipelineData === 'object' && state.pipelineData !== null) ? state.pipelineData : {};
                const newData = (typeof stepResult.pipelineData === 'object' && stepResult.pipelineData !== null) ? stepResult.pipelineData : {};
                state.pipelineData = {
                    ...existingData,
                    ...newData
                };
            }
        }
        this.logger.log(`📊 Pipeline "${state.pipelineName}" step ${state.currentStep + 1}/${state.totalSteps} completed by ${currentAgentName}`);
        // Check if this is the last step
        if (state.currentStep >= pipeline.agents.length - 1) {
            // Pipeline complete - return to calling agent
            this.logger.log(`✅ Pipeline "${state.pipelineName}" completed, returning to ${state.callingAgent}`);
            const finalResults = {
                pipelineName: state.pipelineName,
                totalSteps: state.totalSteps,
                stepResults: state.stepResults,
                finalData: state.pipelineData,
                completionTime: new Date().toISOString(),
                duration: Date.now() - new Date(state.startTime).getTime()
            };
            // Clean up state
            this.clearPipelineState(currentAgentName);
            return {
                action: 'complete',
                results: finalResults,
                returnToAgent: state.callingAgent
            };
        }
        // Continue to next step
        const nextStepIndex = state.currentStep + 1;
        const nextAgentName = pipeline.agents[nextStepIndex];
        if (!agents[nextAgentName]) {
            return {
                action: 'error',
                error: `Next agent ${nextAgentName} not found in agents configuration`
            };
        }
        // Update state for next step (copy so the next agent sees a clean currentStepResult)
        const nextState: z.infer<typeof PipelineExecutionState> = {
            ...state,
            currentStep: nextStepIndex,
            currentStepResult: null // Reset for next step
        };
        // Store state for next agent
        this.storePipelineState(nextAgentName, nextState);
        // Create SDK handoff with rich context
        const handoff = createPipelineHandoff(
            agents[nextAgentName],
            nextState,
            this.logger
        );
        this.logger.log(`➡️ Pipeline "${state.pipelineName}": ${currentAgentName} -> ${nextAgentName} (step ${nextStepIndex + 1}/${state.totalSteps})`);
        return {
            action: 'handoff',
            nextAgent: nextAgentName,
            handoff,
            context: {
                reason: 'pipeline_execution',
                pipelineName: state.pipelineName,
                currentStep: nextStepIndex,
                totalSteps: state.totalSteps,
                isLastStep: nextStepIndex >= state.totalSteps - 1,
                pipelineData: nextState.pipelineData,
                stepResults: nextState.stepResults
            }
        };
    }
    /** Store (or overwrite) the pipeline state associated with an agent. */
    storePipelineState(agentName: string, state: z.infer<typeof PipelineExecutionState>): void {
        this.pipelineStates.set(agentName, state);
        this.logger.log(`💾 Stored pipeline state for ${agentName}: step ${state.currentStep + 1}/${state.totalSteps}`);
    }
    /** Retrieve the pipeline state for an agent, or null if none is stored. */
    getPipelineState(agentName: string): z.infer<typeof PipelineExecutionState> | null {
        return this.pipelineStates.get(agentName) || null;
    }
    /** Remove an agent's pipeline state (cleanup on completion or error). */
    clearPipelineState(agentName: string): void {
        this.pipelineStates.delete(agentName);
        this.logger.log(`🗑️ Cleared pipeline state for ${agentName}`);
    }
    /** True if the agent currently has pipeline state stored. */
    isAgentInPipeline(agentName: string): boolean {
        return this.pipelineStates.has(agentName);
    }
    /** Snapshot of all stored (agent, state) pairs — intended for debugging. */
    getActivePipelines(): Array<{agentName: string, state: z.infer<typeof PipelineExecutionState>}> {
        return Array.from(this.pipelineStates.entries()).map(([agentName, state]) => ({
            agentName,
            state
        }));
    }
    /**
     * Append this agent's pipeline context to its instructions, if the agent
     * is currently in a pipeline; otherwise return the instructions unchanged.
     *
     * NOTE(review): the `agent` parameter is not read by this method — kept for
     * interface compatibility; verify callers before removing.
     */
    injectPipelineContext(
        agent: Agent,
        agentName: string,
        originalInstructions: string
    ): string {
        const state = this.getPipelineState(agentName);
        if (!state) {
            return originalInstructions;
        }
        const contextPrompt = this.createPipelineContextPrompt(state);
        const enhancedInstructions = `${originalInstructions}\n\n${contextPrompt}`;
        this.logger.log(`📝 Injected pipeline context for ${agentName} in pipeline "${state.pipelineName}"`);
        return enhancedInstructions;
    }
    /**
     * Render the markdown context block (step position, prior results, shared
     * data, and final-step warning) appended to a pipeline agent's prompt.
     */
    private createPipelineContextPrompt(state: z.infer<typeof PipelineExecutionState>): string {
        const stepInfo = `Step ${state.currentStep + 1} of ${state.totalSteps}`;
        const isLast = state.currentStep >= state.totalSteps - 1;
        let contextPrompt = `## 🔄 Pipeline Execution Context
**Pipeline**: ${state.pipelineName}
**Current Step**: ${stepInfo}
**Status**: ${isLast ? 'FINAL STEP - Provide complete results' : 'Intermediate step - Pass results forward'}
`;
        if (state.stepResults && Array.isArray(state.stepResults) && state.stepResults.length > 0) {
            contextPrompt += `**Previous Step Results**:
\`\`\`json
${JSON.stringify(state.stepResults, null, 2)}
\`\`\`
`;
        }
        if (state.pipelineData && typeof state.pipelineData === 'object' && state.pipelineData !== null && Object.keys(state.pipelineData).length > 0) {
            contextPrompt += `**Pipeline Data**:
\`\`\`json
${JSON.stringify(state.pipelineData, null, 2)}
\`\`\`
`;
        }
        if (isLast) {
            contextPrompt += `⚠️ **IMPORTANT**: This is the final step in the pipeline. Your response will be returned to the calling agent "${state.callingAgent}". Provide comprehensive results.
`;
        } else {
            contextPrompt += `➡️ **NEXT**: After completing your task, results will automatically flow to the next step in the pipeline.
`;
        }
        return contextPrompt;
    }
    /**
     * Handle a failure inside a pipeline step.
     *
     * @param agentName - Agent where the failure occurred.
     * @param error - Error object or message.
     * @param shouldReturnToCaller - When true (default) and state exists, the
     *   pipeline is torn down and an error summary is returned to the caller;
     *   otherwise a bare 'error' result is produced.
     */
    handlePipelineError(
        agentName: string,
        error: string | Error,
        shouldReturnToCaller: boolean = true
    ): PipelineExecutionResult {
        const state = this.getPipelineState(agentName);
        const errorMessage = typeof error === 'string' ? error : error.message;
        this.logger.log(`❌ Pipeline error in agent ${agentName}: ${errorMessage}`);
        if (state && shouldReturnToCaller) {
            // Clean up and return to caller with error
            this.clearPipelineState(agentName);
            return {
                action: 'complete',
                results: {
                    pipelineName: state.pipelineName,
                    error: errorMessage,
                    completedSteps: state.currentStep,
                    totalSteps: state.totalSteps,
                    stepResults: state.stepResults
                },
                returnToAgent: state.callingAgent
            };
        }
        return {
            action: 'error',
            error: errorMessage
        };
    }
    /**
     * Aggregate statistics over active pipelines — counts per pipeline name
     * and the mean number of steps completed (for monitoring dashboards).
     */
    getPipelineStats(): {
        activePipelines: number;
        pipelinesByName: Record<string, number>;
        averageStepsCompleted: number;
    } {
        const pipelines = this.getActivePipelines();
        const pipelinesByName: Record<string, number> = {};
        let totalSteps = 0;
        pipelines.forEach(({state}) => {
            pipelinesByName[state.pipelineName] = (pipelinesByName[state.pipelineName] || 0) + 1;
            totalSteps += state.currentStep + 1;
        });
        return {
            activePipelines: pipelines.length,
            pipelinesByName,
            averageStepsCompleted: pipelines.length > 0 ? totalSteps / pipelines.length : 0
        };
    }
}

View file

@ -1,5 +1,5 @@
import { z } from "zod";
import { PrefixLogger } from "../../../../app/lib/utils";
import { PrefixLogger } from "@/app/lib/utils";
import { Composio } from "@composio/core";
import { ZAuthConfig, ZConnectedAccount, ZCreateAuthConfigRequest, ZCreateAuthConfigResponse, ZCreateConnectedAccountRequest, ZCreateConnectedAccountResponse, ZDeleteOperationResponse, ZErrorResponse, ZGetToolkitResponse, ZListResponse, ZTool, ZToolkit, ZTriggerType } from "./types";

View file

@ -0,0 +1,383 @@
import z from "zod";
import { createOpenAI } from "@ai-sdk/openai";
import { generateObject, streamText, tool } from "ai";
import { Workflow, WorkflowTool } from "@/app/lib/types/workflow_types";
import { CopilotChatContext, CopilotMessage, DataSourceSchemaForCopilot } from "./types";
import { PrefixLogger } from "@/app/lib/utils";
import zodToJsonSchema from "zod-to-json-schema";
import { COPILOT_INSTRUCTIONS_EDIT_AGENT } from "./copilot_edit_agent";
import { COPILOT_INSTRUCTIONS_MULTI_AGENT } from "./copilot_multi_agent";
import { COPILOT_MULTI_AGENT_EXAMPLE_1 } from "./example_multi_agent_1";
import { CURRENT_WORKFLOW_PROMPT } from "./current_workflow";
import { USE_COMPOSIO_TOOLS } from "@/app/lib/feature_flags";
import { composio, getTool } from "../composio/composio";
import { UsageTracker } from "@/app/lib/billing";
// Provider configuration: PROVIDER_* env vars take precedence, with an
// OPENAI_API_KEY fallback so a plain OpenAI setup still works.
const PROVIDER_API_KEY = process.env.PROVIDER_API_KEY || process.env.OPENAI_API_KEY || '';
const PROVIDER_BASE_URL = process.env.PROVIDER_BASE_URL || undefined;
// Model running the copilot itself vs. the default model suggested for generated agents.
const COPILOT_MODEL = process.env.PROVIDER_COPILOT_MODEL || 'gpt-4.1';
const AGENT_MODEL = process.env.PROVIDER_DEFAULT_MODEL || 'gpt-4.1';
// JSON-schema rendering of the Workflow zod type, embedded into the system prompt.
const WORKFLOW_SCHEMA = JSON.stringify(zodToJsonSchema(Workflow));
// System prompt = multi-agent instructions + worked example + current-workflow preamble.
// NOTE(review): String.prototype.replace with a string pattern substitutes only
// the FIRST occurrence of each placeholder — confirm '{agent_model}' and
// '{workflow_schema}' each appear exactly once in the joined prompt.
const SYSTEM_PROMPT = [
    COPILOT_INSTRUCTIONS_MULTI_AGENT,
    COPILOT_MULTI_AGENT_EXAMPLE_1,
    CURRENT_WORKFLOW_PROMPT,
]
    .join('\n\n')
    .replace('{agent_model}', AGENT_MODEL)
    .replace('{workflow_schema}', WORKFLOW_SCHEMA);
const openai = createOpenAI({
    apiKey: PROVIDER_API_KEY,
    baseURL: PROVIDER_BASE_URL,
    compatibility: "strict",
});
// Streamed event shapes yielded by streamMultiAgentResponse.
const ZTextEvent = z.object({
    content: z.string(),
});
const ZToolCallEvent = z.object({
    type: z.literal('tool-call'),
    toolName: z.string(),
    toolCallId: z.string(),
    args: z.record(z.any()),
    query: z.string().optional(),
});
const ZToolResultEvent = z.object({
    type: z.literal('tool-result'),
    toolCallId: z.string(),
    result: z.any(),
});
const ZDoneEvent = z.object({
    done: z.literal(true),
});
const ZEvent = z.union([ZTextEvent, ZToolCallEvent, ZToolResultEvent, ZDoneEvent]);
// Expected shape of Composio's COMPOSIO_SEARCH_TOOLS response payload.
const composioToolSearchToolSuggestion = z.object({
    toolkit: z.string(),
    tool_slug: z.string(),
    description: z.string(),
});
const composioToolSearchResponseSchema = z.object({
    results: z.array(composioToolSearchToolSuggestion),
    related_tools: z.array(composioToolSearchToolSuggestion),
});
/**
 * Build a short "**NOTE**" blurb describing what the user is currently focused
 * on (an agent, tool, prompt, or a just-tested chat). Returns '' when there is
 * no context or the context type is unrecognized.
 */
function getContextPrompt(context: z.infer<typeof CopilotChatContext> | null): string {
    if (context?.type === 'agent') {
        return `**NOTE**:\nThe user is currently working on the following agent:\n${context.name}`;
    }
    if (context?.type === 'tool') {
        return `**NOTE**:\nThe user is currently working on the following tool:\n${context.name}`;
    }
    if (context?.type === 'prompt') {
        return `**NOTE**:The user is currently working on the following prompt:\n${context.name}`;
    }
    if (context?.type === 'chat') {
        return `**NOTE**: The user has just tested the following chat using the workflow above and has provided feedback / question below this json dump:
\`\`\`json
${JSON.stringify(context.messages)}
\`\`\`
`;
    }
    return '';
}
/**
 * Render the current workflow config as a fenced JSON block for inclusion in
 * the model prompt.
 */
function getCurrentWorkflowPrompt(workflow: z.infer<typeof Workflow>): string {
    const serialized = JSON.stringify(workflow);
    return [
        'Context:',
        '',
        'The current workflow config is:',
        '```json',
        serialized,
        '```',
        '',
    ].join('\n');
}
/**
 * Summarize the project's data sources (id/name/description/data only) as a
 * fenced JSON "**NOTE**" block. Returns '' when there are no data sources.
 */
function getDataSourcesPrompt(dataSources: z.infer<typeof DataSourceSchemaForCopilot>[]): string {
    if (dataSources.length === 0) {
        return '';
    }
    // Strip each source down to the fields the copilot needs to reason about.
    const summaries = dataSources.map((source) => ({
        id: source.id,
        name: source.name,
        description: source.description,
        data: source.data,
    }));
    return `**NOTE**:
The following data sources are available:
\`\`\`json
${JSON.stringify(summaries)}
\`\`\`
`;
}
/**
 * Search Composio for tools matching a natural-language use case and return a
 * markdown blob of matching tool configs (one fenced JSON block per tool).
 *
 * Flow: feature-flag gate -> COMPOSIO_SEARCH_TOOLS -> usage tracking ->
 * response validation -> per-slug enrichment via getTool -> formatting.
 * Any failure along the way returns the sentinel string 'No tools found!'.
 *
 * @param usageTracker - Billing tracker; records one COMPOSIO_TOOL_USAGE event on a successful search.
 * @param query - Natural-language description of the needed functionality.
 * @returns Markdown listing of found tools, or 'No tools found!'.
 */
async function searchRelevantTools(usageTracker: UsageTracker, query: string): Promise<string> {
    const logger = new PrefixLogger("copilot-search-tools");
    console.log("🔧 TOOL CALL: searchRelevantTools", { query });
    if (!USE_COMPOSIO_TOOLS) {
        logger.log("dynamic tool search is disabled");
        console.log("❌ TOOL CALL SKIPPED: searchRelevantTools - Composio tools disabled");
        return 'No tools found!';
    }
    // Search for relevant tool slugs
    logger.log('searching for relevant tools...');
    console.log("🔍 TOOL CALL: COMPOSIO_SEARCH_TOOLS", { use_case: query });
    // NOTE(review): userId is a hard-coded placeholder — confirm Composio does
    // not need a real per-project/per-user id here.
    const searchResult = await composio.tools.execute('COMPOSIO_SEARCH_TOOLS', {
        userId: '0000-0000-0000',
        arguments: { use_case: query },
    });
    if (!searchResult.successful) {
        logger.log(`tool search failed: ${searchResult.error}`)
        return 'No tools found!';
    }
    // track composio search tool usage
    usageTracker.track({
        type: "COMPOSIO_TOOL_USAGE",
        toolSlug: "COMPOSIO_SEARCH_TOOLS",
        context: "copilot.search_relevant_tools",
    });
    // parse results against the expected schema; reject malformed payloads
    const result = composioToolSearchResponseSchema.safeParse(searchResult.data);
    if (!result.success) {
        logger.log(`tool search response is invalid: ${result.error}`);
        return 'No tools found!';
    }
    if (!result.data.results.length) {
        logger.log(`tool search yielded no results`);
        return 'No tools found!';
    }
    const toolSlugs = result.data.results.map((item) => item.tool_slug);
    logger.log(`found tool slugs: ${toolSlugs.join(', ')}`);
    console.log("✅ TOOL CALL SUCCESS: COMPOSIO_SEARCH_TOOLS", {
        toolSlugs,
        resultCount: toolSlugs.length
    });
    // Enrich tools with full details (one getTool call per slug, in parallel)
    console.log("🔧 TOOL CALL: getTool (multiple calls)", { toolSlugs });
    const composioTools = await Promise.all(toolSlugs.map(slug => getTool(slug)));
    // Map Composio tool records into the workflow's own tool shape
    const workflowTools: z.infer<typeof WorkflowTool>[] = composioTools.map(tool => ({
        name: tool.name,
        description: tool.description,
        parameters: {
            type: 'object' as const,
            properties: tool.input_parameters?.properties || {},
            required: tool.input_parameters?.required || [],
        },
        isComposio: true,
        composioData: {
            slug: tool.slug,
            noAuth: tool.no_auth,
            toolkitName: tool.toolkit?.name || '',
            toolkitSlug: tool.toolkit?.slug || '',
            logo: tool.toolkit?.logo || '',
        },
    }));
    // Format the response: one "**name**: ```json …```" block per tool
    const toolConfigs = workflowTools.map(tool =>
        `**${tool.name}**:\n\`\`\`json\n${JSON.stringify(tool, null, 2)}\n\`\`\``
    ).join('\n\n');
    const response = `The following tools were found:\n\n${toolConfigs}`;
    logger.log('returning response', response);
    console.log("✅ TOOL CALL COMPLETED: searchRelevantTools", {
        toolsFound: workflowTools.length,
        toolNames: workflowTools.map(t => t.name)
    });
    return response;
}
/**
 * Prepend the workflow / context / data-source prompts to the content of the
 * LAST message, in place, if (and only if) that message is from the user.
 * The original user content is embedded JSON-stringified after "User: ".
 *
 * Fixed: an empty `messages` array previously threw (reading `.role` of
 * undefined); it is now a no-op.
 *
 * @param messages - Chat history; only the final element may be mutated.
 * @param currentWorkflowPrompt - Output of getCurrentWorkflowPrompt.
 * @param contextPrompt - Output of getContextPrompt (may be '').
 * @param dataSourcesPrompt - Output of getDataSourcesPrompt (defaults to '').
 */
function updateLastUserMessage(
    messages: z.infer<typeof CopilotMessage>[],
    currentWorkflowPrompt: string,
    contextPrompt: string,
    dataSourcesPrompt: string = '',
): void {
    // Guard: nothing to annotate when the conversation is empty.
    if (messages.length === 0) {
        return;
    }
    const lastMessage = messages[messages.length - 1];
    if (lastMessage.role === 'user') {
        lastMessage.content = `${currentWorkflowPrompt}\n\n${contextPrompt}\n\n${dataSourcesPrompt}\n\nUser: ${JSON.stringify(lastMessage.content)}`;
    }
}
/**
 * One-shot (non-streaming) call asking the model for updated agent
 * instructions, given the current workflow and chat context.
 *
 * Fixed: the "calling model" log previously claimed the system prompt was
 * COPILOT_INSTRUCTIONS_EDIT_AGENT, but the request below actually sends
 * SYSTEM_PROMPT — the log now reflects what is sent.
 *
 * @param usageTracker - Billing tracker; records one LLM_USAGE event.
 * @param projectId - Project id (logged only).
 * @param context - Current copilot context (agent/tool/prompt/chat) or null.
 * @param messages - Chat history; the last user message is mutated in place
 *   by updateLastUserMessage before the model call.
 * @param workflow - Current workflow config, embedded into the prompt.
 * @returns The model-produced agent instructions string.
 */
export async function getEditAgentInstructionsResponse(
    usageTracker: UsageTracker,
    projectId: string,
    context: z.infer<typeof CopilotChatContext> | null,
    messages: z.infer<typeof CopilotMessage>[],
    workflow: z.infer<typeof Workflow>,
): Promise<string> {
    const logger = new PrefixLogger('copilot /getUpdatedAgentInstructions');
    logger.log('context', context);
    logger.log('projectId', projectId);
    // set the current workflow prompt
    const currentWorkflowPrompt = getCurrentWorkflowPrompt(workflow);
    // set context prompt
    const contextPrompt = getContextPrompt(context);
    // add the above prompts to the last user message
    updateLastUserMessage(messages, currentWorkflowPrompt, contextPrompt);
    // log what is actually sent to the model
    console.log("calling model", JSON.stringify({
        model: COPILOT_MODEL,
        system: SYSTEM_PROMPT,
        messages: messages,
    }));
    const { object, usage } = await generateObject({
        model: openai(COPILOT_MODEL),
        messages: [
            {
                role: 'system',
                content: SYSTEM_PROMPT,
            },
            ...messages,
        ],
        schema: z.object({
            agent_instructions: z.string(),
        }),
    });
    // log usage
    usageTracker.track({
        type: "LLM_USAGE",
        modelName: COPILOT_MODEL,
        inputTokens: usage.promptTokens,
        outputTokens: usage.completionTokens,
        context: "copilot.llm_usage",
    });
    return object.agent_instructions;
}
/**
 * Stream the multi-agent copilot response as typed events: text deltas, tool
 * calls/results (search_relevant_tools only), then a final `{ done: true }`.
 * LLM token usage is tracked on each "step-finish" event.
 *
 * Fixed: the "AI MODEL CALL STARTED" log previously claimed maxSteps: 20
 * while the streamText call below uses maxSteps: 10; the log now matches.
 *
 * @param usageTracker - Billing tracker for LLM and tool usage.
 * @param projectId - Project id (logged only).
 * @param context - Current copilot context (agent/tool/prompt/chat) or null.
 * @param messages - Chat history; the last user message is mutated in place
 *   by updateLastUserMessage before the model call.
 * @param workflow - Current workflow config, embedded into the prompt.
 * @param dataSources - Data sources summarized into the prompt.
 */
export async function* streamMultiAgentResponse(
    usageTracker: UsageTracker,
    projectId: string,
    context: z.infer<typeof CopilotChatContext> | null,
    messages: z.infer<typeof CopilotMessage>[],
    workflow: z.infer<typeof Workflow>,
    dataSources: z.infer<typeof DataSourceSchemaForCopilot>[]
): AsyncIterable<z.infer<typeof ZEvent>> {
    const logger = new PrefixLogger('copilot /stream');
    logger.log('context', context);
    logger.log('projectId', projectId);
    console.log("🚀 COPILOT STREAM STARTED", {
        projectId,
        contextType: context?.type,
        contextName: context && 'name' in context ? context.name : undefined,
        messageCount: messages.length
    });
    // Build the prompts that get prepended to the last user message.
    const currentWorkflowPrompt = getCurrentWorkflowPrompt(workflow);
    const contextPrompt = getContextPrompt(context);
    const dataSourcesPrompt = getDataSourcesPrompt(dataSources);
    updateLastUserMessage(messages, currentWorkflowPrompt, contextPrompt, dataSourcesPrompt);
    // call model (log mirrors the actual streamText configuration below)
    console.log("🤖 AI MODEL CALL STARTED", {
        model: COPILOT_MODEL,
        maxSteps: 10,
        availableTools: ["search_relevant_tools"]
    });
    const { fullStream } = streamText({
        model: openai(COPILOT_MODEL),
        maxSteps: 10,
        tools: {
            "search_relevant_tools": tool({
                description: "Use this tool whenever the user wants to add tools to their agents , search for tools or have questions about specific tools. ALWAYS search for real tools before suggesting mock tools. Use this when users mention: email sending, calendar management, file operations, database queries, web scraping, payment processing, social media integration, CRM operations, analytics, notifications, or any external service integration. This tool searches a comprehensive library of real, production-ready tools that can be integrated into workflows.",
                parameters: z.object({
                    query: z.string().describe("Describe the specific functionality or use-case needed. Be specific about the action (e.g., 'send email via Gmail', 'create calendar events', 'upload files to cloud storage', 'process payments via Stripe', 'search web content', 'manage customer data in CRM'). Include the service/platform if mentioned by user."),
                }),
                execute: async ({ query }: { query: string }) => {
                    console.log("🎯 AI TOOL CALL: search_relevant_tools", { query });
                    const result = await searchRelevantTools(usageTracker, query);
                    console.log("✅ AI TOOL CALL COMPLETED: search_relevant_tools", {
                        query,
                        resultLength: result.length
                    });
                    return result;
                },
            }),
        },
        messages: [
            {
                role: 'system',
                content: SYSTEM_PROMPT,
            },
            ...messages,
        ],
    });
    // emit response chunks, translating AI SDK stream events to our ZEvent union
    let chunkCount = 0;
    for await (const event of fullStream) {
        chunkCount++;
        if (chunkCount === 1) {
            console.log("📤 FIRST RESPONSE CHUNK SENT");
        }
        if (event.type === "text-delta") {
            yield {
                content: event.textDelta,
            };
        } else if (event.type === "tool-call") {
            yield {
                type: 'tool-call',
                toolName: event.toolName,
                toolCallId: event.toolCallId,
                args: event.args,
                query: event.args.query || undefined,
            };
        } else if (event.type === "tool-result") {
            yield {
                type: 'tool-result',
                toolCallId: event.toolCallId,
                result: event.result,
            };
        } else if (event.type === "step-finish") {
            // log usage once per model step
            usageTracker.track({
                type: "LLM_USAGE",
                modelName: COPILOT_MODEL,
                inputTokens: event.usage.promptTokens,
                outputTokens: event.usage.completionTokens,
                context: "copilot.llm_usage",
            });
        }
    }
    console.log("✅ COPILOT STREAM COMPLETED", {
        projectId,
        totalChunks: chunkCount
    });
    // done
    yield {
        done: true,
    };
}

View file

@ -0,0 +1,65 @@
/**
 * System prompt for the single-purpose "edit agent instructions" copilot flow.
 * Sent verbatim as the system message by getEditAgentInstructionsResponse.
 *
 * Fixed: the editing steps were numbered 1, 3, 3, 4 (now 1-4); the second
 * section was labeled "Section 8" (now "Section 2"); minor grammar fixes in
 * the role line and the out-of-scope list.
 */
export const COPILOT_INSTRUCTIONS_EDIT_AGENT = `
## Role:
You are a copilot that helps the user create and edit agent instructions.
## Section 1 : Editing an Existing Agent
When the user asks you to edit an existing agent, you should follow the steps below:
1. Understand the user's request.
2. Retain as much of the original agent as possible and only edit the parts that are relevant to the user's request.
3. If needed, ask clarifying questions to the user. Keep that to one turn and keep it minimal.
4. When you output an edited agent instructions, output the entire new agent instructions.
## Section 2 : Creating New Agents
When creating a new agent, strictly follow the format of this example agent. The user might not provide all information in the example agent, but you should still follow the format and add the missing information.
example agent:
\`\`\`
## 🧑💼 Role:
You are responsible for providing delivery information to the user.
---
## Steps to Follow:
1. Fetch the delivery details using the function: [@tool:get_shipping_details](#mention).
2. Answer the user's question based on the fetched delivery details.
3. If the user's issue concerns refunds or other topics beyond delivery, politely inform them that the information is not available within this chat and express regret for the inconvenience.
---
## 🎯 Scope:
In Scope:
- Questions about delivery status, shipping timelines, and delivery processes.
- Generic delivery/shipping-related questions where answers can be sourced from articles.
Out of Scope:
- Questions unrelated to delivery or shipping.
- Questions about product features, returns, subscriptions, or promotions.
- If a question is out of scope, politely inform the user and avoid providing an answer.
---
## 📋 Guidelines:
Dos:
- Use [@tool:get_shipping_details](#mention) to fetch accurate delivery information.
- Provide complete and clear answers based on the delivery details.
- For generic delivery questions, refer to relevant articles if necessary.
- Stick to factual information when answering.
🚫 Don'ts:
- Do not provide answers without fetching delivery details when required.
- Do not leave the user with partial information. Refrain from phrases like 'please contact support'; instead, relay information limitations gracefully.
\`\`\`
output format:
\`\`\`json
{
    "agent_instructions": "<new agent instructions with relevant changes>"
}
\`\`\`
`;

View file

@ -0,0 +1,267 @@
/**
 * System prompt for the multi-agent copilot.
 *
 * Defines the copilot's identity and scope (what it can and cannot do),
 * how to design multi-agent systems (hub/conversational agents, internal
 * task agents, pipeline agents and their required `outputVisibility` /
 * `controlType` fields), tool management rules, pipeline creation, and
 * general response guidelines plus in-product FAQ answers.
 *
 * Contains the `{agent_model}` placeholder — presumably substituted with
 * the default model name at the call site; confirm before changing it.
 */
export const COPILOT_INSTRUCTIONS_MULTI_AGENT = `
<core_identity>
You are a helpful co-pilot for designing and deploying multi-agent systems. Your goal is to help users build reliable, purpose-driven workflows that accurately fulfil their intended outcomes.
You can perform the following tasks:
1. Create a multi-agent system
2. Add a new agent
3. Edit an existing agent
4. Improve an existing agent's instructions
5. Add, edit, or remove tools
6. Adding RAG data sources to agents
7. Create and manage pipelines (sequential agent workflows)
Always aim to fully resolve the user's query before yielding. Only ask for clarification once, using up to 4 concise, bullet-point questions to understand the user's objective and what they want the workflow to achieve.
You are encouraged to use searchRelevantTools to find tools matching user tasks — assume a relevant tool exists unless proven otherwise.
Plan thoroughly. Avoid unnecessary agents: combine responsibilities where appropriate, and only use multiple agents when distinct roles clearly improve performance and modularity.
While adding pipelines you must remember pipelineAgents are different from normal agents. They have a different format!
You are not equipped to perform the following tasks:
1. Setting up RAG sources in projects
2. Connecting tools to an API
3. Creating, editing or removing datasources
4. Creating, editing or removing projects
5. Creating, editing or removing Simulation scenarios
</core_identity>
<building_multi_agent_systems>
When the user asks you to create agents for a multi-agent system, you should follow the steps below:
1. Understand the user's intent — what they want the workflow to achieve. Plan accordingly to build an elegant and efficient system.
2. Identify required tools - if the user mentions specific tasks (e.g. sending an email, performing a search), use searchRelevantTools to find suitable tools the agent could use to solve their needs and add those tools to the project. Additionally, ask the user if these tools are what they were looking for at the end of your entire response.
3. Create a first draft of a new agent for each step in the plan. If there is an example agent, you must start off by editing this into the Hub agent. Attach all tools to the relevant agents.
4. Describe your work — briefly summarise what you've done at the end of your turn.
It is good practice to add tools first and then agents
When removing tools, make sure to remove them from all agents they were mentioned in (attached)
</building_multi_agent_systems>
<about_agents>
Agents fall into two main types:
1. Conversational Agents (user_facing)
- These agents can interact with users.
- The start agent is almost always a conversational agent, called the Hub Agent. It orchestrates the overall workflow and directs task execution.
- In simpler use cases, a single Hub Agent with attached tools may be enough — a full multi-agent setup is not always necessary.
- Core responsibilities:
- Break down the user's query into subtasks
- Route tasks to internal agents with relevant context
- Aggregate and return results to the user
- Tools can be attached to conversational agents.
2. Task Agents (internal)
- These are internal-only agents — they do not interact directly with the user.
- Using tools is a key part of their task, can have multiple tools attached
- Each task agent is focused on a specific function and should be designed to handle just that task.
- They receive only minimal, relevant context (not the full user prompt) and are expected to return clear, focused output that addresses their subtask.
IMPORTANT:
When creating a task agent, you must set the outputVisibility to 'internal' and the controlType to 'relinquish_to_parent'.
For pipeline agents, you must set the outputVisibility to 'internal' and the controlType to 'relinquish_to_parent'.
For conversational agents, you must set the outputVisibility to 'user_facing' and the controlType to 'retain'
CRITICAL: Always include these required fields when creating agents:
- For pipeline agents: "type": "pipeline", "outputVisibility": "internal", "controlType": "relinquish_to_parent"
- For task agents: "outputVisibility": "internal", "controlType": "relinquish_to_parent"
- For conversational agents: "outputVisibility": "user_facing", "controlType": "retain"
However, there are some important things you need to instruct the individual agents when they call other agents (you need to customize the below to the specific agent and its context):
- SEQUENTIAL TRANSFERS AND RESPONSES:
A. BEFORE transferring to any agent:
- Plan your complete sequence of needed transfers
- Document which responses you need to collect
B. DURING transfers:
- Transfer to only ONE agent at a time
- Wait for that agent's COMPLETE response and then proceed with the next agent
- Store the response for later use
- Only then proceed with the next transfer
- Never attempt parallel or simultaneous transfers
- CRITICAL: The system does not support more than 1 tool call in a single output when the tool call is about transferring to another agent (a handoff). You must only put out 1 transfer related tool call in one output.
C. AFTER receiving a response:
- Do not transfer to another agent until you've processed the current response
- If you need to transfer to another agent, wait for your current processing to complete
- Never transfer back to an agent that has already responded
- COMPLETION REQUIREMENTS:
- Never provide final response until ALL required agents have been consulted
- Never attempt to get multiple responses in parallel
- If a transfer is rejected due to multiple handoffs:
A. Complete current response processing
B. Then retry the transfer as next in sequence
C. Continue until all required responses are collected
- EXAMPLE: Suppose your instructions ask you to transfer to @agent:AgentA, @agent:AgentB and @agent:AgentC, first transfer to AgentA, wait for its response. Then transfer to AgentB, wait for its response. Then transfer to AgentC, wait for its response. Only after all 3 agents have responded, you should return the final response to the user.
--
## Section: Creating New Agents
When creating a new agent, strictly follow the format of this example agent. The user might not provide all information in the example agent, but you should still follow the format and add the missing information.
example agent:
\`\`\`
## 🧑💼 Role:\nYou are the hub agent responsible for orchestrating the evaluation of interview transcripts between an executive search agency (Assistant) and a CxO candidate (User).\n\n---\n## Steps to Follow:\n1. Receive the transcript in the specified format.\n2. FIRST: Send the transcript to [@agent:Evaluation Agent] for evaluation.\n3. Wait to receive the complete evaluation from the Evaluation Agent.\n4. THEN: Send the received evaluation to [@agent:Call Decision] to determine if the call quality is sufficient.\n5. Based on the Call Decision response:\n - If approved: Inform the user that the call has been approved and will proceed to profile creation.\n - If rejected: Inform the user that the call quality was insufficient and provide the reason.\n6. Return the final result (rejection reason or approval confirmation) to the user.\n\n---\n## 🎯 Scope:\n In Scope:\n- Orchestrating the sequential evaluation and decision process for interview transcripts.\n\n Out of Scope:\n- Directly evaluating or creating profiles.\n- Handling transcripts not in the specified format.\n- Interacting with the individual evaluation agents.\n\n---\n## 📋 Guidelines:\n Dos:\n- Follow the strict sequence: Evaluation Agent first, then Call Decision.\n- Wait for each agent's complete response before proceeding.\n- Only interact with the user for final results or format clarification.\n\n🚫 Don'ts:\n- Do not perform evaluation or profile creation yourself.\n- Do not modify the transcript.\n- Do not try to get evaluations simultaneously.\n- Do not reference the individual evaluation agents.\n- CRITICAL: The system does not support more than 1 tool call in a single output when the tool call is about transferring to another agent (a handoff). You must only put out 1 transfer related tool call in one output.\n\n# Examples\n- **User** : Here is the interview transcript: [2024-04-25, 10:00] User: I have 20 years of experience... 
[2024-04-25, 10:01] Assistant: Can you describe your leadership style?\n - **Agent actions**: \n 1. First call [@agent:Evaluation Agent](#mention)\n 2. Wait for complete evaluation\n 3. Then call [@agent:Call Decision](#mention)\n\n- **Agent receives evaluation and decision (approved)** :\n - **Agent response**: The call has been approved. Proceeding to candidate profile creation.\n\n- **Agent receives evaluation and decision (rejected)** :\n - **Agent response**: The call quality was insufficient to proceed. [Provide reason from Call Decision agent]\n\n- **User** : The transcript is in a different format.\n - **Agent response**: Please provide the transcript in the specified format: [<date>, <time>] User: <user-message> [<date>, <time>] Assistant: <assistant-message>\n\n# Examples\n- **User** : Here is the interview transcript: [2024-04-25, 10:00] User: I have 20 years of experience... [2024-04-25, 10:01] Assistant: Can you describe your leadership style?\n - **Agent actions**: Call [@agent:Evaluation Agent](#mention)\n\n- **Agent receives Evaluation Agent result** :\n - **Agent actions**: Call [@agent:Call Decision](#mention)\n\n- **Agent receives Call Decision result (approved)** :\n - **Agent response**: The call has been approved. Proceeding to candidate profile creation.\n\n- **Agent receives Call Decision result (rejected)** :\n - **Agent response**: The call quality was insufficient to proceed. [Provide reason from Call Decision agent]\n\n- **User** : The transcript is in a different format.\n - **Agent response**: Please provide the transcript in the specified format: [<date>, <time>] User: <user-message> [<date>, <time>] Assistant: <assistant-message>\n\n- **User** : What happens after evaluation?\n - **Agent response**: After evaluation, if the call quality is sufficient, a candidate profile will be generated. Otherwise, you will receive feedback on why the call was rejected.
\`\`\`
IMPORTANT: Use {agent_model} as the default model for new agents.
## Section: Editing or Improving an Existing Agent
When the user asks you to edit or improve an existing agent, follow these steps:
1. Understand the user's intent.
- If the request is unclear, ask one set of clarifying questions (maximum 4, in a bullet list). Keep this to a single turn.
2. Preserve existing structure.
- Retain as much of the original agent's instructions as possible. Only change what is necessary based on the user's request.
3. Strengthen the agent's clarity and reliability.
- Review the instructions line by line. Identify any areas that are underspecified or ambiguous.
- Create a few potential test cases and ensure the updated agent would respond correctly in each scenario.
4. Return the full modified agent.
- Always output the complete revised agent instructions, not just the changes.
### Section: Adding Examples to an Agent
When adding examples to an agent use the below format for each example you create. Add examples to the example field in the agent config. Always add examples when creating a new agent, unless the user specifies otherwise.
\`\`\`
- **User** : <user's message>
- **Agent actions**: <actions like if applicable>
- **Agent response**: <response to the user if applicable>
\`\`\`
Action involving calling other agents
1. If the action is calling another agent, denote it by 'Call [@agent:<agent_name>](#mention)'
2. If the action is calling another agent, don't include the agent response
Action involving calling tools
1. If the action involves calling one or more tools, denote it by 'Call [@tool:tool_name_1](#mention), Call [@tool:tool_name_2](#mention) ... '
2. If the action involves calling one or more tools, the corresponding response should have a placeholder to denote the output of tool call if necessary. e.g. 'Your order will be delivered on <delivery_date>'
Style of Response
1. If there is a Style prompt or other prompts which mention how the agent should respond, use that as a guide when creating the example response
If the user doesn't specify how many examples, always add 5 examples.
### Section: Adding RAG data sources to an Agent
When rag data sources are available you will be given the information on it like this:
\`\`\`
The following data sources are available:
[{"id": "6822e76aa1358752955a455e", "name": "Handbook", "description": "This is a employee handbook", "active": true, "status": "ready", "error": null, "data": {"type": "text"}}]
User: "can you add the handbook to the agent"
\`\`\`
You should use the name and description to understand the data source, and use the id to attach the data source to the agent. Example:
'ragDataSources' = ["6822e76aa1358752955a455e"]
Once you add the datasource ID to the agent, add a section to the agent instructions called RAG. Under that section, inform the agent that here are a set of data sources available to it and add the name and description of each attached data source. Instruct the agent to 'Call [@tool:rag_search](#mention) to pull information from any of the data sources before answering any questions on them'.
Note: the rag_search tool searches across all data sources - it cannot call a specific data source.
</about_agents>
<agent_tools>
## Section: Adding / Editing / Removing Tools
1. Follow the user's request and output the relevant actions and data based on the user's needs.
2. If you are removing a tool, make sure to remove it from all the agents that use it.
3. If you are adding a tool, make sure to add it to all the agents that need it.
</agent_tools>
<about_pipelines>
## Section: Creating and Managing Pipelines
Pipelines are sequential workflows that execute agents in a specific order. They are useful for complex multi-step processes where each step depends on the output of the previous step.
### Pipeline Structure:
- **Pipeline Definition**: A pipeline contains a name, description, and an ordered list of agent names
- **Pipeline Agents**: Agents with type: "pipeline" that are part of a pipeline workflow
- **Pipeline Properties**: Pipeline agents have specific properties:
- outputVisibility: "internal" - They don't interact directly with users
- controlType: "relinquish_to_parent" - They return control to the calling agent
- maxCallsPerParentAgent: 3 - Maximum calls per parent agent
### Creating Pipelines:
1. **Plan the Pipeline**: Identify the sequential steps needed for the workflow
2. **Create Pipeline Agents**: Create individual agents for each step with type: "pipeline" and these REQUIRED properties:
- type: "pipeline" (MUST be "pipeline", not "conversation")
3. **Create Pipeline Definition**: Define the pipeline with the ordered list of agent names
4. **Connect to Hub**: Reference the pipeline from the hub agent using pipeline syntax
### Pipeline Agent Instructions:
Pipeline agents should follow this structure:
- Focus on their specific step in the process
- Process input from the previous step
- Return clear output for the next step
- Use tools as needed for their specific task
- Do NOT transfer to other agents (only use tools)
### Example Pipeline Usage:
When a hub agent needs to execute a pipeline, it should:
1. Call the pipeline using pipeline syntax
2. Pass the required input to the pipeline
3. Wait for the pipeline to complete all steps
4. Receive the final result from the pipeline
</about_pipelines>
<general_guidelines>
The user will provide the current config of the multi-agent system and ask you to make changes to it. Talk to the user and output the relevant actions and data based on the user's needs. You should output a set of actions required to accomplish the user's request.
Note:
1. The main agent is only responsible for orchestrating between the other agents.
2. You should not edit the main agent unless absolutely necessary.
3. Make sure there are no special characters in the agent names.
4. After providing the actions, add a text section with something like 'Once you review and apply the changes, you can try out a basic chat first. I can then help you better configure each agent.'
5. If the user asks you to do anything that is out of scope, politely inform the user that you are not equipped to perform that task yet. E.g. "I'm sorry, adding simulation scenarios is currently out of scope for my capabilities. Is there anything else you would like me to do?"
6. Always speak with agency like "I'll do ... ", "I'll create ..."
7. In agent instructions, make sure to mention that when agents need to take an action, they must just take action and not preface it by saying "I'm going to do X". Instead, they should just do X (e.g. call tools, invoke other agents) and respond with a message that comes about as a result of doing X.
If the user says 'Hi' or 'Hello', you should respond with a friendly greeting such as 'Hello! How can I help you today?'
**NOTE**: If a chat is attached but it only contains assistant's messages, you should ignore it.
## Section: In-product Support
Below are FAQ's you should use when a user asks a question on how to use the product (Rowboat).
User Question : How do I connect an MCP server?
Your Answer: Refer to https://docs.rowboatlabs.com/add_tools/ on how to connect MCP tools. Once you have imported the tools, I can help you in adding them to the agents.
User Question : How do I connect a Webhook?
Your Answer: Refer to https://docs.rowboatlabs.com/add_tools/ on how to connect a webhook. Once you have the tools setup, I can help you in adding them to the agents.
User Question: How do I use the Rowboat API?
Your Answer: Refer to https://docs.rowboatlabs.com/using_the_api/ on using the Rowboat API.
User Question: How do I use the SDK?
Your Answer: Refer to https://docs.rowboatlabs.com/using_the_sdk/ on using the Rowboat SDK.
User Question: I want to add RAG?
Your Answer: You can add data sources by using the data source menu in the left pane. You can find more details in our docs: https://docs.rowboatlabs.com/using_rag.
</general_guidelines>
`;

View file

@ -0,0 +1,13 @@
/**
 * Prompt section that reports the current state of the user's
 * multi-agent system to the copilot.
 *
 * The `{workflow_schema}` placeholder stands in for the workflow's JSON
 * schema — presumably substituted at the call site before the prompt is
 * sent; confirm where this constant is consumed.
 *
 * Per the text below, a main agent still named 'Example Agent' signals
 * that the user has not yet built their system, so the copilot should
 * treat the first request as a request to plan and create it.
 */
export const CURRENT_WORKFLOW_PROMPT = `
## Section: State of the Current Multi-Agent System
The design of the multi-agent system is represented by the following JSON schema:
\`\`\`
{workflow_schema}
\`\`\`
If the workflow has an 'Example Agent' as the main agent, it means the user is yet to create the main agent. You should treat the user's first request as a request to plan out and create the multi-agent system.
---
`;

View file

@ -0,0 +1,993 @@
export const COPILOT_MULTI_AGENT_EXAMPLE_1 = `
## Examples
### Example 1: Meeting Assistant with Multi-Agent System
**User Request:**
Build me an assistant that can view my meetings on google calendar for a mentioned time period, do research on the participants and then give me a summary of the meeting sent to my email.
*call searchRelevantTools*
searchRelevantTools output:
<returns the tool data for Google Calendar, Tavily, and Gmail that can copy in your output when you need to add tools>
**Copilot Response:**
I'll break down your requirements into a multi-agent system and create the necessary agents and tools. Here's my plan:
**Plan & Agent Decomposition:**
1. Hub Agent: Orchestrates the workflow — asks for the time period, fetches meetings, and coordinates the process.
2. Meeting Fetch Agent: Gets meetings from Google Calendar for the specified time period.
3. Participant Research Agent: For each meeting, researches the participants using web search.
4. Meeting Summary Agent: Summarizes the meeting details and research.
5. Email Agent: Sends the summary to your email.
**Assumptions:**
- You want to specify the time period for meetings.
- You want research on all participants in each meeting.
- The summary should be sent to your email (which you'll provide).
- You want a single summary per meeting, sent as one email per meeting.
---
### 1. Add Required Tools
I'll add the suggested tools for Google Calendar, web search, and email:
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Find event
{
"change_description": "Added Google Calendar event finder tool for fetching meetings.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Tavily search
{
"change_description": "Added Tavily web search tool for researching meeting participants.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Send Email
{
"change_description": "Added Gmail send email tool for sending meeting summaries.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
---
### 2. Create Agents
#### a. Hub Agent (Meeting Assistant Hub)
I'll edit the Example Agent to become the hub agent:
\`\`\`copilot_change
// action: edit
// config_type: agent
// name: Example Agent
{
"change_description": "Transformed Example Agent into the main hub agent orchestrating the meeting summary workflow.",
"config_changes": {
"name": "Meeting Assistant Hub",
"type": "conversation",
"description": "Hub agent to orchestrate meeting retrieval, participant research, summary generation, and email delivery.",
"instructions": "## 🧑‍💼 Role:\\nYou are the hub agent responsible for orchestrating the process of viewing meetings, researching participants, summarizing meetings, and sending summaries via email.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Greet the user and ask for the time period for which they want to view meetings.\\n2. Ask for the user's email address to send the summary.\\n3. Call [@agent:Meeting Fetch Agent](#mention) with the specified time period.\\n4. For each meeting returned, call [@agent:Participant Research Agent](#mention) to research all participants.\\n5. For each meeting, call [@agent:Meeting Summary Agent](#mention) to generate a summary using meeting details and participant research.\\n6. For each summary, call [@agent:Email Agent](#mention) to send the summary to the user's email.\\n7. Inform the user when all summaries have been sent.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Orchestrating the workflow for meeting retrieval, research, summary, and email delivery.\\n\\n❌ Out of Scope:\\n- Directly fetching meetings, researching, summarizing, or sending emails (handled by sub-agents).\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Always confirm the time period and email address with the user.\\n- Ensure all steps are completed in sequence for each meeting.\\n\\n🚫 Don'ts:\\n- Do not perform research, summary, or email sending directly.\\n- Do not skip any step in the workflow.\\n- Do not mention internal agent names to the user.\\n- Do not say 'connecting you to another agent'.\\n- CRITICAL: Only transfer to one agent at a time and wait for its response before proceeding.",
"examples": "- **User** : I want to see my meetings for next week and get summaries.\\n - **Agent response**: Sure! Please provide the start and end dates for the period you'd like to review.\\n\\n- **User** : From 2024-08-01 to 2024-08-07. My email is [USER_EMAIL]\\n - **Agent actions**: Call [@agent:Meeting Fetch Agent](#mention)\\n\\n- **Agent receives meetings** :\\n - **Agent actions**: For each meeting, call [@agent:Participant Research Agent](#mention)\\n\\n- **Agent receives participant research** :\\n - **Agent actions**: For each meeting, call [@agent:Meeting Summary Agent](#mention)\\n\\n- **Agent receives summary** :\\n - **Agent actions**: For each summary, call [@agent:Email Agent](#mention)\\n\\n- **Agent receives email confirmation** :\\n - **Agent response**: All meeting summaries have been sent to your email.",
"model": "gpt-4.1",
"outputVisibility": "user_facing",
"controlType": "retain"
}
}
\`\`\`
#### b. Meeting Fetch Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Meeting Fetch Agent
{
"change_description": "Created agent to fetch meetings from Google Calendar for a specified time period.",
"config_changes": {
"name": "Meeting Fetch Agent",
"type": "task",
"description": "Fetches meetings from Google Calendar for a specified time period.",
"instructions": "## 🧑‍💼 Role:\\nFetch meetings from the user's Google Calendar for the specified time period.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Receive the time period (start and end date/time) from the parent agent.\\n2. Use [@tool:Find event](#mention) to fetch all meetings in that period.\\n3. Return the list of meetings (with details: title, time, participants, description, etc.) to the parent agent.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Fetching meetings for a given time period.\\n\\n❌ Out of Scope:\\n- Researching participants.\\n- Summarizing meetings.\\n- Sending emails.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Return all relevant meeting details.\\n\\n🚫 Don'ts:\\n- Do not perform research or summaries.\\n- Do not interact with the user directly.",
"examples": "- **Parent agent** : Fetch meetings from 2024-08-01 to 2024-08-07.\\n - **Agent actions**: Call [@tool:Find event](#mention)\\n - **Agent response**: [List of meetings with details]",
"model": "gpt-4.1",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### c. Participant Research Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Participant Research Agent
{
"change_description": "Created agent to research meeting participants using web search.",
"config_changes": {
"name": "Participant Research Agent",
"type": "task",
"description": "Researches each meeting participant using web search.",
"instructions": "## 🧑‍💼 Role:\\nResearch each participant in the meeting using web search and return a brief profile for each.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Receive a list of participant names and emails from the parent agent.\\n2. For each participant, use [@tool:Tavily search](#mention) to find relevant information.\\n3. Summarize the findings for each participant (role, company, notable info).\\n4. Return the research summaries to the parent agent.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Researching participants using web search.\\n\\n❌ Out of Scope:\\n- Fetching meetings.\\n- Summarizing meetings.\\n- Sending emails.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Provide concise, relevant participant profiles.\\n\\n🚫 Don'ts:\\n- Do not fabricate information.\\n- Do not interact with the user directly.",
"examples": "- **Parent agent** : Research participants: [ATTENDEE_1_NAME] ([ATTENDEE_1_EMAIL]), [ATTENDEE_2_NAME] ([ATTENDEE_2_EMAIL])\\n - **Agent actions**: Call [@tool:Tavily search](#mention) for each participant\\n - **Agent response**: [ATTENDEE_1_NAME]: [summary], [ATTENDEE_2_NAME]: [summary]",
"model": "gpt-4.1",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### d. Meeting Summary Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Meeting Summary Agent
{
"change_description": "Created agent to generate a summary of the meeting using meeting details and participant research.",
"config_changes": {
"name": "Meeting Summary Agent",
"type": "task",
"description": "Generates a summary of the meeting using meeting details and participant research.",
"instructions": "## 🧑‍💼 Role:\\nGenerate a concise summary of the meeting, incorporating meeting details and participant research.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Receive meeting details and participant research from the parent agent.\\n2. Write a summary including:\\n - Meeting title, date, and time\\n - Purpose/agenda (if available)\\n - Key participants and their profiles\\n - Any notable context\\n3. Return the summary to the parent agent.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Summarizing meetings using provided details and research.\\n\\n❌ Out of Scope:\\n- Fetching meetings.\\n- Researching participants.\\n- Sending emails.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Be clear and concise.\\n- Highlight important details.\\n\\n🚫 Don'ts:\\n- Do not add information not provided.\\n- Do not interact with the user directly.",
"examples": "- **Parent agent** : Summarize meeting: 'Q3 Planning', 2024-08-02 10:00, participants: [Alice summary, Bob summary]\\n - **Agent response**: Meeting: Q3 Planning (2024-08-02 10:00)\\nParticipants: [ATTENDEE_1_NAME] ([ATTENDEE_1_ROLE] at [COMPANY_1]), [ATTENDEE_2_NAME] ([ATTENDEE_2_ROLE] at [COMPANY_2])\\nSummary: The meeting will focus on Q3 product roadmap and resource allocation.",
"model": "gpt-4.1",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### e. Email Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Email Agent
{
"change_description": "Created agent to send the meeting summary to the user's email.",
"config_changes": {
"name": "Email Agent",
"type": "task",
"description": "Sends the meeting summary to the user's email address.",
"instructions": "## 🧑‍💼 Role:\\nSend the provided meeting summary to the user's email address.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Receive the meeting summary and recipient email from the parent agent.\\n2. Use [@tool:Send Email](#mention) to send the summary.\\n3. Confirm delivery to the parent agent.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Sending meeting summaries via email.\\n\\n❌ Out of Scope:\\n- Fetching meetings.\\n- Researching participants.\\n- Summarizing meetings.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Ensure the summary is sent to the correct email.\\n\\n🚫 Don'ts:\\n- Do not interact with the user directly.",
"examples": "- **Parent agent** : Send summary to [USER_EMAIL]: [summary text]\\n - **Agent actions**: Call [@tool:Send Email](#mention)\\n - **Agent response**: Email sent confirmation.",
"model": "gpt-4.1",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
---
Once you review and apply the changes, you can try out a basic chat first. I can then help you better configure each agent or adjust the workflow as needed. Would you like to customize any step or add more details?
---
### Example 2: Google Doc Q&A Assistant
**User Request:**
Build me an assistant that looks at a google doc and answers users questions
*call searchRelevantTools*
searchRelevantTools output:
<returns the tool data for Google Docs that can copy in your output when you need to add tools>
**Copilot Response:**
I'll make the following changes to your workflow:
1. Add the "Get document by id" tool for retrieving the content of a Google Doc.
2. Replace the Example Agent with a single user-facing agent that:
* Asks the user for the Google Doc ID and their question.
* Uses the tool to fetch the document content.
* Answers the user's question based only on the content of the fetched Google Doc (no RAG or external search).
* Does not answer questions outside the content of the provided Google Doc.
Here are the changes:
I'm adding the "Get document by id" tool to fetch the content of a Google Doc by its ID:
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Get document by id
{
"change_description": "Added the 'Get document by id' tool to fetch the content of a Google Doc by its ID.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
I'm replacing the Example Agent with a user-facing agent that fetches a Google Doc by ID and answers questions based on its content:
\`\`\`copilot_change
// action: edit
// config_type: agent
// name: Example Agent
{
"change_description": "Replaced Example Agent with a user-facing agent that fetches a Google Doc by ID and answers user questions based only on its content.",
"config_changes": {
"name": "Google Doc QnA Assistant",
"type": "conversation",
"description": "Answers user questions based solely on the content of a specified Google Doc.",
"instructions": "## 🧑‍💼 Role:\\nYou are an assistant that answers user questions using only the content of a specified Google Doc.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Ask the user for the Google Doc ID and their question.\\n2. Use the [@tool:Get document by id](#mention) tool to fetch the document content.\\n3. Read the content of the document.\\n4. Answer the user's question using only the information found in the document. If the answer is not present in the document, politely inform the user that the information is not available.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Answering questions strictly based on the content of the provided Google Doc.\\n\\n❌ Out of Scope:\\n- Answering questions not related to the content of the provided Google Doc.\\n- Using external sources or prior knowledge.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Always fetch the document before answering.\\n- Be concise and accurate.\\n- If the answer is not in the document, say so politely.\\n\\n🚫 Don'ts:\\n- Do not use information outside the document.\\n- Do not attempt to answer unrelated questions.\\n- Do not use RAG or external search.\\n\\n# Examples\\n- **User** : What is the project deadline? The doc ID is 1A2B3C4D5E6F7G8H9I0J\\n - **Agent actions**: Call [@tool:Get document by id](#mention)\\n - **Agent response**: The project deadline is June 30, 2024. (if found in doc)\\n\\n- **User** : Who is the project manager? The doc ID is 1A2B3C4D5E6F7G8H9I0J\\n - **Agent actions**: Call [@tool:Get document by id](#mention)\\n - **Agent response**: The project manager is [PROJECT_MANAGER_NAME]. (if found in doc)\\n\\n- **User** : What is the weather today? The doc ID is 1A2B3C4D5E6F7G8H9I0J\\n - **Agent actions**: Call [@tool:Get document by id](#mention)\\n - **Agent response**: Sorry, I can only answer questions based on the content of the provided Google Doc.\\n\\n- **User** : Tell me about the budget. 
The doc ID is 1A2B3C4D5E6F7G8H9I0J\\n - **Agent actions**: Call [@tool:Get document by id](#mention)\\n - **Agent response**: The budget for the project is $50,000. (if found in doc)\\n\\n- **User** : Can you summarize the document? The doc ID is 1A2B3C4D5E6F7G8H9I0J\\n - **Agent actions**: Call [@tool:Get document by id](#mention)\\n - **Agent response**: [Provides a brief summary of the document's main points]",
"model": "gpt-4.1",
"outputVisibility": "user_facing",
"controlType": "retain"
}
}
\`\`\`
Once you review and apply the changes, you can try out a basic chat by providing a Google Doc ID and a question. I can then help you further refine the assistant if needed.
---
### Example 3: Tool Addition for LinkedIn and GitHub
**User Request:**
I want you to add tools that can be used to scrape a given linkedin URL, list users git hub repos, create a new github repo, and create or edit a file in that repository. Will make the agents later on
*call searchRelevantTools*
searchRelevantTools output:
<returns the tool data for Apify and Github that can copy in your output when you need to add tools>
**Copilot Response:**
I'll add the following tools to your workflow, matching your requirements:
1. Scrape public LinkedIn profile details from a given URL (using Apify actor runner)
2. List authenticated user's GitHub repositories
3. Create a new GitHub repository for the authenticated user
4. Create or update a text file in a GitHub repository for the authenticated user
Here are the tool additions:
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Run Actor Sync & Get Dataset Items
{
"change_description": "Add Apify tool to scrape public LinkedIn profile details from a given URL using an actor.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: List repositories for the authenticated user
{
"change_description": "Add GitHub tool to list repositories for the authenticated user.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Create a repository for the authenticated user
{
"change_description": "Add GitHub tool to create a new repository for the authenticated user.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Create or update file contents
{
"change_description": "Add GitHub tool to create or update a text file in a repository for the authenticated user.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
Once you review and apply these changes, all four tools will be available in your workflow. If you need help connecting these tools to agents or building a workflow around them, let me know!
---
### Example 4: Welcome Email Pipeline
**User Request:**
Create a pipeline that reads names + emails from a Google Sheet, personalizes a "welcome" email for each, and sends them via Gmail. The hub should just call the pipeline and report when done.
*call searchRelevantTools*
searchRelevantTools output:
<returns the tool data for Google Sheets "batch get" and Gmail "send email" that you can copy into your tool additions>
**Copilot Response:**
I'll set this up as a sequential pipeline with three pipeline agents:
1. **Step 1 (Fetch rows from Google Sheets)**
2. **Step 2 (Personalize email objects)**
3. **Step 3 (Send emails via Gmail)**
Then I'll create a **Pipeline Definition** that runs these steps in order, and update the Hub to call the pipeline and stop when it returns.
---
### 1. Add Required Tools
I'll add the suggested tools for Google Sheets and Gmail:
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Batch get spreadsheet
{
"change_description": "Add Google Sheets tool to read ranges from a spreadsheet.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Send Email
{
"change_description": "Add Gmail tool to send emails (supports html + attachments).",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
---
### 2. Create Pipeline Agents
#### a. Pipeline Step 1 — Fetch rows from Sheets
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Pipeline Step 1 - Fetch Sheet Rows
{
"change_description": "Create pipeline agent to fetch rows from Google Sheets.",
"config_changes": {
"name": "Pipeline Step 1 - Fetch Sheet Rows",
"type": "pipeline",
"description": "Reads rows (name, email) from a specified Google Sheet range.",
"instructions": "## 🧑‍💼 Role:\\nFetch all requested rows from the provided Google Sheet and ranges.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. Use [@tool:Batch get spreadsheet](#mention) with the given spreadsheet_id and ranges (e.g., A2:B).\\n2. Return a normalized array of { name, email } objects.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Fetching rows from Google Sheets and returning structured data.\\n\\n❌ Out of Scope:\\n- Personalization or sending emails.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Validate rows and skip empties.\\n🚫 Don'ts:\\n- Do not modify or send emails.",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### b. Pipeline Step 2 — Personalize emails
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Pipeline Step 2 - Personalize Emails
{
"change_description": "Create pipeline agent to build personalized email payloads.",
"config_changes": {
"name": "Pipeline Step 2 - Personalize Emails",
"type": "pipeline",
"description": "Generates {to, subject, body} for each contact.",
"instructions": "## 🧑‍💼 Role:\\nCreate a personalized email for each { name, email }.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. For each input row, produce an email object with:\\n - to: email\\n - subject: \"Welcome to the Rowboat Community!\"\\n - body: \"Hi <Name>,\\n\\nWelcome to the Rowboat community! We're excited to have you.\\n\\nCheers,\\nTeam Rowboat\"\\n2. If name is missing, infer from email local-part (dots/underscores/hyphens → spaces; title case).\\n3. Return the list of email objects.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Pure transformation into email objects.\\n\\n❌ Out of Scope:\\n- Fetching sheet rows or sending emails.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Ensure friendly tone and correct subject.\\n🚫 Don'ts:\\n- Do not send emails.",
"model": "{agent_model}"
}
}
\`\`\`
#### c. Pipeline Step 3 — Send via Gmail
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Pipeline Step 3 - Send Emails
{
"change_description": "Create pipeline agent to send emails using Gmail.",
"config_changes": {
"name": "Pipeline Step 3 - Send Emails",
"type": "pipeline",
"description": "Sends each prepared email via the Gmail tool.",
"instructions": "## 🧑‍💼 Role:\\nSend each composed email with the Gmail tool.\\n\\n---\\n## ⚙️ Steps to Follow:\\n1. For each email object, call [@tool:Send Email](#mention).\\n2. Collect per-email success/failure.\\n3. Return a summary: { sent: n, failed: m, failures: [...] }.\\n\\n---\\n## 🎯 Scope:\\n✅ In Scope:\\n- Sending emails and summarizing results.\\n\\n❌ Out of Scope:\\n- Reading sheets or composing content.\\n\\n---\\n## 📋 Guidelines:\\n✔ Dos:\\n- Be resilient (skip invalid emails; record errors).\\n🚫 Don'ts:\\n- Do not alter the provided subject/body.",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
---
### 3. Create the Pipeline Definition
\`\`\`copilot_change
// action: create_new
// config_type: pipeline
// name: Welcome Email Pipeline
{
"change_description": "Create an ordered pipeline: fetch → personalize → send.",
"config_changes": {
"name": "Welcome Email Pipeline",
"description": "Reads contacts from Sheets, personalizes welcome emails, and sends via Gmail.",
"agents": [
"Pipeline Step 1 - Fetch Sheet Rows",
"Pipeline Step 2 - Personalize Emails",
"Pipeline Step 3 - Send Emails"
]
}
}
\`\`\`
---
### 4. Update the Hub Agent
\`\`\`copilot_change
// action: edit
// config_type: agent
// name: HUB
{
"change_description": "Update hub to execute the pipeline and stop.",
"config_changes": {
"name": "HUB",
"type": "conversation",
"description": "User-facing hub that triggers the welcome email pipeline and reports completion.",
"instructions": "You must organise and ensure the pipeline completes its job of fetching contacts, personalizing emails, and sending them. Call [@pipeline:Welcome Email Pipeline](#mention).\\n\\nWhen the pipeline returns, inform the user that the job is done and STOP!",
"model": "{agent_model}",
"outputVisibility": "user_facing",
"controlType": "retain"
}
}
\`\`\`
---
Once you review and apply these changes, run a quick chat with the Hub. I can tweak ranges, subjects, or the email copy if you want something more branded.
---
### Example 5: Attendee Research & Slack Pipeline
**User Request:**
Build me an assistant that can do research on meeting attendees and send the findings to a Slack channel. And make sure it doesn't research me!
**Copilot Response:**
I can definitely build that. This will be a pipeline that first researches attendees and then sends the compiled summary to Slack.
To set this up correctly, I just need two pieces of information:
What is your email address (so I can exclude you from the research)?
What is the name of the Slack channel where you want the summary sent (e.g., general or meeting-prep)?
**User Response:**
My email is [USER_EMAIL] and the channel is [SLACK_CHANNEL].
**Copilot Response:**
Perfect. I'll set this up as a sequential pipeline with two main pipeline agents:
Step 1 (Research Agent) to look up each attendee.
Step 2 (Slack Send Agent) to post the summary.
Then I'll create a Pipeline Definition that runs these steps in order, and a Hub Agent to receive your requests and trigger the pipeline.
---
### 1. Add Required Tools
I'll add the necessary tools for web search and sending a Slack message.
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Search
{
"change_description": "Add Firecrawl tool to perform a web search and scrape content from the top results.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Send message
{
"change_description": "Add Slack tool to post a message to a channel.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
---
### 2. Create Pipeline Agents
#### a. Research Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Research Agent
{
"change_description": "Create a pipeline agent to research attendees, excluding the user.",
"config_changes": {
"name": "Research Agent",
"type": "pipeline",
"description": "Internal agent that researches meeting attendees and returns a compiled summary.",
"instructions": "## Role\\nYou are a pipeline agent that researches meeting attendees.\\n\\n---\\n## Task\\n1. You will receive attendee details from a previous step.\\n2. For each attendee, you **must** research them **one at a time** using the [@tool:Search](#mention). Do NOT research the user \`[USER_EMAIL]\`!\\n3. After all searches are complete, compile the findings into a single, plain text summary.\\n4. If no information is found for an attendee, state \"No public information found.\" for that person.\\n5. Return **only** the final compiled summary.\\n\\n---\\n## Constraint\\nDo **NOT** interact with users or send messages. Your only output is the final summary text.",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### b. Slack Send Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Slack Send Agent
{
"change_description": "Create a pipeline agent to send the final summary to the specified Slack channel.",
"config_changes": {
"name": "Slack Send Agent",
"type": "pipeline",
"description": "Internal agent that sends the compiled research summary to the specified Slack channel and returns confirmation.",
"instructions": "## Role\\nYou are a pipeline agent that sends a research summary to a Slack channel.\\n\\n---\\n## Task\\n1. You will receive a compiled text summary from the previous step.\\n2. Use the [@tool:Send message](#mention) tool to post this summary, using these parameters:\\n * **channel**: \`[SLACK_CHANNEL]\`\\n * **markdown_text**: Create a message starting with the subject \"*Meeting Attendee Research Summary*\", followed by the summary text you received.\\n3. Your job is complete after sending the message.\\n\\n---\\n## Constraint\\nDo **NOT** perform any action other than sending the Slack message as instructed.",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
---
### 3. Create the Pipeline Definition
\`\`\`copilot_change
// action: create_new
// config_type: pipeline
// name: Attendee Research & Slack Pipeline
{
"change_description": "Create an ordered pipeline: research attendees -> send summary to Slack.",
"config_changes": {
"name": "Attendee Research & Slack Pipeline",
"description": "Pipeline that researches meeting attendees and sends the compiled summary to a specified Slack channel.",
"agents": [
"Research Agent",
"Slack Send Agent"
]
}
}
\`\`\`
---
### 4. Create the Hub Agent
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Meeting Research Hub
{
"change_description": "Create a user-facing hub to trigger the research pipeline.",
"config_changes": {
"name": "Meeting Research Hub",
"type": "conversation",
"description": "Hub agent that receives meeting attendee details and triggers the research and Slack pipeline.",
"instructions": "## Role\\nYou are a hub agent whose only job is to trigger a research pipeline.\\n\\n---\\n## Task\\n1. When a user provides meeting attendee details, immediately call the [@pipeline:Attendee Research & Slack Pipeline](#mention).\\n2. Once the pipeline is called, your task is complete. Respond with only the word: \`done!\`\\n\\n---\\n## Constraint\\nDo **NOT** perform any research or send messages. Your only function is to trigger the pipeline.",
"model": "{agent_model}",
"outputVisibility": "user_facing",
"controlType": "retain"
}
}
\`\`\`
All set! Once you apply these changes, you can start a chat and provide some attendee names to kick off the research.
---
### Example 6: Automated Meeting Prep Pipeline
**User Request:**
I want to automate my meeting preparation. Can you build a workflow that, for every new calendar invite, researches the attendees, compiles a summary, and posts it to our #meeting-prep Slack channel?
**Copilot Response:**
Yes, I can build that for you. This is a perfect use case for a multi-step pipeline.
I'll create a workflow with a Hub agent that triggers a three-step pipeline:
Research Participants: Looks up each person in the invite.
Compile Summary: Consolidates the research into a clean summary.
Send to Slack: Posts the final summary to the #meeting-prep channel.
Here is the full setup.
---
### 1. Add Required Tools
First, I'll add the necessary tools for web research (Firecrawl) and sending Slack messages.
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Search
{
"change_description": "Add Firecrawl tool to perform a web search and scrape content from the top results.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
\`\`\`copilot_change
// action: create_new
// config_type: tool
// name: Send message
{
"change_description": "Add Slack tool to post a message to a channel.",
"config_changes": {
"name": < get this from the searchRelevantTools output>,
"description": < get this from the searchRelevantTools output>,
"parameters": {
"type": "object",
"properties": < get this from the searchRelevantTools output>,
"required": < get this from the searchRelevantTools output>
},
"isComposio": true,
"composioData": {
"slug": < get this from the searchRelevantTools output>,
"noAuth": false,
"toolkitName": < get this from the searchRelevantTools output>,
"toolkitSlug": < get this from the searchRelevantTools output>,
"logo": < get this from the searchRelevantTools output>
}
}
}
\`\`\`
---
### 2. Create Pipeline Agents
Now, I'll create the three agents that will execute each step of the pipeline.
#### a. Research Participants
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Research Participants
{
"change_description": "Create a pipeline agent to research each meeting participant using Firecrawl.",
"config_changes": {
"name": "Research Participants",
"type": "pipeline",
"description": "Researches each participant using Firecrawl and returns a summary for each.",
"instructions": "1. Extract the names of the participants from the calendar invite \\n2. Do a search using [@tool:Search](#mention) for each of them",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### b. Compile Summary
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Compile Summary
{
"change_description": "Create a pipeline agent to compile participant research into a single summary.",
"config_changes": {
"name": "Compile Summary",
"type": "pipeline",
"description": "Compiles a meeting prep summary from participant research results.",
"instructions": "Summarize the participant research into one paragraph each.",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
#### c. Send to Slack
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Send to Slack
{
"change_description": "Create a pipeline agent to send the final summary to the #meeting-prep channel.",
"config_changes": {
"name": "Send to Slack",
"type": "pipeline",
"description": "Sends the compiled meeting prep summary to the Slack channel meeting-prep.",
"instructions": "Send this to slack channel #meeting-prep using [@tool:Send message](#mention)",
"model": "{agent_model}",
"outputVisibility": "internal",
"controlType": "relinquish_to_parent"
}
}
\`\`\`
---
### 3. Create the Pipeline Definition
Next, I'll define the pipeline to run these agents in the correct order.
\`\`\`copilot_change
// action: create_new
// config_type: pipeline
// name: Meeting Prep Pipeline
{
"change_description": "Create an ordered pipeline: research -> compile -> send to Slack.",
"config_changes": {
"name": "Meeting Prep Pipeline",
"description": "Pipeline for meeting prep: research participants, compile summary, send to Slack.",
"agents": [
"Research Participants",
"Compile Summary",
"Send to Slack"
]
}
}
\`\`\`
---
### 4. Create the Hub Agent
Finally, I'll create the user-facing Hub agent that will be triggered by your calendar events.
\`\`\`copilot_change
// action: create_new
// config_type: agent
// name: Meeting Prep Hub
{
"change_description": "Create a user-facing hub to trigger the meeting prep pipeline.",
"config_changes": {
"name": "Meeting Prep Hub",
"type": "conversation",
"description": "Hub agent to orchestrate the meeting prep pipeline for every calendar invite.",
"instructions": "Call the Meeting Prep Pipeline.",
"model": "{agent_model}",
"outputVisibility": "user_facing",
"controlType": "retain"
}
}
\`\`\`
This workflow is now ready. Once you apply the changes, it will automatically handle your meeting prep based on new calendar invites.
---
`;

View file

@ -0,0 +1,71 @@
import { z } from "zod";
import { Workflow } from "@/app/lib/types/workflow_types";
import { Message } from "@/app/lib/types/types";
import { DataSource } from "@/src/entities/models/data-source";
// Narrowed projection of a data source shared with the copilot:
// only the identifying/descriptive fields, not the full entity.
export const DataSourceSchemaForCopilot = DataSource.pick({
    id: true,
    name: true,
    description: true,
    data: true,
});
// A user turn in the copilot conversation.
export const CopilotUserMessage = z.object({
    role: z.literal('user'),
    content: z.string(),
});
// Free-form text part of an assistant message.
export const CopilotAssistantMessageTextPart = z.object({
    type: z.literal("text"),
    content: z.string(),
});
// An "action" part of an assistant message: a proposed configuration
// change (create or edit) targeting one workflow object by name.
export const CopilotAssistantMessageActionPart = z.object({
    type: z.literal("action"),
    content: z.object({
        // Which kind of workflow object the change applies to.
        config_type: z.enum(['tool', 'agent', 'prompt', 'pipeline']),
        // Whether a new object is created or an existing one is edited.
        action: z.enum(['create_new', 'edit']),
        name: z.string(),
        change_description: z.string(),
        config_changes: z.record(z.string(), z.unknown()),
        error: z.string().optional(),
    }),
});
// An assistant turn in the copilot conversation.
// NOTE(review): content is a raw string here even though text/action
// part schemas are declared above — presumably the string is parsed
// into parts elsewhere; confirm against the consumer.
export const CopilotAssistantMessage = z.object({
    role: z.literal('assistant'),
    content: z.string(),
});
// Any turn in the copilot conversation (user or assistant).
export const CopilotMessage = z.union([CopilotUserMessage, CopilotAssistantMessage]);
// Context attached to a copilot request: either the current chat
// transcript, or a reference to a specific agent/tool/prompt by name.
export const CopilotChatContext = z.union([
    z.object({ type: z.literal('chat'), messages: z.array(Message) }),
    z.object({ type: z.literal('agent'), name: z.string() }),
    z.object({ type: z.literal('tool'), name: z.string() }),
    z.object({ type: z.literal('prompt'), name: z.string() }),
]);
// Request payload for the copilot API: the conversation so far plus the
// workflow being edited, optional chat context, and optional data sources.
export const CopilotAPIRequest = z.object({
    projectId: z.string(),
    messages: z.array(CopilotMessage),
    workflow: Workflow,
    context: CopilotChatContext.nullable(),
    dataSources: z.array(DataSourceSchemaForCopilot).optional(),
});
// Response payload from the copilot API: success carries the assistant
// text, failure carries an error message.
export const CopilotAPIResponse = z.union([
    z.object({ response: z.string() }),
    z.object({ error: z.string() }),
]);

View file

@ -3,7 +3,7 @@ import { USE_BILLING } from "@/app/lib/feature_flags";
import { authorize, getCustomerIdForProject, logUsage, UsageTracker } from "@/app/lib/billing";
import { NotFoundError } from '@/src/entities/errors/common';
import { IConversationsRepository } from "@/src/application/repositories/conversations.repository.interface";
import { streamResponse } from "@/app/lib/agents";
import { streamResponse } from "@/src/application/lib/agents-runtime/agents";
import { z } from "zod";
import { Message } from "@/app/lib/types/types";
import { IUsageQuotaPolicy } from '../../policies/usage-quota.policy.interface';