mirror of
https://github.com/rowboatlabs/rowboat.git
synced 2026-04-25 08:26:22 +02:00
Cli to dev (#309)
* add workspace access guidelines to instructions * updated example * removed incorrect example * add --example to add the examples from rowboat * changed --example to --sync-example * rename sync-examples option to sync-example in CLI * fix: sync-example implementation * refactor example import * fix yargs * fix: - remove changes to package-lock - remove output messages from app.js and moved them into importExample * fix: restore package-lock.json to match main (remove diff) * fix: naming of the commands * update: made import-example into import and it can import example workflows or user made workflows * update: added export capability * delete: remove misplaced podcast.json file * removed incomplete gemini3-test example json * remove: eliminate gemini3-test example from exports * Fix: better prompting around MCP config Add: copilot tool to add MCP servers * clean up prompt --------- Co-authored-by: Ramnique Singh <30795890+ramnique@users.noreply.github.com>
This commit is contained in:
parent
255bc9c48d
commit
e47518b98f
12 changed files with 1420 additions and 171 deletions
|
|
@ -2,13 +2,18 @@ import { AgentState, streamAgent } from "./application/lib/agent.js";
|
|||
import { StreamRenderer } from "./application/lib/stream-renderer.js";
|
||||
import { stdin as input, stdout as output } from "node:process";
|
||||
import fs from "fs";
|
||||
import { promises as fsp } from "fs";
|
||||
import path from "path";
|
||||
import { WorkDir, getModelConfig, updateModelConfig } from "./application/config/config.js";
|
||||
import { RunEvent } from "./application/entities/run-events.js";
|
||||
import { createInterface, Interface } from "node:readline/promises";
|
||||
import { ToolCallPart } from "./application/entities/message.js";
|
||||
import { keyof, z } from "zod";
|
||||
import { Flavor, ModelConfig } from "./application/entities/models.js";
|
||||
import { Agent } from "./application/entities/agent.js";
|
||||
import { McpServerConfig, McpServerDefinition } from "./application/entities/mcp.js";
|
||||
import { Example } from "./application/entities/example.js";
|
||||
import { z } from "zod";
|
||||
import { Flavor } from "./application/entities/models.js";
|
||||
import { examples } from "./examples/index.js";
|
||||
|
||||
export async function updateState(agent: string, runId: string) {
|
||||
const state = new AgentState(agent, runId);
|
||||
|
|
@ -412,4 +417,216 @@ function renderCurrentModel(provider: string, flavor: string, model: string) {
|
|||
console.log(`- provider: ${provider}${flavor ? ` (${flavor})` : ""}`);
|
||||
console.log(`- model: ${model}`);
|
||||
console.log("");
|
||||
}
|
||||
}
|
||||
|
||||
async function listAvailableExamples(): Promise<string[]> {
|
||||
return Object.keys(examples);
|
||||
}
|
||||
|
||||
async function writeAgents(agents: z.infer<typeof Agent>[] | undefined) {
|
||||
if (!agents) {
|
||||
return;
|
||||
}
|
||||
await fsp.mkdir(path.join(WorkDir, "agents"), { recursive: true });
|
||||
await Promise.all(
|
||||
agents.map(async (agent) => {
|
||||
const agentPath = path.join(WorkDir, "agents", `${agent.name}.json`);
|
||||
await fsp.writeFile(agentPath, JSON.stringify(agent, null, 2), "utf8");
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
async function mergeMcpServers(servers: Record<string, z.infer<typeof McpServerDefinition>>) {
|
||||
const result = { added: [] as string[], skipped: [] as string[] };
|
||||
|
||||
// Early return if no servers to process
|
||||
if (!servers || Object.keys(servers).length === 0) {
|
||||
return result;
|
||||
}
|
||||
|
||||
const configPath = path.join(WorkDir, "config", "mcp.json");
|
||||
|
||||
// Read existing config
|
||||
let currentConfig: z.infer<typeof McpServerConfig> = { mcpServers: {} };
|
||||
try {
|
||||
const contents = await fsp.readFile(configPath, "utf8");
|
||||
currentConfig = McpServerConfig.parse(JSON.parse(contents));
|
||||
} catch (error: any) {
|
||||
if (error?.code !== "ENOENT") {
|
||||
throw new Error(`Unable to read MCP config: ${error.message ?? error}`);
|
||||
}
|
||||
// File doesn't exist yet, use empty config
|
||||
}
|
||||
|
||||
// Merge servers
|
||||
for (const [name, definition] of Object.entries(servers)) {
|
||||
if (currentConfig.mcpServers[name]) {
|
||||
result.skipped.push(name);
|
||||
} else {
|
||||
currentConfig.mcpServers[name] = definition;
|
||||
result.added.push(name);
|
||||
}
|
||||
}
|
||||
|
||||
// Only write if we added new servers
|
||||
if (result.added.length > 0) {
|
||||
await fsp.mkdir(path.dirname(configPath), { recursive: true });
|
||||
await fsp.writeFile(configPath, JSON.stringify(currentConfig, null, 2), "utf8");
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
export async function importExample(exampleName?: string, filePath?: string) {
|
||||
let example: z.infer<typeof Example>;
|
||||
let sourceName: string;
|
||||
|
||||
if (exampleName) {
|
||||
// Load from built-in examples
|
||||
example = examples[exampleName];
|
||||
if (!example) {
|
||||
const availableExamples = Object.keys(examples);
|
||||
const listMessage = availableExamples.length
|
||||
? `Available examples: ${availableExamples.join(", ")}`
|
||||
: "No packaged examples are available.";
|
||||
throw new Error(`Unknown example '${exampleName}'. ${listMessage}`);
|
||||
}
|
||||
sourceName = exampleName;
|
||||
} else if (filePath) {
|
||||
// Load from file path
|
||||
try {
|
||||
const fileContent = await fsp.readFile(filePath, "utf8");
|
||||
example = Example.parse(JSON.parse(fileContent));
|
||||
sourceName = path.basename(filePath, ".json");
|
||||
} catch (error: any) {
|
||||
if (error?.code === "ENOENT") {
|
||||
throw new Error(`File not found: ${filePath}`);
|
||||
} else if (error?.name === "ZodError") {
|
||||
throw new Error(`Invalid workflow file format: ${error.message}`);
|
||||
}
|
||||
throw new Error(`Failed to read workflow file: ${error.message ?? error}`);
|
||||
}
|
||||
} else {
|
||||
throw new Error("Either exampleName or filePath must be provided");
|
||||
}
|
||||
|
||||
// Import agents and MCP servers
|
||||
await writeAgents(example.agents);
|
||||
let serverMerge = { added: [] as string[], skipped: [] as string[] };
|
||||
if (example.mcpServers) {
|
||||
serverMerge = await mergeMcpServers(example.mcpServers);
|
||||
}
|
||||
|
||||
// Build and display output message
|
||||
const importedAgents = example.agents?.map((agent) => agent.name) ?? [];
|
||||
const entryAgent = example.entryAgent ?? importedAgents[0] ?? "";
|
||||
|
||||
const output = [
|
||||
`✓ Imported workflow '${sourceName}'`,
|
||||
` Agents: ${importedAgents.join(", ")}`,
|
||||
` Primary: ${entryAgent}`,
|
||||
];
|
||||
|
||||
if (serverMerge.added.length > 0) {
|
||||
output.push(` MCP servers added: ${serverMerge.added.join(", ")}`);
|
||||
}
|
||||
if (serverMerge.skipped.length > 0) {
|
||||
output.push(` MCP servers skipped (already configured): ${serverMerge.skipped.join(", ")}`);
|
||||
}
|
||||
|
||||
console.log(output.join("\n"));
|
||||
|
||||
// Display post-install instructions if present
|
||||
if (example.instructions) {
|
||||
console.log("\n" + "=".repeat(60));
|
||||
console.log("POST-INSTALL INSTRUCTIONS");
|
||||
console.log("=".repeat(60));
|
||||
console.log(example.instructions);
|
||||
console.log("=".repeat(60) + "\n");
|
||||
}
|
||||
|
||||
// Display next steps
|
||||
console.log(`\nRun: rowboatx --agent ${entryAgent}`);
|
||||
}
|
||||
|
||||
export async function listExamples() {
|
||||
return listAvailableExamples();
|
||||
}
|
||||
|
||||
export async function exportWorkflow(entryAgentName: string) {
|
||||
const agentsDir = path.join(WorkDir, "agents");
|
||||
const mcpConfigPath = path.join(WorkDir, "config", "mcp.json");
|
||||
|
||||
// Read MCP config
|
||||
let mcpConfig: z.infer<typeof McpServerConfig> = { mcpServers: {} };
|
||||
try {
|
||||
const mcpContent = await fsp.readFile(mcpConfigPath, "utf8");
|
||||
mcpConfig = McpServerConfig.parse(JSON.parse(mcpContent));
|
||||
} catch (error: any) {
|
||||
if (error?.code !== "ENOENT") {
|
||||
throw new Error(`Failed to read MCP config: ${error.message ?? error}`);
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively discover all agents and MCP servers
|
||||
const discoveredAgents = new Map<string, z.infer<typeof Agent>>();
|
||||
const discoveredMcpServers = new Set<string>();
|
||||
|
||||
async function discoverAgent(agentName: string) {
|
||||
if (discoveredAgents.has(agentName)) {
|
||||
return; // Already processed
|
||||
}
|
||||
|
||||
// Load agent
|
||||
const agentPath = path.join(agentsDir, `${agentName}.json`);
|
||||
let agentContent: string;
|
||||
try {
|
||||
agentContent = await fsp.readFile(agentPath, "utf8");
|
||||
} catch (error: any) {
|
||||
if (error?.code === "ENOENT") {
|
||||
throw new Error(`Agent not found: ${agentName}`);
|
||||
}
|
||||
throw new Error(`Failed to read agent ${agentName}: ${error.message ?? error}`);
|
||||
}
|
||||
|
||||
const agent = Agent.parse(JSON.parse(agentContent));
|
||||
discoveredAgents.set(agentName, agent);
|
||||
|
||||
// Process tools
|
||||
if (agent.tools) {
|
||||
for (const [toolKey, tool] of Object.entries(agent.tools)) {
|
||||
if (tool.type === "agent") {
|
||||
// Recursively discover dependent agent
|
||||
await discoverAgent(tool.name);
|
||||
} else if (tool.type === "mcp") {
|
||||
// Track MCP server
|
||||
discoveredMcpServers.add(tool.mcpServerName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Start discovery from entry agent
|
||||
await discoverAgent(entryAgentName);
|
||||
|
||||
// Build MCP servers object
|
||||
const workflowMcpServers: Record<string, z.infer<typeof McpServerDefinition>> = {};
|
||||
for (const serverName of discoveredMcpServers) {
|
||||
if (mcpConfig.mcpServers[serverName]) {
|
||||
workflowMcpServers[serverName] = mcpConfig.mcpServers[serverName];
|
||||
} else {
|
||||
throw new Error(`MCP server '${serverName}' is referenced but not found in config`);
|
||||
}
|
||||
}
|
||||
|
||||
// Build workflow object
|
||||
const workflow: z.infer<typeof Example> = {
|
||||
id: entryAgentName,
|
||||
entryAgent: entryAgentName,
|
||||
agents: Array.from(discoveredAgents.values()),
|
||||
...(Object.keys(workflowMcpServers).length > 0 ? { mcpServers: workflowMcpServers } : {}),
|
||||
};
|
||||
|
||||
// Output to stdout
|
||||
console.log(JSON.stringify(workflow, null, 2));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,17 +27,27 @@ Always consult this catalog first so you load the right skills before taking act
|
|||
- Use relative paths (no \${BASE_DIR} prefixes) when running commands or referencing files.
|
||||
- Keep user data safe—double-check before editing or deleting important resources.
|
||||
|
||||
## Workspace access & scope
|
||||
- You have full read/write access inside \`${BASE_DIR}\` (this resolves to the user's \`~/.rowboat\` directory). Create folders, files, and agents there using builtin tools or allowed shell commands—don't wait for the user to do it manually.
|
||||
- If a user mentions a different root (e.g., \`~/.rowboatx\` or another path), clarify whether they meant the Rowboat workspace and propose the equivalent path you can act on. Only refuse if they explicitly insist on an inaccessible location.
|
||||
- Prefer builtin file tools (\`createFile\`, \`updateFile\`, \`deleteFile\`, \`exploreDirectory\`) for workspace changes. Reserve refusal or "you do it" responses for cases that are truly outside the Rowboat sandbox.
|
||||
|
||||
## Builtin Tools vs Shell Commands
|
||||
|
||||
**IMPORTANT**: Rowboat provides builtin tools that are internal and do NOT require security allowlist entries:
|
||||
- \`deleteFile\`, \`createFile\`, \`updateFile\`, \`readFile\` - File operations
|
||||
- \`listFiles\`, \`exploreDirectory\` - Directory exploration
|
||||
- \`analyzeAgent\` - Agent analysis
|
||||
- \`listMcpServers\`, \`listMcpTools\` - MCP server management
|
||||
- \`addMcpServer\`, \`listMcpServers\`, \`listMcpTools\` - MCP server management
|
||||
- \`loadSkill\` - Skill loading
|
||||
|
||||
These tools work directly and are NOT filtered by \`.rowboat/config/security.json\`.
|
||||
|
||||
**CRITICAL: MCP Server Configuration**
|
||||
- ALWAYS use the \`addMcpServer\` builtin tool to add or update MCP servers—it validates the configuration before saving
|
||||
- NEVER manually edit \`config/mcp.json\` using \`createFile\` or \`updateFile\` for MCP servers
|
||||
- Invalid MCP configs will prevent the agent from starting with validation errors
|
||||
|
||||
**Only \`executeCommand\` (shell/bash commands) is filtered** by the security allowlist. If you need to delete a file, use the \`deleteFile\` builtin tool, not \`executeCommand\` with \`rm\`. If you need to create a file, use \`createFile\`, not \`executeCommand\` with \`touch\` or \`echo >\`.
|
||||
|
||||
The security allowlist in \`security.json\` only applies to shell commands executed via \`executeCommand\`, not to Rowboat's internal builtin tools.
|
||||
|
|
|
|||
|
|
@ -1,22 +1,225 @@
|
|||
export const skill = String.raw`
|
||||
# MCP Integration Guidance
|
||||
|
||||
Load this skill whenever a user asks about external tools, MCP servers, or how to extend an agent’s capabilities.
|
||||
Load this skill whenever a user asks about external tools, MCP servers, or how to extend an agent's capabilities.
|
||||
|
||||
## Key concepts
|
||||
- MCP servers expose tools (web scraping, APIs, databases, etc.) declared in \`config/mcp.json\`.
|
||||
- Agents reference MCP tools through the \`"tools"\` block by specifying \`type\`, \`name\`, \`description\`, \`mcpServerName\`, and a full \`inputSchema\`.
|
||||
- Tool schemas can include optional property descriptions; only include \`"required"\` when parameters are mandatory.
|
||||
|
||||
## CRITICAL: Adding MCP Servers
|
||||
|
||||
**ALWAYS use the \`addMcpServer\` builtin tool** to add or update MCP server configurations. This tool validates the configuration before saving and prevents startup errors.
|
||||
|
||||
**NEVER manually create or edit \`config/mcp.json\`** using \`createFile\` or \`updateFile\` for MCP servers—this bypasses validation and will cause errors.
|
||||
|
||||
### MCP Server Configuration Schema
|
||||
|
||||
There are TWO types of MCP servers:
|
||||
|
||||
#### 1. STDIO (Command-based) Servers
|
||||
For servers that run as local processes (Node.js, Python, etc.):
|
||||
|
||||
**Required fields:**
|
||||
- \`command\`: string (e.g., "npx", "node", "python", "uvx")
|
||||
|
||||
**Optional fields:**
|
||||
- \`args\`: array of strings (command arguments)
|
||||
- \`env\`: object with string key-value pairs (environment variables)
|
||||
- \`type\`: "stdio" (optional, inferred from presence of \`command\`)
|
||||
|
||||
**Schema:**
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "stdio",
|
||||
"command": "string (REQUIRED)",
|
||||
"args": ["string", "..."],
|
||||
"env": {
|
||||
"KEY": "value"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Valid STDIO examples:**
|
||||
\`\`\`json
|
||||
{
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/data"]
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
\`\`\`json
|
||||
{
|
||||
"command": "python",
|
||||
"args": ["-m", "mcp_server_git"],
|
||||
"env": {
|
||||
"GIT_REPO_PATH": "/path/to/repo"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
\`\`\`json
|
||||
{
|
||||
"command": "uvx",
|
||||
"args": ["mcp-server-fetch"]
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
#### 2. HTTP/SSE Servers
|
||||
For servers that expose HTTP or Server-Sent Events endpoints:
|
||||
|
||||
**Required fields:**
|
||||
- \`url\`: string (complete URL including protocol and path)
|
||||
|
||||
**Optional fields:**
|
||||
- \`headers\`: object with string key-value pairs (HTTP headers)
|
||||
- \`type\`: "http" (optional, inferred from presence of \`url\`)
|
||||
|
||||
**Schema:**
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "http",
|
||||
"url": "string (REQUIRED)",
|
||||
"headers": {
|
||||
"Authorization": "Bearer token",
|
||||
"Custom-Header": "value"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Valid HTTP examples:**
|
||||
\`\`\`json
|
||||
{
|
||||
"url": "http://localhost:3000/sse"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
\`\`\`json
|
||||
{
|
||||
"url": "https://api.example.com/mcp",
|
||||
"headers": {
|
||||
"Authorization": "Bearer sk-1234567890"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Common Validation Errors to Avoid
|
||||
|
||||
❌ **WRONG - Missing required field:**
|
||||
\`\`\`json
|
||||
{
|
||||
"args": ["some-arg"]
|
||||
}
|
||||
\`\`\`
|
||||
Error: Missing \`command\` for stdio OR \`url\` for http
|
||||
|
||||
❌ **WRONG - Empty object:**
|
||||
\`\`\`json
|
||||
{}
|
||||
\`\`\`
|
||||
Error: Must have either \`command\` (stdio) or \`url\` (http)
|
||||
|
||||
❌ **WRONG - Mixed types:**
|
||||
\`\`\`json
|
||||
{
|
||||
"command": "npx",
|
||||
"url": "http://localhost:3000"
|
||||
}
|
||||
\`\`\`
|
||||
Error: Cannot have both \`command\` and \`url\`
|
||||
|
||||
✅ **CORRECT - Minimal stdio:**
|
||||
\`\`\`json
|
||||
{
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-time"]
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
✅ **CORRECT - Minimal http:**
|
||||
\`\`\`json
|
||||
{
|
||||
"url": "http://localhost:3000/sse"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Using addMcpServer Tool
|
||||
|
||||
**Example 1: Add stdio server**
|
||||
\`\`\`json
|
||||
{
|
||||
"serverName": "filesystem",
|
||||
"serverType": "stdio",
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/Users/me/data"]
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Example 2: Add HTTP server**
|
||||
\`\`\`json
|
||||
{
|
||||
"serverName": "custom-api",
|
||||
"serverType": "http",
|
||||
"url": "https://api.example.com/mcp",
|
||||
"headers": {
|
||||
"Authorization": "Bearer token123"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Example 3: Add Python MCP server**
|
||||
\`\`\`json
|
||||
{
|
||||
"serverName": "github",
|
||||
"serverType": "stdio",
|
||||
"command": "python",
|
||||
"args": ["-m", "mcp_server_github"],
|
||||
"env": {
|
||||
"GITHUB_TOKEN": "ghp_xxxxx"
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
## Operator actions
|
||||
1. Use \`listMcpServers\` to enumerate configured servers.
|
||||
2. Use \`listMcpTools\` for a server to understand the available operations and schemas.
|
||||
3. Explain which MCP tools match the user’s needs before editing agent definitions.
|
||||
4. When adding a tool to an agent, document what it does and ensure the schema mirrors the MCP definition.
|
||||
2. Use \`addMcpServer\` to add or update MCP server configurations (with validation).
|
||||
3. Use \`listMcpTools\` for a server to understand the available operations and schemas.
|
||||
4. Explain which MCP tools match the user's needs before editing agent definitions.
|
||||
5. When adding a tool to an agent, document what it does and ensure the schema mirrors the MCP definition.
|
||||
|
||||
## Example snippets to reference
|
||||
- Firecrawl search (required param):
|
||||
## Adding MCP Tools to Agents
|
||||
|
||||
Once an MCP server is configured, add its tools to agent definitions:
|
||||
|
||||
### MCP Tool Format in Agent
|
||||
\`\`\`json
|
||||
"tools": {
|
||||
"descriptive_key": {
|
||||
"type": "mcp",
|
||||
"name": "actual_tool_name_from_server",
|
||||
"description": "What the tool does",
|
||||
"mcpServerName": "server_name_from_config",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"param1": {"type": "string", "description": "What param1 means"}
|
||||
},
|
||||
"required": ["param1"]
|
||||
}
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Tool Schema Rules
|
||||
- Use \`listMcpTools\` to get the exact \`inputSchema\` from the server
|
||||
- Copy the schema exactly as provided by the MCP server
|
||||
- Only include \`"required"\` array if parameters are truly mandatory
|
||||
- Add descriptions to help the agent understand parameter usage
|
||||
|
||||
### Example snippets to reference
|
||||
- Firecrawl search (required param):
|
||||
\`\`\`json
|
||||
"tools": {
|
||||
"search": {
|
||||
"type": "mcp",
|
||||
|
|
@ -34,8 +237,9 @@ Load this skill whenever a user asks about external tools, MCP servers, or how t
|
|||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
- ElevenLabs text-to-speech (no required array):
|
||||
\`\`\`
|
||||
\`\`\`json
|
||||
"tools": {
|
||||
"text_to_speech": {
|
||||
"type": "mcp",
|
||||
|
|
@ -52,9 +256,13 @@ Load this skill whenever a user asks about external tools, MCP servers, or how t
|
|||
}
|
||||
\`\`\`
|
||||
|
||||
|
||||
## Safety reminders
|
||||
- Only recommend MCP tools that are actually configured.
|
||||
- Clarify any missing details (required parameters, server names) before modifying files.
|
||||
- ALWAYS use \`addMcpServer\` to configure MCP servers—never manually edit config files
|
||||
- Only recommend MCP tools that are actually configured (use \`listMcpServers\` first)
|
||||
- Clarify any missing details (required parameters, server names) before modifying files
|
||||
- Test server connection with \`listMcpTools\` after adding a new server
|
||||
- Invalid MCP configs prevent agents from starting—validation is critical
|
||||
`;
|
||||
|
||||
export default skill;
|
||||
|
|
|
|||
|
|
@ -19,7 +19,45 @@ Load this skill whenever a user wants to inspect, create, or update agents insid
|
|||
3. The orchestrator calls other agents as tools when needed
|
||||
4. Data flows through tool call parameters and responses
|
||||
|
||||
## Agent format
|
||||
## Agent File Schema
|
||||
|
||||
Agent files MUST conform to this exact schema. Invalid agents will fail to load.
|
||||
|
||||
### Complete Agent Schema
|
||||
\`\`\`json
|
||||
{
|
||||
"name": "string (REQUIRED, must match filename without .json)",
|
||||
"description": "string (REQUIRED, what this agent does)",
|
||||
"instructions": "string (REQUIRED, detailed instructions for the agent)",
|
||||
"model": "string (OPTIONAL, e.g., 'gpt-5.1', 'claude-sonnet-4-5')",
|
||||
"provider": "string (OPTIONAL, provider alias from models.json)",
|
||||
"tools": {
|
||||
"descriptive_key": {
|
||||
"type": "builtin | mcp | agent (REQUIRED)",
|
||||
"name": "string (REQUIRED)",
|
||||
// Additional fields depend on type - see below
|
||||
}
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Required Fields
|
||||
- \`name\`: Agent identifier (must exactly match the filename without .json)
|
||||
- \`description\`: Brief description of agent's purpose
|
||||
- \`instructions\`: Detailed instructions for how the agent should behave
|
||||
|
||||
### Optional Fields
|
||||
- \`model\`: Model to use (defaults to model config if not specified)
|
||||
- \`provider\`: Provider alias from models.json (optional)
|
||||
- \`tools\`: Object containing tool definitions (can be empty or omitted)
|
||||
|
||||
### Naming Rules
|
||||
- Agent filename MUST match the \`name\` field exactly
|
||||
- Example: If \`name\` is "summariser_agent", file must be "summariser_agent.json"
|
||||
- Use lowercase with underscores for multi-word names
|
||||
- No spaces or special characters in names
|
||||
|
||||
### Agent Format Example
|
||||
\`\`\`json
|
||||
{
|
||||
"name": "agent_name",
|
||||
|
|
@ -43,9 +81,26 @@ Load this skill whenever a user wants to inspect, create, or update agents insid
|
|||
}
|
||||
\`\`\`
|
||||
|
||||
## Tool types
|
||||
## Tool Types & Schemas
|
||||
|
||||
### Builtin tools
|
||||
Tools in agents must follow one of three types. Each has specific required fields.
|
||||
|
||||
### 1. Builtin Tools
|
||||
Internal Rowboat tools (executeCommand, file operations, MCP queries, etc.)
|
||||
|
||||
**Schema:**
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "builtin",
|
||||
"name": "tool_name"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Required fields:**
|
||||
- \`type\`: Must be "builtin"
|
||||
- \`name\`: Builtin tool name (e.g., "executeCommand", "readFile")
|
||||
|
||||
**Example:**
|
||||
\`\`\`json
|
||||
"bash": {
|
||||
"type": "builtin",
|
||||
|
|
@ -53,7 +108,42 @@ Load this skill whenever a user wants to inspect, create, or update agents insid
|
|||
}
|
||||
\`\`\`
|
||||
|
||||
### MCP tools
|
||||
**Available builtin tools:**
|
||||
- \`executeCommand\` - Execute shell commands
|
||||
- \`readFile\`, \`createFile\`, \`updateFile\`, \`deleteFile\` - File operations
|
||||
- \`listFiles\`, \`exploreDirectory\` - Directory operations
|
||||
- \`analyzeAgent\` - Analyze agent structure
|
||||
- \`addMcpServer\`, \`listMcpServers\`, \`listMcpTools\` - MCP management
|
||||
- \`loadSkill\` - Load skill guidance
|
||||
|
||||
### 2. MCP Tools
|
||||
Tools from external MCP servers (APIs, databases, web scraping, etc.)
|
||||
|
||||
**Schema:**
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "mcp",
|
||||
"name": "tool_name_from_server",
|
||||
"description": "What the tool does",
|
||||
"mcpServerName": "server_name_from_config",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"param": {"type": "string", "description": "Parameter description"}
|
||||
},
|
||||
"required": ["param"]
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Required fields:**
|
||||
- \`type\`: Must be "mcp"
|
||||
- \`name\`: Exact tool name from MCP server
|
||||
- \`description\`: What the tool does (helps agent understand when to use it)
|
||||
- \`mcpServerName\`: Server name from config/mcp.json
|
||||
- \`inputSchema\`: Full JSON Schema object for tool parameters
|
||||
|
||||
**Example:**
|
||||
\`\`\`json
|
||||
"search": {
|
||||
"type": "mcp",
|
||||
|
|
@ -70,17 +160,40 @@ Load this skill whenever a user wants to inspect, create, or update agents insid
|
|||
}
|
||||
\`\`\`
|
||||
|
||||
### Agent tools (for chaining agents)
|
||||
**Important:**
|
||||
- Use \`listMcpTools\` to get the exact inputSchema from the server
|
||||
- Copy the schema exactly—don't modify property types or structure
|
||||
- Only include \`"required"\` array if parameters are mandatory
|
||||
|
||||
### 3. Agent Tools (for chaining agents)
|
||||
Reference other agents as tools to build multi-agent workflows
|
||||
|
||||
**Schema:**
|
||||
\`\`\`json
|
||||
{
|
||||
"type": "agent",
|
||||
"name": "target_agent_name"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**Required fields:**
|
||||
- \`type\`: Must be "agent"
|
||||
- \`name\`: Name of the target agent (must exist in agents/ directory)
|
||||
|
||||
**Example:**
|
||||
\`\`\`json
|
||||
"summariser": {
|
||||
"type": "agent",
|
||||
"name": "summariser_agent"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
**How it works:**
|
||||
- Use \`"type": "agent"\` to call other agents as tools
|
||||
- The target agent will be invoked with the parameters you pass
|
||||
- Results are returned as tool output
|
||||
- This is how you build multi-agent workflows
|
||||
- The referenced agent file must exist (e.g., agents/summariser_agent.json)
|
||||
|
||||
## Complete Multi-Agent Workflow Example
|
||||
|
||||
|
|
@ -156,13 +269,88 @@ Load this skill whenever a user wants to inspect, create, or update agents insid
|
|||
5. **Tool naming**: Use descriptive tool keys (e.g., "summariser", "fetch_data", "analyze")
|
||||
6. **Orchestration**: Create a top-level agent that coordinates the workflow
|
||||
|
||||
## Validation & Best Practices
|
||||
|
||||
### CRITICAL: Schema Compliance
|
||||
- Agent files MUST have \`name\`, \`description\`, and \`instructions\` fields
|
||||
- Agent filename MUST exactly match the \`name\` field
|
||||
- Tools MUST have valid \`type\` ("builtin", "mcp", or "agent")
|
||||
- MCP tools MUST have all required fields: name, description, mcpServerName, inputSchema
|
||||
- Agent tools MUST reference existing agent files
|
||||
- Invalid agents will fail to load and prevent workflow execution
|
||||
|
||||
### File Creation/Update Process
|
||||
1. When creating an agent, use \`createFile\` with complete, valid JSON
|
||||
2. When updating an agent, read it first with \`readFile\`, modify, then use \`updateFile\`
|
||||
3. Validate JSON syntax before writing—malformed JSON breaks the agent
|
||||
4. Test agent loading after creation/update by using \`analyzeAgent\`
|
||||
|
||||
### Common Validation Errors to Avoid
|
||||
|
||||
❌ **WRONG - Missing required fields:**
|
||||
\`\`\`json
|
||||
{
|
||||
"name": "my_agent"
|
||||
// Missing description and instructions
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
❌ **WRONG - Filename mismatch:**
|
||||
- File: agents/my_agent.json
|
||||
- Content: {"name": "myagent", ...}
|
||||
|
||||
❌ **WRONG - Invalid tool type:**
|
||||
\`\`\`json
|
||||
"tool1": {
|
||||
"type": "custom", // Invalid type
|
||||
"name": "something"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
❌ **WRONG - MCP tool missing required fields:**
|
||||
\`\`\`json
|
||||
"search": {
|
||||
"type": "mcp",
|
||||
"name": "firecrawl_search"
|
||||
// Missing: description, mcpServerName, inputSchema
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
✅ **CORRECT - Minimal valid agent:**
|
||||
\`\`\`json
|
||||
{
|
||||
"name": "simple_agent",
|
||||
"description": "A simple agent",
|
||||
"instructions": "Do simple tasks"
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
✅ **CORRECT - Complete MCP tool:**
|
||||
\`\`\`json
|
||||
"search": {
|
||||
"type": "mcp",
|
||||
"name": "firecrawl_search",
|
||||
"description": "Search the web",
|
||||
"mcpServerName": "firecrawl",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {"type": "string"}
|
||||
}
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
## Capabilities checklist
|
||||
1. Explore \`agents/\` directory to understand existing agents before editing
|
||||
2. Update files carefully to maintain schema validity
|
||||
3. When creating multi-agent workflows, create an orchestrator agent
|
||||
4. Add other agents as tools with \`"type": "agent"\` for chaining
|
||||
5. List and explore MCP servers/tools when users need new capabilities
|
||||
6. Confirm work done and outline next steps once changes are complete
|
||||
2. Read existing agents with \`readFile\` before making changes
|
||||
3. Validate all required fields are present before creating/updating agents
|
||||
4. Ensure filename matches the \`name\` field exactly
|
||||
5. Use \`analyzeAgent\` to verify agent structure after creation/update
|
||||
6. When creating multi-agent workflows, create an orchestrator agent
|
||||
7. Add other agents as tools with \`"type": "agent"\` for chaining
|
||||
8. Use \`listMcpServers\` and \`listMcpTools\` when adding MCP integrations
|
||||
9. Confirm work done and outline next steps once changes are complete
|
||||
`;
|
||||
|
||||
export default skill;
|
||||
|
|
|
|||
|
|
@ -12,19 +12,6 @@ let modelConfig: z.infer<typeof ModelConfig> | null = null;
|
|||
|
||||
const baseMcpConfig: z.infer<typeof McpServerConfig> = {
|
||||
mcpServers: {
|
||||
firecrawl: {
|
||||
command: "npx",
|
||||
args: ["-y", "supergateway", "--stdio", "npx -y firecrawl-mcp"],
|
||||
env: {
|
||||
FIRECRAWL_API_KEY: "fc-aaacee4bdd164100a4d83af85bef6fdc",
|
||||
},
|
||||
},
|
||||
test: {
|
||||
url: "http://localhost:3000",
|
||||
headers: {
|
||||
"Authorization": "Bearer test",
|
||||
},
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
|||
12
apps/cli/src/application/entities/example.ts
Normal file
12
apps/cli/src/application/entities/example.ts
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
import z from "zod"
|
||||
import { Agent } from "./agent.js"
|
||||
import { McpServerDefinition } from "./mcp.js"
|
||||
|
||||
export const Example = z.object({
|
||||
id: z.string(),
|
||||
instructions: z.string().optional(),
|
||||
description: z.string().optional(),
|
||||
entryAgent: z.string().optional(),
|
||||
agents: z.array(Agent).optional(),
|
||||
mcpServers: z.record(z.string(), McpServerDefinition).optional(),
|
||||
});
|
||||
|
|
@ -1,16 +1,20 @@
|
|||
import z from "zod";
|
||||
import { z } from "zod";
|
||||
|
||||
const StdioMcpServerConfig = z.object({
|
||||
export const StdioMcpServerConfig = z.object({
|
||||
type: z.literal("stdio").optional(),
|
||||
command: z.string(),
|
||||
args: z.array(z.string()).optional(),
|
||||
env: z.record(z.string(), z.string()).optional(),
|
||||
});
|
||||
|
||||
const HttpMcpServerConfig = z.object({
|
||||
export const HttpMcpServerConfig = z.object({
|
||||
type: z.literal("http").optional(),
|
||||
url: z.string(),
|
||||
headers: z.record(z.string(), z.string()).optional(),
|
||||
});
|
||||
|
||||
export const McpServerDefinition = z.union([StdioMcpServerConfig, HttpMcpServerConfig]);
|
||||
|
||||
export const McpServerConfig = z.object({
|
||||
mcpServers: z.record(z.string(), z.union([StdioMcpServerConfig, HttpMcpServerConfig])),
|
||||
});
|
||||
mcpServers: z.record(z.string(), McpServerDefinition),
|
||||
});
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/
|
|||
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
|
||||
import { Client } from "@modelcontextprotocol/sdk/client";
|
||||
import { resolveSkill, availableSkills } from "../assistant/skills/index.js";
|
||||
import { McpServerDefinition, McpServerConfig } from "../entities/mcp.js";
|
||||
|
||||
const BuiltinToolsSchema = z.record(z.string(), z.object({
|
||||
description: z.string(),
|
||||
|
|
@ -305,6 +306,118 @@ export const BuiltinTools: z.infer<typeof BuiltinToolsSchema> = {
|
|||
},
|
||||
},
|
||||
|
||||
addMcpServer: {
    description: 'Add or update an MCP server in the configuration with validation. This ensures the server definition is valid before saving.',
    inputSchema: z.object({
        serverName: z.string().describe('Name/alias for the MCP server'),
        serverType: z.enum(['stdio', 'http']).describe('Type of MCP server: "stdio" for command-based or "http" for HTTP/SSE-based'),
        command: z.string().optional().describe('Command to execute (required for stdio type, e.g., "npx", "python", "node")'),
        args: z.array(z.string()).optional().describe('Command arguments (optional, for stdio type)'),
        env: z.record(z.string(), z.string()).optional().describe('Environment variables (optional, for stdio type)'),
        url: z.string().optional().describe('HTTP/SSE endpoint URL (required for http type)'),
        headers: z.record(z.string(), z.string()).optional().describe('HTTP headers (optional, for http type)'),
    }),
    // Builds a server definition for the requested transport, validates it
    // against McpServerDefinition, then merges it into the mcp.json config
    // under BASE_DIR/config (creating the file/directory on first use).
    // Returns { success, message, ... } rather than throwing, so the caller
    // always gets a structured result.
    execute: async ({ serverName, serverType, command, args, env, url, headers }: {
        serverName: string;
        serverType: 'stdio' | 'http';
        command?: string;
        args?: string[];
        env?: Record<string, string>;
        url?: string;
        headers?: Record<string, string>;
    }) => {
        try {
            // Typed against the schema (instead of `any`) so shape mistakes
            // are caught at compile time as well as by safeParse below.
            let serverDef: z.infer<typeof McpServerDefinition>;
            if (serverType === 'stdio') {
                if (!command) {
                    return {
                        success: false,
                        message: 'For stdio type servers, "command" is required. Example: "npx" or "python"',
                        validationErrors: ['Missing required field: command'],
                    };
                }
                serverDef = {
                    type: 'stdio',
                    command,
                    ...(args ? { args } : {}),
                    ...(env ? { env } : {}),
                };
            } else if (serverType === 'http') {
                if (!url) {
                    return {
                        success: false,
                        message: 'For http type servers, "url" is required. Example: "http://localhost:3000/sse"',
                        validationErrors: ['Missing required field: url'],
                    };
                }
                serverDef = {
                    type: 'http',
                    url,
                    ...(headers ? { headers } : {}),
                };
            } else {
                // Unreachable when callers respect the input schema; kept as a
                // runtime guard for untyped/external invocations.
                return {
                    success: false,
                    message: `Invalid serverType: ${serverType}. Must be "stdio" or "http"`,
                    validationErrors: [`Invalid serverType: ${serverType}`],
                };
            }

            // Validate against the Zod schema before touching the config file.
            const validationResult = McpServerDefinition.safeParse(serverDef);
            if (!validationResult.success) {
                return {
                    success: false,
                    message: 'Server definition failed validation. Check the errors below.',
                    validationErrors: validationResult.error.issues.map(
                        (issue) => `${issue.path.join('.')}: ${issue.message}`,
                    ),
                    providedDefinition: serverDef,
                };
            }

            // Read the existing config. A missing file (ENOENT) falls through
            // to the empty default; any other read/parse failure aborts so a
            // corrupt config is never silently overwritten.
            const configPath = path.join(BASE_DIR, 'config', 'mcp.json');
            let currentConfig: z.infer<typeof McpServerConfig> = { mcpServers: {} };
            try {
                const content = await fs.readFile(configPath, 'utf-8');
                currentConfig = McpServerConfig.parse(JSON.parse(content));
            } catch (error) {
                if ((error as { code?: string } | null)?.code !== 'ENOENT') {
                    return {
                        success: false,
                        message: `Failed to read existing MCP config: ${error instanceof Error ? error.message : String(error)}`,
                    };
                }
                // File doesn't exist yet — proceed with the empty config.
            }

            // Track whether this replaces an existing entry so the success
            // message reads "updated" vs "added" accurately.
            const isUpdate = !!currentConfig.mcpServers[serverName];

            // Store the schema-validated definition (not the raw input).
            currentConfig.mcpServers[serverName] = validationResult.data;

            // Persist, creating the config directory on first use.
            await fs.mkdir(path.dirname(configPath), { recursive: true });
            await fs.writeFile(configPath, JSON.stringify(currentConfig, null, 2), 'utf-8');

            return {
                success: true,
                message: `MCP server '${serverName}' ${isUpdate ? 'updated' : 'added'} successfully`,
                serverName,
                serverType,
                isUpdate,
                configuration: validationResult.data,
            };
        } catch (error) {
            return {
                success: false,
                message: `Failed to add MCP server: ${error instanceof Error ? error.message : 'Unknown error'}`,
            };
        }
    },
},
|
||||
|
||||
listMcpServers: {
|
||||
description: 'List all available MCP servers from the configuration',
|
||||
inputSchema: z.object({}),
|
||||
|
|
|
|||
7
apps/cli/src/examples/index.ts
Normal file
7
apps/cli/src/examples/index.ts
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
// Registry of bundled example workflows, keyed by example id.
// Each JSON file is validated against the Example schema at module load,
// so a malformed bundled example fails fast at import time.
import twitterPodcast from './twitter-podcast.json' with { type: 'json' };
import { Example } from '../application/entities/example.js';
import z from 'zod';

export const examples: Record<string, z.infer<typeof Example>> = {
    "twitter-podcast": Example.parse(twitterPodcast),
};
|
||||
559
apps/cli/src/examples/twitter-podcast.json
Normal file
559
apps/cli/src/examples/twitter-podcast.json
Normal file
|
|
@ -0,0 +1,559 @@
|
|||
{
|
||||
"id": "twitter-podcast",
|
||||
"instructions": "This example workflow generates a narrated podcast episode from recent AI-related tweets using multiple agents.",
|
||||
"description": "Generates a narrated podcast episode from recent AI-related tweets using multiple agents.",
|
||||
"entryAgent": "tweet-podcast",
|
||||
"agents": [
|
||||
{
|
||||
"name": "tweet-podcast",
|
||||
"description": "An agent that will produce a podcast from recent tweets",
|
||||
"model": "gpt-5.1",
|
||||
"instructions": "You are the orchestrator for producing a short podcast episode end-to-end. Follow these steps in order and only advance once each step succeeds:\n\n1. Tweets: call the tweets workflow to collect the latest tweets.\n\n2. Transcript creation: Provide the resulting tweets to the podcast_transcript_agent tool so it can script a ~1 minute alternating dialogue between John and Chloe that references the tweets and a balanced conversation about the AI bubble.\n\n3. Audio production: Send the transcript to the elevenlabs_audio_gen tool to create an audio file.",
|
||||
"tools": {
|
||||
"tweets": {
|
||||
"type": "agent",
|
||||
"name": "tweets"
|
||||
},
|
||||
"podcast_transcript_agent": {
|
||||
"type": "agent",
|
||||
"name": "podcast_transcript_agent"
|
||||
},
|
||||
"elevenlabs_audio_gen": {
|
||||
"type": "agent",
|
||||
"name": "elevenlabs_audio_gen"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "tweets",
|
||||
"description": "Checks latest tweets",
|
||||
"model": "gpt-4.1",
|
||||
"instructions": "Pulls the 10 most recent tweets each on OpenAI, Anthropic, Nvidia, Grok, Gemini",
|
||||
"tools": {
|
||||
"search_tweets": {
|
||||
"type": "mcp",
|
||||
"name": "TWITTER_RECENT_SEARCH",
|
||||
"description": "Search recent Tweets from the last 7 days using X/Twitter's search syntax via Composio's Twitter MCP server.",
|
||||
"mcpServerName": "twitter",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query for matching Tweets. Use X search operators like from:username, -is:retweet, -is:reply, has:media, lang:en, etc. Limited to last 7 days."
|
||||
},
|
||||
"start_time": {
|
||||
"type": "string",
|
||||
"description": "Oldest UTC timestamp (YYYY-MM-DDTHH:mm:ssZ) for results, within the last 7 days."
|
||||
},
|
||||
"end_time": {
|
||||
"type": "string",
|
||||
"description": "Newest UTC timestamp (YYYY-MM-DDTHH:mm:ssZ) for results; exclusive."
|
||||
},
|
||||
"max_results": {
|
||||
"type": "integer",
|
||||
"description": "Number of Tweets to return (up to 2000 per call).",
|
||||
"default": 10
|
||||
},
|
||||
"sort_order": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"recency",
|
||||
"relevancy"
|
||||
],
|
||||
"description": "Order of results: 'recency' (most recent first) or 'relevancy'."
|
||||
},
|
||||
"tweet_fields": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"article",
|
||||
"attachments",
|
||||
"author_id",
|
||||
"card_uri",
|
||||
"context_annotations",
|
||||
"conversation_id",
|
||||
"created_at",
|
||||
"edit_controls",
|
||||
"edit_history_tweet_ids",
|
||||
"entities",
|
||||
"geo",
|
||||
"id",
|
||||
"in_reply_to_user_id",
|
||||
"lang",
|
||||
"non_public_metrics",
|
||||
"note_tweet",
|
||||
"organic_metrics",
|
||||
"possibly_sensitive",
|
||||
"promoted_metrics",
|
||||
"public_metrics",
|
||||
"referenced_tweets",
|
||||
"reply_settings",
|
||||
"scopes",
|
||||
"source",
|
||||
"text",
|
||||
"withheld"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "Tweet fields to include in the response. Example: ['created_at','author_id','public_metrics']."
|
||||
},
|
||||
"expansions": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"article.cover_media",
|
||||
"article.media_entities",
|
||||
"attachments.media_keys",
|
||||
"attachments.media_source_tweet",
|
||||
"attachments.poll_ids",
|
||||
"author_id",
|
||||
"author_screen_name",
|
||||
"edit_history_tweet_ids",
|
||||
"entities.mentions.username",
|
||||
"entities.note.mentions.username",
|
||||
"geo.place_id",
|
||||
"in_reply_to_user_id",
|
||||
"referenced_tweets.id",
|
||||
"referenced_tweets.id.author_id"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "Expansions to hydrate related objects like users, media, polls, and places."
|
||||
},
|
||||
"media_fields": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"alt_text",
|
||||
"duration_ms",
|
||||
"height",
|
||||
"media_key",
|
||||
"non_public_metrics",
|
||||
"organic_metrics",
|
||||
"preview_image_url",
|
||||
"promoted_metrics",
|
||||
"public_metrics",
|
||||
"type",
|
||||
"url",
|
||||
"variants",
|
||||
"width"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "Media fields to include when media keys are expanded."
|
||||
},
|
||||
"place_fields": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"contained_within",
|
||||
"country",
|
||||
"country_code",
|
||||
"full_name",
|
||||
"geo",
|
||||
"id",
|
||||
"name",
|
||||
"place_type"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "Place fields to include when place IDs are expanded."
|
||||
},
|
||||
"poll_fields": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"duration_minutes",
|
||||
"end_datetime",
|
||||
"id",
|
||||
"options",
|
||||
"voting_status"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "Poll fields to include when poll IDs are expanded."
|
||||
},
|
||||
"user_fields": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"affiliation",
|
||||
"connection_status",
|
||||
"created_at",
|
||||
"description",
|
||||
"entities",
|
||||
"id",
|
||||
"location",
|
||||
"most_recent_tweet_id",
|
||||
"name",
|
||||
"pinned_tweet_id",
|
||||
"profile_banner_url",
|
||||
"profile_image_url",
|
||||
"protected",
|
||||
"public_metrics",
|
||||
"receives_your_dm",
|
||||
"subscription_type",
|
||||
"url",
|
||||
"verified",
|
||||
"verified_type",
|
||||
"withheld",
|
||||
"username"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"description": "User fields to include when user IDs are expanded. Username is always returned by default."
|
||||
},
|
||||
"since_id": {
|
||||
"type": "string",
|
||||
"description": "Return Tweets more recent than this ID (cannot be used with start_time)."
|
||||
},
|
||||
"until_id": {
|
||||
"type": "string",
|
||||
"description": "Return Tweets older than this ID (cannot be used with end_time)."
|
||||
},
|
||||
"next_token": {
|
||||
"type": "string",
|
||||
"description": "Pagination token from a previous response's meta.next_token."
|
||||
},
|
||||
"pagination_token": {
|
||||
"type": "string",
|
||||
"description": "Alternative pagination token from a previous meta.next_token; next_token is preferred."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"query"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
},
|
||||
"bash": {
|
||||
"type": "builtin",
|
||||
"name": "executeCommand",
|
||||
"description": "Execute bash commands to manipulate files like tweets.txt, e.g. writing search results to disk or appending logs.",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"command": {
|
||||
"type": "string",
|
||||
"description": "The bash command to execute, such as 'echo \"text\" >> tweets.txt' or 'cat tweets.txt'."
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"command"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "podcast_transcript_agent",
|
||||
"description": "An agent that will generate a transcript of a podcast",
|
||||
"model": "gpt-4.1",
|
||||
"instructions": "Your job is to create a NotebookLM style 1 minute podcast between 2 speakers John and Chloe. Each line should be a new speaker. The podcast should be about the contents of the tweets that were collected. You can use [sighs], [inhales then exhales], [chuckles], [laughs], [clears throat], [coughs], [sniffs], [pauses] etc. to make the podcast more natural."
|
||||
},
|
||||
{
|
||||
"name": "elevenlabs_audio_gen",
|
||||
"description": "An agent that will generate an audio file from a text",
|
||||
"model": "gpt-4.1",
|
||||
"instructions": "Your job is to take the multi-speaker transcript and generate an audio file from it. Use the elevenlabs text to speech tool to do this. For each speaker turn, you should generate an audio file and then combine them all into a single audio file. Use the voice_name 'Liam' for John and 'Cassidy' for Chloe. Make sure to remove the speaker names from the text before generating the audio files. Use the eleven_v3 model_id. In addition, you should use the compose_music tool to generate a short musical intro and outro for the podcast. The intro should be a small 5-10 second clip modeled after popular podcasts which fades and the podcast starts. The outro should be 10-15 seconds of a related sound. Save the intro and outro to files, and then use the bash tool to stitch them with the main podcast audio so that the final output audio file starts with the intro music, then the full conversation, and ends with the outro music. Place all generated audio on the Desktop by default unless otherwise instructed. Don't wait for confirmation - go ahead and produce the podcast.",
|
||||
"tools": {
|
||||
"text_to_speech": {
|
||||
"type": "mcp",
|
||||
"name": "text_to_speech",
|
||||
"description": "Generate an audio file from a text",
|
||||
"mcpServerName": "elevenLabs",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "The text to generate an audio file from"
|
||||
},
|
||||
"voice_name": {
|
||||
"type": "string",
|
||||
"description": "The voice name to use for the audio file"
|
||||
},
|
||||
"model_id": {
|
||||
"type": "string",
|
||||
"description": "The model id to use for the audio file"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"compose_music": {
|
||||
"type": "mcp",
|
||||
"name": "compose_music",
|
||||
"description": "Generate intro and outro music for the podcast and save as audio files",
|
||||
"mcpServerName": "elevenLabs",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"prompt": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Prompt"
|
||||
},
|
||||
"output_directory": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Output Directory"
|
||||
},
|
||||
"composition_plan": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/MusicPrompt"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null
|
||||
},
|
||||
"music_length_ms": {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Music Length Ms"
|
||||
}
|
||||
},
|
||||
"$defs": {
|
||||
"MusicPrompt": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
"positive_global_styles": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Positive Global Styles",
|
||||
"type": "array"
|
||||
},
|
||||
"negative_global_styles": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Negative Global Styles",
|
||||
"type": "array"
|
||||
},
|
||||
"sections": {
|
||||
"items": {
|
||||
"$ref": "#/$defs/SongSection"
|
||||
},
|
||||
"title": "Sections",
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"positive_global_styles",
|
||||
"negative_global_styles",
|
||||
"sections"
|
||||
],
|
||||
"title": "MusicPrompt",
|
||||
"type": "object"
|
||||
},
|
||||
"SectionSource": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
"song_id": {
|
||||
"title": "Song Id",
|
||||
"type": "string"
|
||||
},
|
||||
"range": {
|
||||
"$ref": "#/$defs/TimeRange"
|
||||
},
|
||||
"negative_ranges": {
|
||||
"anyOf": [
|
||||
{
|
||||
"items": {
|
||||
"$ref": "#/$defs/TimeRange"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null,
|
||||
"title": "Negative Ranges"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"song_id",
|
||||
"range"
|
||||
],
|
||||
"title": "SectionSource",
|
||||
"type": "object"
|
||||
},
|
||||
"SongSection": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
"section_name": {
|
||||
"title": "Section Name",
|
||||
"type": "string"
|
||||
},
|
||||
"positive_local_styles": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Positive Local Styles",
|
||||
"type": "array"
|
||||
},
|
||||
"negative_local_styles": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Negative Local Styles",
|
||||
"type": "array"
|
||||
},
|
||||
"duration_ms": {
|
||||
"title": "Duration Ms",
|
||||
"type": "integer"
|
||||
},
|
||||
"lines": {
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"title": "Lines",
|
||||
"type": "array"
|
||||
},
|
||||
"source_from": {
|
||||
"anyOf": [
|
||||
{
|
||||
"$ref": "#/$defs/SectionSource"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
],
|
||||
"default": null
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"section_name",
|
||||
"positive_local_styles",
|
||||
"negative_local_styles",
|
||||
"duration_ms",
|
||||
"lines"
|
||||
],
|
||||
"title": "SongSection",
|
||||
"type": "object"
|
||||
},
|
||||
"TimeRange": {
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
"start_ms": {
|
||||
"title": "Start Ms",
|
||||
"type": "integer"
|
||||
},
|
||||
"end_ms": {
|
||||
"title": "End Ms",
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"start_ms",
|
||||
"end_ms"
|
||||
],
|
||||
"title": "TimeRange",
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"title": "compose_musicArguments"
|
||||
}
|
||||
},
|
||||
"bash": {
|
||||
"type": "builtin",
|
||||
"name": "executeCommand"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"mcpServers": {
|
||||
"elevenLabs": {
|
||||
"command": "uvx",
|
||||
"args": [
|
||||
"elevenlabs-mcp"
|
||||
],
|
||||
"env": {
|
||||
"ELEVENLABS_API_KEY": "<your-api-key>"
|
||||
}
|
||||
},
|
||||
"calendar": {
|
||||
"type": "http",
|
||||
"url": "<composio-url>"
|
||||
},
|
||||
"twitter": {
|
||||
"type": "http",
|
||||
"url": "<composio-url>"
|
||||
}
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue