refactor example import

This commit is contained in:
Ramnique Singh 2025-11-21 11:42:57 +05:30
parent ea4159a94b
commit 97e47faca8
7 changed files with 46 additions and 78 deletions

View file

@ -4,7 +4,6 @@ import { stdin as input, stdout as output } from "node:process";
import fs from "fs";
import { promises as fsp } from "fs";
import path from "path";
import { fileURLToPath } from "url";
import { WorkDir, getModelConfig, updateModelConfig } from "./application/config/config.js";
import { RunEvent } from "./application/entities/run-events.js";
import { createInterface, Interface } from "node:readline/promises";
@ -12,12 +11,8 @@ import { ToolCallPart } from "./application/entities/message.js";
import { Agent } from "./application/entities/agent.js";
import { McpServerConfig, McpServerDefinition } from "./application/entities/mcp.js";
import { z } from "zod";
import { Flavor, ModelConfig } from "./application/entities/models.js";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const PackageRoot = path.resolve(__dirname, "..");
const ExamplesDir = path.join(PackageRoot, "examples");
import { Flavor } from "./application/entities/models.js";
import { examples } from "./examples/index.js";
export async function updateState(agent: string, runId: string) {
const state = new AgentState(agent, runId);
@ -413,51 +408,14 @@ function renderCurrentModel(provider: string, flavor: string, model: string) {
console.log("");
}
// Zod schema for a packaged example JSON file loaded from disk.
// NOTE(review): this diff removes it in favor of `Example` in
// application/entities/example.ts — kept here only as the pre-change shape.
const ExampleSchema = z.object({
// Unique example identifier.
id: z.string().min(1),
// Free-form guidance shown to the user after a successful import.
"post-install-instructions": z.string().optional(),
description: z.string().optional(),
// Name of the agent to run first; must match one of `agents` (enforced by refine below).
entryAgent: z.string().optional(),
// At least one agent is required.
agents: z.array(Agent).min(1),
mcpServers: z.record(z.string(), McpServerDefinition).optional(),
}).refine(
(data) => !data.entryAgent || data.agents.some((agent) => agent.name === data.entryAgent),
{
message: "entryAgent must reference one of the defined agents",
path: ["entryAgent"],
},
);
// Read the raw JSON text of a packaged example by name.
// Unknown names produce a friendly error that lists the available examples;
// all other filesystem failures are rethrown untouched.
async function readExampleFile(exampleName: string): Promise<string> {
  const examplePath = path.join(ExamplesDir, `${exampleName}.json`);
  try {
    return await fsp.readFile(examplePath, "utf8");
  } catch (error: any) {
    if (error?.code !== "ENOENT") {
      // Permission problems etc. are not "unknown example" — surface as-is.
      throw error;
    }
    const availableExamples = await listAvailableExamples();
    const listMessage =
      availableExamples.length
        ? `Available examples: ${availableExamples.join(", ")}`
        : "No packaged examples were found.";
    throw new Error(`Unknown example '${exampleName}'. ${listMessage}`);
  }
}
// List packaged example names (file names without the .json extension), sorted.
// Returns [] when the examples directory is missing or unreadable.
async function listAvailableExamples(): Promise<string[]> {
try {
const entries = await fsp.readdir(ExamplesDir);
return entries
.filter((entry) => entry.endsWith(".json"))
.map((entry) => entry.replace(/\.json$/, ""))
.sort();
} catch {
return [];
}
// NOTE(review): unreachable — both the try and catch branches return above.
// Looks like a diff artifact merging the old directory-scan body with the
// new `examples`-map body; confirm which implementation is intended.
return Object.keys(examples);
}
async function writeAgents(agents: z.infer<typeof Agent>[]) {
async function writeAgents(agents: z.infer<typeof Agent>[] | undefined) {
if (!agents) {
return;
}
await fsp.mkdir(path.join(WorkDir, "agents"), { recursive: true });
await Promise.all(
agents.map(async (agent) => {
@ -509,22 +467,17 @@ async function mergeMcpServers(servers: Record<string, z.infer<typeof McpServerD
}
export async function importExample(exampleName: string) {
const raw = await readExampleFile(exampleName);
const parsed = ExampleSchema.parse(JSON.parse(raw));
const entryAgentName = parsed.entryAgent ?? parsed.agents[0]?.name;
if (!entryAgentName) {
throw new Error(`Example '${exampleName}' does not define any agents to run.`);
}
const postInstallInstructions = parsed["post-install-instructions"];
await writeAgents(parsed.agents);
const example = examples[exampleName];
const postInstallInstructions = example.instructions;
await writeAgents(example.agents);
let serverMerge = { added: [] as string[], skipped: [] as string[] };
if (parsed.mcpServers) {
serverMerge = await mergeMcpServers(parsed.mcpServers);
if (example.mcpServers) {
serverMerge = await mergeMcpServers(example.mcpServers);
}
return {
id: parsed.id,
entryAgent: entryAgentName,
importedAgents: parsed.agents.map((agent) => agent.name),
id: example.id,
entryAgent: example.entryAgent,
importedAgents: example.agents?.map((agent) => agent.name) ?? [],
addedServers: serverMerge.added,
skippedServers: serverMerge.skipped,
postInstallInstructions,

View file

@ -12,19 +12,6 @@ let modelConfig: z.infer<typeof ModelConfig> | null = null;
// Default MCP server configuration seeded into new workspaces.
// NOTE(review): the original embedded a real Firecrawl API key in source;
// it is redacted here to a placeholder, matching the "<your-api-key>"
// convention used by the packaged example JSON files. Supply the real key
// via local configuration, never source control — and rotate the leaked key.
const baseMcpConfig: z.infer<typeof McpServerConfig> = {
  mcpServers: {
    firecrawl: {
      command: "npx",
      args: ["-y", "supergateway", "--stdio", "npx -y firecrawl-mcp"],
      env: {
        // Placeholder — replace with a real key at deploy/config time.
        FIRECRAWL_API_KEY: "<your-firecrawl-api-key>",
      },
    },
    // Local development test server.
    test: {
      url: "http://localhost:3000",
      headers: {
        "Authorization": "Bearer test",
      },
    },
  }
};

View file

@ -0,0 +1,12 @@
import z from "zod"
import { Agent } from "./agent.js"
import { McpServerDefinition } from "./mcp.js"
// Schema for a packaged example: a named bundle of agents plus optional
// MCP server definitions and post-import instructions.
export const Example = z.object({
// Unique example identifier.
id: z.string(),
// Guidance shown to the user after a successful import.
instructions: z.string().optional(),
description: z.string().optional(),
// Name of the agent to run first; presumably must match an entry in
// `agents` — not enforced by this schema, TODO confirm at call sites.
entryAgent: z.string().optional(),
agents: z.array(Agent).optional(),
mcpServers: z.record(z.string(), McpServerDefinition).optional(),
});

View file

@ -0,0 +1,8 @@
{
"id": "gemini3_svg_pelican",
"provider": "google",
"model": "gemini-3.0-pro",
"description": "Outputs a single valid SVG depicting a pelican riding a bicycle.",
"instructions": "You must output only a single, valid, self-contained SVG XML depicting a pelican riding a bicycle. Requirements: 1) Output must be ONLY raw SVG XML (no markdown fences, no explanations). 2) Use viewBox=\"0 0 512 512\" and set width/height to 512. 3) Include clear, recognizable pelican and bicycle using basic shapes/paths. 4) No external refs, images, scripts, or styles; use inline attributes only. 5) Keep IDs minimal; keep total file size reasonable.",
"tools": {}
}

View file

@ -0,0 +1,9 @@
import twitterPodcast from './twitter-podcast.json' with { type: 'json' };
import gemini3Test from './gemini3-test.json' with { type: 'json' };
import { Example } from '../application/entities/example.js';
import z from 'zod';
// Registry of packaged examples, keyed by example name. Each bundled JSON
// is validated with Example.parse at module load, so a malformed bundle
// fails at startup rather than when a user imports it.
export const examples: Record<string, z.infer<typeof Example>> = {
"twitter-podcast": Example.parse(twitterPodcast),
"gemini3-test": Example.parse(gemini3Test),
};

View file

@ -0,0 +1,559 @@
{
"id": "twitter-podcast",
"instructions": "This example workflow generates a narrated podcast episode from recent AI-related tweets using multiple agents.",
"description": "Generates a narrated podcast episode from recent AI-related tweets using multiple agents.",
"entryAgent": "tweet-podcast",
"agents": [
{
"name": "tweet-podcast",
"description": "An agent that will produce a podcast from recent tweets",
"model": "gpt-5.1",
"instructions": "You are the orchestrator for producing a short podcast episode end-to-end. Follow these steps in order and only advance once each step succeeds:\n\n1. Tweets: call the tweets workflow to collect the latest tweets.\n\n2. Transcript creation: Provide the resulting tweets to the podcast_transcript_agent tool so it can script a ~1 minute alternating dialogue between John and Chloe that references the tweets and includes a balanced conversation about the AI bubble.\n\n3. Audio production: Send the transcript to the elevenlabs_audio_gen tool to create an audio file.",
"tools": {
"tweets": {
"type": "agent",
"name": "tweets"
},
"podcast_transcript_agent": {
"type": "agent",
"name": "podcast_transcript_agent"
},
"elevenlabs_audio_gen": {
"type": "agent",
"name": "elevenlabs_audio_gen"
}
}
},
{
"name": "tweets",
"description": "Checks latest tweets",
"model": "gpt-4.1",
"instructions": "Pulls the 10 most recent tweets each on OpenAI, Anthropic, Nvidia, Grok, Gemini",
"tools": {
"search_tweets": {
"type": "mcp",
"name": "TWITTER_RECENT_SEARCH",
"description": "Search recent Tweets from the last 7 days using X/Twitter's search syntax via Composio's Twitter MCP server.",
"mcpServerName": "twitter",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query for matching Tweets. Use X search operators like from:username, -is:retweet, -is:reply, has:media, lang:en, etc. Limited to last 7 days."
},
"start_time": {
"type": "string",
"description": "Oldest UTC timestamp (YYYY-MM-DDTHH:mm:ssZ) for results, within the last 7 days."
},
"end_time": {
"type": "string",
"description": "Newest UTC timestamp (YYYY-MM-DDTHH:mm:ssZ) for results; exclusive."
},
"max_results": {
"type": "integer",
"description": "Number of Tweets to return (up to 2000 per call).",
"default": 10
},
"sort_order": {
"type": "string",
"enum": [
"recency",
"relevancy"
],
"description": "Order of results: 'recency' (most recent first) or 'relevancy'."
},
"tweet_fields": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"article",
"attachments",
"author_id",
"card_uri",
"context_annotations",
"conversation_id",
"created_at",
"edit_controls",
"edit_history_tweet_ids",
"entities",
"geo",
"id",
"in_reply_to_user_id",
"lang",
"non_public_metrics",
"note_tweet",
"organic_metrics",
"possibly_sensitive",
"promoted_metrics",
"public_metrics",
"referenced_tweets",
"reply_settings",
"scopes",
"source",
"text",
"withheld"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "Tweet fields to include in the response. Example: ['created_at','author_id','public_metrics']."
},
"expansions": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"article.cover_media",
"article.media_entities",
"attachments.media_keys",
"attachments.media_source_tweet",
"attachments.poll_ids",
"author_id",
"author_screen_name",
"edit_history_tweet_ids",
"entities.mentions.username",
"entities.note.mentions.username",
"geo.place_id",
"in_reply_to_user_id",
"referenced_tweets.id",
"referenced_tweets.id.author_id"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "Expansions to hydrate related objects like users, media, polls, and places."
},
"media_fields": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"alt_text",
"duration_ms",
"height",
"media_key",
"non_public_metrics",
"organic_metrics",
"preview_image_url",
"promoted_metrics",
"public_metrics",
"type",
"url",
"variants",
"width"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "Media fields to include when media keys are expanded."
},
"place_fields": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"contained_within",
"country",
"country_code",
"full_name",
"geo",
"id",
"name",
"place_type"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "Place fields to include when place IDs are expanded."
},
"poll_fields": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"duration_minutes",
"end_datetime",
"id",
"options",
"voting_status"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "Poll fields to include when poll IDs are expanded."
},
"user_fields": {
"anyOf": [
{
"type": "array",
"items": {
"type": "string",
"enum": [
"affiliation",
"connection_status",
"created_at",
"description",
"entities",
"id",
"location",
"most_recent_tweet_id",
"name",
"pinned_tweet_id",
"profile_banner_url",
"profile_image_url",
"protected",
"public_metrics",
"receives_your_dm",
"subscription_type",
"url",
"verified",
"verified_type",
"withheld",
"username"
]
}
},
{
"type": "null"
}
],
"default": null,
"description": "User fields to include when user IDs are expanded. Username is always returned by default."
},
"since_id": {
"type": "string",
"description": "Return Tweets more recent than this ID (cannot be used with start_time)."
},
"until_id": {
"type": "string",
"description": "Return Tweets older than this ID (cannot be used with end_time)."
},
"next_token": {
"type": "string",
"description": "Pagination token from a previous response's meta.next_token."
},
"pagination_token": {
"type": "string",
"description": "Alternative pagination token from a previous meta.next_token; next_token is preferred."
}
},
"required": [
"query"
],
"additionalProperties": false
}
},
"bash": {
"type": "builtin",
"name": "executeCommand",
"description": "Execute bash commands to manipulate files like tweets.txt, e.g. writing search results to disk or appending logs.",
"inputSchema": {
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "The bash command to execute, such as 'echo \"text\" >> tweets.txt' or 'cat tweets.txt'."
}
},
"required": [
"command"
],
"additionalProperties": false
}
}
}
},
{
"name": "podcast_transcript_agent",
"description": "An agent that will generate a transcript of a podcast",
"model": "gpt-4.1",
"instructions": "Your job is to create a NotebookLM style 1 minute podcast between 2 speakers John and Chloe. Each line should be a new speaker. The podcast should be about the contents of the provided tweets. You can use [sighs], [inhales then exhales], [chuckles], [laughs], [clears throat], [coughs], [sniffs], [pauses] etc. to make the podcast more natural."
},
{
"name": "elevenlabs_audio_gen",
"description": "An agent that will generate an audio file from a text",
"model": "gpt-4.1",
"instructions": "Your job is to take the multi speaker transcript and generate an audio file from it. Use the elevenlabs text to speech tool to do this. For each speaker turn, you should generate an audio file and then combine them all into a single audio file. Use the voice_name 'Liam' for John and 'Cassidy' for Chloe. Make sure to remove the speaker names from the text before generating the audio files. Use the eleven_v3 model_id. In addition, you should use the compose_music tool to generate a short musical intro and outro for the podcast. The intro should be a small 5-10 second clip modeled after popular podcasts which fades and the podcast starts. The outro should be 10-15 seconds of a related sound. Save the intro and outro to files, and then use the bash tool to stitch them with the main podcast audio so that the final output audio file starts with the intro music, then the full conversation, and ends with the outro music. Place all generated audio on the Desktop by default unless otherwise instructed. Don't wait for confirmation - go ahead and produce the podcast.",
"tools": {
"text_to_speech": {
"type": "mcp",
"name": "text_to_speech",
"description": "Generate an audio file from a text",
"mcpServerName": "elevenLabs",
"inputSchema": {
"type": "object",
"properties": {
"text": {
"type": "string",
"description": "The text to generate an audio file from"
},
"voice_name": {
"type": "string",
"description": "The voice name to use for the audio file"
},
"model_id": {
"type": "string",
"description": "The model id to use for the audio file"
}
}
}
},
"compose_music": {
"type": "mcp",
"name": "compose_music",
"description": "Generate intro and outro music for the podcast and save as audio files",
"mcpServerName": "elevenLabs",
"inputSchema": {
"type": "object",
"properties": {
"prompt": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Prompt"
},
"output_directory": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"title": "Output Directory"
},
"composition_plan": {
"anyOf": [
{
"$ref": "#/$defs/MusicPrompt"
},
{
"type": "null"
}
],
"default": null
},
"music_length_ms": {
"anyOf": [
{
"type": "integer"
},
{
"type": "null"
}
],
"default": null,
"title": "Music Length Ms"
}
},
"$defs": {
"MusicPrompt": {
"additionalProperties": true,
"properties": {
"positive_global_styles": {
"items": {
"type": "string"
},
"title": "Positive Global Styles",
"type": "array"
},
"negative_global_styles": {
"items": {
"type": "string"
},
"title": "Negative Global Styles",
"type": "array"
},
"sections": {
"items": {
"$ref": "#/$defs/SongSection"
},
"title": "Sections",
"type": "array"
}
},
"required": [
"positive_global_styles",
"negative_global_styles",
"sections"
],
"title": "MusicPrompt",
"type": "object"
},
"SectionSource": {
"additionalProperties": true,
"properties": {
"song_id": {
"title": "Song Id",
"type": "string"
},
"range": {
"$ref": "#/$defs/TimeRange"
},
"negative_ranges": {
"anyOf": [
{
"items": {
"$ref": "#/$defs/TimeRange"
},
"type": "array"
},
{
"type": "null"
}
],
"default": null,
"title": "Negative Ranges"
}
},
"required": [
"song_id",
"range"
],
"title": "SectionSource",
"type": "object"
},
"SongSection": {
"additionalProperties": true,
"properties": {
"section_name": {
"title": "Section Name",
"type": "string"
},
"positive_local_styles": {
"items": {
"type": "string"
},
"title": "Positive Local Styles",
"type": "array"
},
"negative_local_styles": {
"items": {
"type": "string"
},
"title": "Negative Local Styles",
"type": "array"
},
"duration_ms": {
"title": "Duration Ms",
"type": "integer"
},
"lines": {
"items": {
"type": "string"
},
"title": "Lines",
"type": "array"
},
"source_from": {
"anyOf": [
{
"$ref": "#/$defs/SectionSource"
},
{
"type": "null"
}
],
"default": null
}
},
"required": [
"section_name",
"positive_local_styles",
"negative_local_styles",
"duration_ms",
"lines"
],
"title": "SongSection",
"type": "object"
},
"TimeRange": {
"additionalProperties": true,
"properties": {
"start_ms": {
"title": "Start Ms",
"type": "integer"
},
"end_ms": {
"title": "End Ms",
"type": "integer"
}
},
"required": [
"start_ms",
"end_ms"
],
"title": "TimeRange",
"type": "object"
}
},
"title": "compose_musicArguments"
}
},
"bash": {
"type": "builtin",
"name": "executeCommand"
}
}
}
],
"mcpServers": {
"elevenLabs": {
"command": "uvx",
"args": [
"elevenlabs-mcp"
],
"env": {
"ELEVENLABS_API_KEY": "<your-api-key>"
}
},
"calendar": {
"type": "http",
"url": "<composio-url>"
},
"twitter": {
"type": "http",
"url": "<composio-url>"
}
}
}