Merge branch 'mgx_ops' into basic_ability

This commit is contained in:
garylin2099 2024-07-19 10:13:48 +08:00
commit 4cc47366f8
94 changed files with 3333 additions and 1200 deletions

.gitignore vendored
View file

@ -162,6 +162,7 @@ examples/graph_store.json
examples/image__vector_store.json
examples/index_store.json
.chroma
+.chroma_exp_data
*~$*
workspace/*
tmp

View file

@ -74,6 +74,10 @@ s3:
secure: false
bucket: "test"
+exp_pool:
+  enable_read: false
+  enable_write: false
+  persist_path: .chroma_exp_data  # Directory where experience-pool data is persisted.
azure_tts_subscription_key: "YOUR_SUBSCRIPTION_KEY"
azure_tts_region: "eastus"

View file

@ -0,0 +1,16 @@
[{
"req": [{
"role": "user",
"content": "\n# Current Plan\n{'goal': \"Please write a 1048 game using JavaScript and HTML code without using any frameworks, user can play with keyboard. Refer to the system design located at '/tmp/system_design.json' and the project schedule at '/tmp/project_schedule.json' for detailed information.\", 'tasks': []}\n\n# Current Task\n\n\n# Instruction\nBased on the context, write a plan or modify an existing plan to achieve the goal. A plan consists of one to 3 tasks.\nIf plan is created, you should track the progress and update the plan accordingly, such as Plan.finish_current_task, Plan.append_task, Plan.reset_task, Plan.replace_task, etc.\nWhen presented a current task, tackle the task using the available commands.\nPay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to new user requirement.\nNote:\n1. If you keeping encountering errors, unexpected situation, or you are not sure of proceeding, use RoleZero.ask_human to ask for help.\n2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task.\n3. Each time you finish a task, use RoleZero.reply_to_human to report your progress.\n4. Each time you write a code in your response, write with the Editor directly without preparing a repetitive code block beforehand.\n5. Take on ONE task and write ONE code file in each response. DON'T attempt all tasks in one response.\n6. When not specified, you should write files in a folder named \"src\". If you know the project path, then write in a \"src\" folder under the project path.\n7. When provided system design or project schedule, you MUST read them first before making a plan, then adhere to them in your implementation, especially in the programming language, package, or framework. You MUST implement all code files prescribed in the system design or project schedule. You can create a plan first with each task corresponding to implementing one code file.\n8. Write at most one file per task, do your best to implement THE ONLY ONE FILE. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.\n9. COMPLETE CODE: Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets.\n10. When provided system design, YOU MUST FOLLOW \"Data structures and interfaces\". DONT CHANGE ANY DESIGN. Do not use public member functions that do not exist in your design.\n11. Write out EVERY CODE DETAIL, DON'T LEAVE TODO.\n12. To modify code in a file, read the entire file, make changes, and update the file with the complete code, ensuring that no line numbers are included in the final write.\n13. When a system design or project schedule is provided, at the end of the plan, add a CodeRview Task for each file; for example, if there are three files, add three CodeRview Tasks. For each CodeRview Task, just call ReviewAndRewriteCode.run.\n"
}],
"resp": "\nExplanation: To begin the task of creating a 2048 game using JavaScript and HTML, I need to first read the system design and project schedule documents to understand the specific requirements and architecture outlined for the game development.\n\n```json\n[\n {\n \"command_name\": \"Editor.read\",\n \"args\": {\n \"path\": \"/tmp/system_design.json\"\n }\n },\n {\n \"command_name\": \"Editor.read\",\n \"args\": {\n \"path\": \"/tmp/project_schedule.json\"\n }\n }\n]\n```\n"
}, {
"req": [{
"role": "user",
"content": "Command Editor.read executed: file_path='/tmp/system_design.json' block_content='001|{\"Implementation approach\":\"We will implement the 2048 game using plain JavaScript and HTML, ensuring no frameworks are used. The game logic will handle tile movements, merging, and game state updates. The UI will be simple and clean, with a responsive design to fit different screen sizes. We will use CSS for styling and ensure the game is playable with keyboard arrow keys. The game will display the current score, have a restart button, and show a game over message when no more moves are possible.\",\"File list\":[\"index.html\",\"style.css\",\"script.js\"],\"Data structures and interfaces\":\"\\\\nclassDiagram\\\\n class Game {\\\\n -grid: int[][]\\\\n -score: int\\\\n +init(): void\\\\n +move(direction: str): void\\\\n +merge(direction: str): void\\\\n +isGameOver(): bool\\\\n +restart(): void\\\\n }\\\\n class UI {\\\\n -game: Game\\\\n +init(): void\\\\n +update(): void\\\\n +showGameOver(): void\\\\n +bindEvents(): void\\\\n }\\\\n Game --> UI\\\\n\",\"Program call flow\":\"\\\\nsequenceDiagram\\\\n participant U as UI\\\\n participant G as Game\\\\n U->>G: init()\\\\n G-->>U: return\\\\n U->>U: bindEvents()\\\\n U->>G: move(direction)\\\\n G->>G: merge(direction)\\\\n G->>U: update()\\\\n U->>U: update()\\\\n U->>G: isGameOver()\\\\n G-->>U: return bool\\\\n alt Game Over\\\\n U->>U: showGameOver()\\\\n end\\\\n U->>G: restart()\\\\n G-->>U: return\\\\n\",\"Anything UNCLEAR\":\"Clarify if there are any specific design preferences or additional features required beyond the basic 2048 game functionality.\"}\\n'\n\nCommand Editor.read executed: file_path='/tmp/project_schedule.json' block_content='001|{\"Required packages\":[\"No third-party dependencies required\"],\"Required Other language third-party packages\":[\"No third-party dependencies required\"],\"Logic Analysis\":[[\"script.js\",\"Contains Game and UI classes, and their methods: init, move, merge, isGameOver, restart, update, showGameOver, bindEvents\"],[\"index.html\",\"Contains the HTML structure for the game UI\"],[\"style.css\",\"Contains the CSS styles for the game UI\"]],\"Task list\":[\"index.html\",\"style.css\",\"script.js\"],\"Full API spec\":\"\",\"Shared Knowledge\":\"The `script.js` file will contain the core game logic and UI handling. The `index.html` file will provide the structure for the game, and `style.css` will handle the styling.\",\"Anything UNCLEAR\":\"Clarify if there are any specific design preferences or additional features required beyond the basic 2048 game functionality.\"}\\n'"
}, {
"role": "user",
"content": "\n# Current Plan\n{'goal': \"Please write a 1048 game using JavaScript and HTML code without using any frameworks, user can play with keyboard. Refer to the system design located at '/tmp/system_design.json' and the project schedule at '/tmp/project_schedule.json' for detailed information.\", 'tasks': []}\n\n# Current Task\n\n\n# Instruction\nBased on the context, write a plan or modify an existing plan to achieve the goal. A plan consists of one to 3 tasks.\nIf plan is created, you should track the progress and update the plan accordingly, such as Plan.finish_current_task, Plan.append_task, Plan.reset_task, Plan.replace_task, etc.\nWhen presented a current task, tackle the task using the available commands.\nPay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to new user requirement.\nNote:\n1. If you keeping encountering errors, unexpected situation, or you are not sure of proceeding, use RoleZero.ask_human to ask for help.\n2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task.\n3. Each time you finish a task, use RoleZero.reply_to_human to report your progress.\n4. Each time you write a code in your response, write with the Editor directly without preparing a repetitive code block beforehand.\n5. Take on ONE task and write ONE code file in each response. DON'T attempt all tasks in one response.\n6. When not specified, you should write files in a folder named \"src\". If you know the project path, then write in a \"src\" folder under the project path.\n7. When provided system design or project schedule, you MUST read them first before making a plan, then adhere to them in your implementation, especially in the programming language, package, or framework. You MUST implement all code files prescribed in the system design or project schedule. You can create a plan first with each task corresponding to implementing one code file.\n8. Write at most one file per task, do your best to implement THE ONLY ONE FILE. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.\n9. COMPLETE CODE: Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets.\n10. When provided system design, YOU MUST FOLLOW \"Data structures and interfaces\". DONT CHANGE ANY DESIGN. Do not use public member functions that do not exist in your design.\n11. Write out EVERY CODE DETAIL, DON'T LEAVE TODO.\n12. To modify code in a file, read the entire file, make changes, and update the file with the complete code, ensuring that no line numbers are included in the final write.\n13. When a system design or project schedule is provided, at the end of the plan, add a CodeRview Task for each file; for example, if there are three files, add three CodeRview Tasks. For each CodeRview Task, just call ReviewAndRewriteCode.run.\n"
}],
"resp": "\nExplanation: Based on the system design and project schedule, I will create a plan to implement the 2048 game. The plan will consist of tasks to create the necessary files (`index.html`, `style.css`, and `script.js`) and review each file to ensure they meet the design specifications.\n\nHere's the plan:\n\n1. **Task 1**: Create `index.html` - This file will contain the HTML structure necessary for the game's UI.\n2. **Task 2**: Create `style.css` - This file will define the CSS styles to make the game visually appealing and responsive.\n3. **Task 3**: Create `script.js` - This file will contain the JavaScript code for the game logic and UI interactions.\n4. **Code Review Tasks**: Review each file to ensure they meet the project requirements and adhere to the system design.\n\nLet's start by appending the first task to the plan.\n\n```json\n[\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"1\",\n \"dependent_task_ids\": [],\n \"instruction\": \"Create the index.html file with the basic HTML structure for the 2048 game.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"2\",\n \"dependent_task_ids\": [\"1\"],\n \"instruction\": \"Create the style.css file with the necessary CSS to style the 2048 game.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"3\",\n \"dependent_task_ids\": [\"1\", \"2\"],\n \"instruction\": \"Create the script.js file containing the JavaScript logic for the 2048 game.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"4\",\n \"dependent_task_ids\": [\"1\"],\n \"instruction\": \"Use ReviewAndRewriteCode to review the code in index.html to ensure it meets the design specifications.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"5\",\n \"dependent_task_ids\": [\"2\"],\n \"instruction\": \"Use ReviewAndRewriteCode to review the code in style.css to ensure it meets the design specifications.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"6\",\n \"dependent_task_ids\": [\"3\"],\n \"instruction\": \"Use ReviewAndRewriteCode to review the code in script.js to ensure it meets the design specifications.\",\n \"assignee\": \"Alex\"\n }\n }\n]\n```\n"
}]

View file

@ -0,0 +1,22 @@
[{
"req": [{
"role": "user",
"content": "\n# Current Plan\n{'goal': \"from to {'<all>'}: Write a 1024 game using JavaScript and HTML code without using any frameworks, user can play with keyboard.\", 'tasks': []}\n\n# Current Task\n\n\n# Instruction\nYou are a team leader, and you are responsible for drafting tasks and routing tasks to your team members.\nYour team member:\nTim: Team Leader, \nAlice: Product Manager, efficiently create a successful product that meets market demands and user expectations\nBob: Architect, design a concise, usable, complete software system\nEve: Project Manager, break down tasks according to PRD/technical design, generate a task list, and analyze task dependencies to start with the prerequisite modules\nAlex: Engineer, Take on game, app, and web development\nDavid: DataAnalyst, Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git and github operation, etc.\n\nYou should NOT assign consecutive tasks to the same team member, instead, assign an aggregated task (or the complete requirement) and let the team member to decompose it.\nWhen creating a new plan involving multiple members, create all tasks at once.\nIf plan is created, you should track the progress based on team member feedback message, and update plan accordingly, such as Plan.finish_current_task, Plan.reset_task, Plan.replace_task, etc.\nYou should use TeamLeader.publish_team_message to team members, asking them to start their task. DONT omit any necessary info such as path, link, environment, programming language, framework, requirement, constraint from original content to team members because you are their sole info source.\nPay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to the user directly, DON'T ask your team members.\n\nNote:\n1. If the requirement is a pure DATA-RELATED requirement, such as bug fixes, issue reporting, environment setup, terminal operations, pip install, web browsing, web scraping, web searching, web imitation, data science, data analysis, machine learning, deep learning, text-to-image etc. DON'T decompose it, assign a single task with the original user requirement as instruction directly to Data Analyst.\n2. If the requirement is developing a software, game, app, or website, excluding the above data-related tasks, you should decompose the requirement into multiple tasks and assign them to different team members based on their expertise, usually the sequence of Product Manager -> Architect -> Project Manager -> Engineer -> (optional: QaEngine if present) -> (optional: DataAnalyst if user requests deployment), each assigned ONE task. When publishing message to Product Manager, you should directly copy the full original user requirement.\n3. If the requirement contains both DATA-RELATED part mentioned in 1 and software development part mentioned in 2, you should decompose the software development part and assign them to different team members based on their expertise, and assign the DATA-RELATED part to Data Analyst David directly.\n4. If the requirement is a common-sense, logical, or math problem, you should respond directly without assigning any task to team members.\n5. If you think the requirement is not clear or ambiguous, you should ask the user for clarification immediately. Assign tasks only after all info is clear.\n6. 
It is helpful for Engineer to have both the system design and the project schedule for writing the code, so include paths of both files (if available) and remind Engineer to definitely read them when publishing message to Engineer.\n7. If the requirement is writing a TRD and software framework, you should assign it to Architect. When publishing message to Architect, you should directly copy the full original user requirement.\n"
}],
"resp": "\nExplanation: The requirement is about software development. Assign each tasks to a different team member based on their expertise. When publishing message to Product Manager, we copy original user requirement directly to ensure no information loss.\n```json\n[\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"1\",\n \"dependent_task_ids\": [],\n \"instruction\": \"Create a product requirement document (PRD) outlining the features, user interface, and user experience of the CLI python snake game.\",\n \"assignee\": \"Alice\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"2\",\n \"dependent_task_ids\": [\"1\"],\n \"instruction\": \"Design the software architecture for the CLI snake game, including the choice of programming language, libraries, and data flow.\",\n \"assignee\": \"Bob\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"3\",\n \"dependent_task_ids\": [\"2\"],\n \"instruction\": \"Break down the architecture into manageable tasks, identify task dependencies, and prepare a detailed task list for implementation.\",\n \"assignee\": \"Eve\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"4\",\n \"dependent_task_ids\": [\"3\"],\n \"instruction\": \"Implement the core game logic for the CLI snake game, including snake movement, food generation, and score tracking.\",\n \"assignee\": \"Alex\"\n }\n },\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"5\",\n \"dependent_task_ids\": [\"4\"],\n \"instruction\": \"Write comprehensive tests for the game logic and user interface to ensure functionality and reliability.\",\n \"assignee\": \"Edward\"\n }\n },\n {\n \"command_name\": \"TeamLeader.publish_message\",\n \"args\": {\n \"content\": \"Create a cli snake game using Python\",\n \"send_to\": \"Alice\"\n }\n },\n {\n \"command_name\": \"RoleZero.reply_to_human\",\n \"args\": {\n \"content\": \"I have assigned the tasks to the team members. Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly\"\n }\n },\n {\n \"command_name\": \"end\"\n }\n]\n```\n"
}, {
"req": [{
"role": "user",
"content": "\n# Current Plan\n{'goal': \"from to {'<all>'}: Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy.\", 'tasks': []}\n\n# Current Task\n\n\n# Instruction\nYou are a team leader, and you are responsible for drafting tasks and routing tasks to your team members.\nYour team member:\nTim: Team Leader, \nAlice: Product Manager, efficiently create a successful product that meets market demands and user expectations\nBob: Architect, design a concise, usable, complete software system\nEve: Project Manager, break down tasks according to PRD/technical design, generate a task list, and analyze task dependencies to start with the prerequisite modules\nAlex: Engineer, Take on game, app, and web development\nDavid: DataAnalyst, Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git and github operation, etc.\n\nYou should NOT assign consecutive tasks to the same team member, instead, assign an aggregated task (or the complete requirement) and let the team member to decompose it.\nWhen creating a new plan involving multiple members, create all tasks at once.\nIf plan is created, you should track the progress based on team member feedback message, and update plan accordingly, such as Plan.finish_current_task, Plan.reset_task, Plan.replace_task, etc.\nYou should use TeamLeader.publish_team_message to team members, asking them to start their task. DONT omit any necessary info such as path, link, environment, programming language, framework, requirement, constraint from original content to team members because you are their sole info source.\nPay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to the user directly, DON'T ask your team members.\n\nNote:\n1. If the requirement is a pure DATA-RELATED requirement, such as bug fixes, issue reporting, environment setup, terminal operations, pip install, web browsing, web scraping, web searching, web imitation, data science, data analysis, machine learning, deep learning, text-to-image etc. DON'T decompose it, assign a single task with the original user requirement as instruction directly to Data Analyst.\n2. If the requirement is developing a software, game, app, or website, excluding the above data-related tasks, you should decompose the requirement into multiple tasks and assign them to different team members based on their expertise, usually the sequence of Product Manager -> Architect -> Project Manager -> Engineer -> (optional: QaEngine if present) -> (optional: DataAnalyst if user requests deployment), each assigned ONE task. When publishing message to Product Manager, you should directly copy the full original user requirement.\n3. If the requirement contains both DATA-RELATED part mentioned in 1 and software development part mentioned in 2, you should decompose the software development part and assign them to different team members based on their expertise, and assign the DATA-RELATED part to Data Analyst David directly.\n4. If the requirement is a common-sense, logical, or math problem, you should respond directly without assigning any task to team members.\n5. If you think the requirement is not clear or ambiguous, you should ask the user for clarification immediately. Assign tasks only after all info is clear.\n6. 
It is helpful for Engineer to have both the system design and the project schedule for writing the code, so include paths of both files (if available) and remind Engineer to definitely read them when publishing message to Engineer.\n7. If the requirement is writing a TRD and software framework, you should assign it to Architect. When publishing message to Architect, you should directly copy the full original user requirement.\n"
}],
"resp": "\nExplanation: DON'T decompose requirement if it is a DATA-RELATED task, assign a single task directly to Data Analyst David. He will manage the decomposition and implementation.\n```json\n[\n {\n \"command_name\": \"Plan.append_task\",\n \"args\": {\n \"task_id\": \"1\",\n \"dependent_task_ids\": [],\n \"instruction\": \"Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy.\",\n \"assignee\": \"David\"\n }\n },\n {\n \"command_name\": \"TeamLeader.publish_message\",\n \"args\": {\n \"content\": \"Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy.\",\n \"send_to\": \"David\"\n }\n },\n {\n \"command_name\": \"RoleZero.reply_to_human\",\n \"args\": {\n \"content\": \"I have assigned the task to David. He will break down the task further by himself and starts solving it.\"\n }\n },\n {\n \"command_name\": \"end\"\n }\n]\n```\n"
}, {
"req": "Conversation History:\n[\n ...,\n {'role': 'assistant', 'content': 'from Alice(Product Manager) to {'Bob'}: {'docs': {'20240424153821.json': {'root_path': 'docs/prd', 'filename': '20240424153821.json', 'content': '{\"Language\":\"en_us\",\"Programming Language\":\"Python\",\"Original Requirements\":\"create a cli snake game\",\"Project Name\":\"snake_game\",\"Product Goals\":[\"Develop an intuitive and addictive snake game\",...], ...}}}}},\n]\n",
"resp": "\nExplanation: You received a message from Alice, the Product Manager, that she has completed the PRD, use Plan.finish_current_task to mark her task as finished and moves the plan to the next task. Based on plan status, next task is for Bob (Architect), publish a message asking him to start. The message content should contain important path info.\n```json\n[\n {\n \"command_name\": \"Plan.finish_current_task\",\n \"args\": {}\n },\n {\n \"command_name\": \"TeamLeader.publish_message\",\n \"args\": {\n \"content\": \"Please design the software architecture for the snake game based on the PRD created by Alice. The PRD is at 'docs/prd/20240424153821.json'. Include the choice of programming language, libraries, and data flow, etc.\",\n \"send_to\": \"Bob\"\n }\n },\n {\n \"command_name\": \"RoleZero.reply_to_human\",\n \"args\": {\n \"content\": \"Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture.\"\n }\n },\n {\n \"command_name\": \"end\"\n }\n]\n```\n"
}, {
"req": [{
"role": "user",
"content": "\n# Current Plan\n{'goal': \"from to {'<all>'}: how does the project go?\", 'tasks': []}\n\n# Current Task\n\n\n# Instruction\nYou are a team leader, and you are responsible for drafting tasks and routing tasks to your team members.\nYour team member:\nTim: Team Leader, \nAlice: Product Manager, efficiently create a successful product that meets market demands and user expectations\nBob: Architect, design a concise, usable, complete software system\nEve: Project Manager, break down tasks according to PRD/technical design, generate a task list, and analyze task dependencies to start with the prerequisite modules\nAlex: Engineer, Take on game, app, and web development\nDavid: DataAnalyst, Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git and github operation, etc.\n\nYou should NOT assign consecutive tasks to the same team member, instead, assign an aggregated task (or the complete requirement) and let the team member to decompose it.\nWhen creating a new plan involving multiple members, create all tasks at once.\nIf plan is created, you should track the progress based on team member feedback message, and update plan accordingly, such as Plan.finish_current_task, Plan.reset_task, Plan.replace_task, etc.\nYou should use TeamLeader.publish_team_message to team members, asking them to start their task. DONT omit any necessary info such as path, link, environment, programming language, framework, requirement, constraint from original content to team members because you are their sole info source.\nPay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to the user directly, DON'T ask your team members.\n\nNote:\n1. If the requirement is a pure DATA-RELATED requirement, such as bug fixes, issue reporting, environment setup, terminal operations, pip install, web browsing, web scraping, web searching, web imitation, data science, data analysis, machine learning, deep learning, text-to-image etc. DON'T decompose it, assign a single task with the original user requirement as instruction directly to Data Analyst.\n2. If the requirement is developing a software, game, app, or website, excluding the above data-related tasks, you should decompose the requirement into multiple tasks and assign them to different team members based on their expertise, usually the sequence of Product Manager -> Architect -> Project Manager -> Engineer -> (optional: QaEngine if present) -> (optional: DataAnalyst if user requests deployment), each assigned ONE task. When publishing message to Product Manager, you should directly copy the full original user requirement.\n3. If the requirement contains both DATA-RELATED part mentioned in 1 and software development part mentioned in 2, you should decompose the software development part and assign them to different team members based on their expertise, and assign the DATA-RELATED part to Data Analyst David directly.\n4. If the requirement is a common-sense, logical, or math problem, you should respond directly without assigning any task to team members.\n5. If you think the requirement is not clear or ambiguous, you should ask the user for clarification immediately. Assign tasks only after all info is clear.\n6. 
It is helpful for Engineer to have both the system design and the project schedule for writing the code, so include paths of both files (if available) and remind Engineer to definitely read them when publishing message to Engineer.\n7. If the requirement is writing a TRD and software framework, you should assign it to Architect. When publishing message to Architect, you should directly copy the full original user requirement.\n"
}],
"resp": "\nExplanation: The user is asking for a general update on the project status. Give a straight answer about the current task the team is working on and provide a summary of the completed tasks.\n```json\n[\n {\n \"command_name\": \"RoleZero.reply_to_human\",\n \"args\": {\n \"content\": \"The team is currently working on ... We have completed ...\"\n }\n },\n {\n \"command_name\": \"end\"\n }\n]\n```\n"
}]

View file

@ -0,0 +1,19 @@
# Experience Pool
## Prerequisites
- Ensure the RAG module is installed: https://docs.deepwisdom.ai/main/en/guide/in_depth_guides/rag_module.html
- Set both `enable_read` and `enable_write` to `true` in the `exp_pool` section of `config2.yaml`
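If you prefer to flip these switches at runtime rather than editing `config2.yaml`, a minimal sketch (this assumes the global `config` object from `metagpt.config2` exposes the `exp_pool` section added in this commit, as the `Config` diff below shows):

```python
from metagpt.config2 import config

# Mirror the exp_pool block of config2.yaml programmatically.
config.exp_pool.enable_read = True
config.exp_pool.enable_write = True
print(config.exp_pool.persist_path)  # ".chroma_exp_data" by default
```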
## Example Files
### 1. decorator.py
Showcases the implementation of the `@exp_cache` decorator.
### 2. init_exp_pool.py
Demonstrates the process of initializing the experience pool.
### 3. manager.py
Illustrates CRUD (Create, Read, Update, Delete) operations for managing experiences in the pool.
### 4. scorer.py
Outlines methods for evaluating and scoring experiences within the pool.

View file

@ -0,0 +1,28 @@
"""
This script demonstrates how to automatically store experiences using @exp_cache and query the stored experiences.
"""
import asyncio
import uuid
from metagpt.exp_pool import exp_cache, exp_manager
from metagpt.logs import logger
@exp_cache()
async def produce(req=""):
return f"{req} {uuid.uuid4().hex}"
async def main():
req = "Water"
resp = await produce(req=req)
logger.info(f"The response of `produce({req})` is: {resp}")
exps = await exp_manager.query_exps(req)
logger.info(f"Find experiences: {exps}")
if __name__ == "__main__":
asyncio.run(main())
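A usage note on the demo above: `@exp_cache` requires `req` to be passed as a keyword argument (see the decorator's docstring later in this diff). With the pool enabled, repeat calls can short-circuit; a sketch reusing the `produce` function defined above:

```python
async def run_twice():
    first = await produce(req="Water")   # executes; with enable_write, stores an experience
    second = await produce(req="Water")  # with enable_read, may be served from the pool
    return first, second
```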

View file

@ -0,0 +1,94 @@
"""Init experience pool.
Put some useful experiences into the experience pool.
"""
import asyncio
import json
from pathlib import Path
from metagpt.const import EXAMPLE_DATA_PATH
from metagpt.exp_pool import exp_manager
from metagpt.exp_pool.schema import EntryType, Experience, Metric, Score
from metagpt.logs import logger
from metagpt.utils.common import aread
async def load_file(filepath) -> list[dict]:
"""Asynchronously loads and parses a JSON file.
Args:
filepath: Path to the JSON file.
Returns:
A list of dictionaries parsed from the JSON file.
"""
return json.loads(await aread(filepath))
async def add_exp(req: str, resp: str, tag: str, metric: Metric = None):
"""Adds a new experience to the experience pool.
Args:
req: The request string.
resp: The response string.
tag: A tag for categorizing the experience.
metric: Optional metric for the experience. Defaults to a score of 10.
"""
exp = Experience(
req=req,
resp=resp,
entry_type=EntryType.MANUAL,
tag=tag,
metric=metric or Metric(score=Score(val=10, reason="Manual")),
)
exp_manager.config.exp_pool.enable_write = True
exp_manager.create_exp(exp)
logger.info(f"New experience created for the request `{req[:10]}`.")
async def add_exps(exps: list, tag: str):
"""Adds multiple experiences to the experience pool.
Args:
exps: A list of experience dictionaries.
tag: A tag for categorizing the experiences.
"""
tasks = [add_exp(req=json.dumps(exp["req"]), resp=exp["resp"], tag=tag) for exp in exps]
await asyncio.gather(*tasks)
async def add_exps_from_file(tag: str, filepath: Path):
"""Loads experiences from a file and adds them to the experience pool.
Args:
tag: A tag for categorizing the experiences.
filepath: Path to the file containing experiences.
"""
exps = await load_file(filepath)
await add_exps(exps, tag)
def query_exps_count():
"""Queries and logs the total count of experiences in the pool."""
count = exp_manager.get_exps_count()
logger.info(f"Experiences Count: {count}")
async def main():
await add_exps_from_file("TeamLeader.llm_cached_aask", EXAMPLE_DATA_PATH / "exp_pool/team_leader_exps.json")
await add_exps_from_file("Engineer2.llm_cached_aask", EXAMPLE_DATA_PATH / "exp_pool/engineer_exps.json")
query_exps_count()
if __name__ == "__main__":
asyncio.run(main())
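For reference, each record consumed by `add_exps` above is a `req`/`resp` pair shaped like the entries in `team_leader_exps.json` and `engineer_exps.json`; a minimal sketch of the expected input:

```python
# One record as add_exps expects it; req is JSON-serialized before storage.
record = {
    "req": [{"role": "user", "content": "..."}],  # list of chat messages
    "resp": "...",                                # the recorded reply
}
# add_exp(req=json.dumps(record["req"]), resp=record["resp"], tag="...")
```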

View file

@ -0,0 +1,30 @@
"""
Demonstrate the creation and querying of experiences.
This script creates a new experience, logs its creation, and then queries for experiences matching the same request.
"""
import asyncio
from metagpt.exp_pool import exp_manager
from metagpt.exp_pool.schema import EntryType, Experience
from metagpt.logs import logger
async def main():
# Define the simple request and response
req = "Simple req"
resp = "Simple resp"
# Add the new experience
exp = Experience(req=req, resp=resp, entry_type=EntryType.MANUAL)
exp_manager.create_exp(exp)
logger.info(f"New experience created for the request `{req}`.")
# Query for experiences matching the request
exps = await exp_manager.query_exps(req)
logger.info(f"Got experiences: {exps}")
if __name__ == "__main__":
asyncio.run(main())

View file

@ -0,0 +1,44 @@
import asyncio
from metagpt.exp_pool.scorers import SimpleScorer
# Request to implement quicksort in Python
REQ = "Write a program to implement quicksort in python."
# First response: Quicksort implementation without base case
RESP1 = """
def quicksort(arr):
return quicksort([x for x in arr[1:] if x <= arr[0]]) + [arr[0]] + quicksort([x for x in arr[1:] if x > arr[0]])
"""
# Second response: Quicksort implementation with base case
RESP2 = """
def quicksort(arr):
if len(arr) <= 1:
return arr
return quicksort([x for x in arr[1:] if x <= arr[0]]) + [arr[0]] + quicksort([x for x in arr[1:] if x > arr[0]])
"""
async def simple():
"""Evaluates two quicksort implementations using SimpleScorer.
Example:
{
"val": 3,
"reason": "The response attempts to implement quicksort but contains a critical flaw: it lacks a base case to terminate the recursion, which will lead to a maximum recursion depth exceeded error for non-empty lists. Additionally, the function does not handle empty lists properly. A correct implementation should include a base case to handle lists of length 0 or 1."
}
"""
scorer = SimpleScorer()
await scorer.evaluate(req=REQ, resp=RESP1)
await scorer.evaluate(req=REQ, resp=RESP2)
async def main():
await simple()
if __name__ == "__main__":
asyncio.run(main())
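A hedged sketch of consuming the result (the demo above discards the return values; this assumes `evaluate` returns a score object carrying `val` and `reason`, matching the JSON shown in the docstring):

```python
score = await scorer.evaluate(req=REQ, resp=RESP2)
print(score.val, score.reason)  # expect a higher val for the version with a base case
```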

View file

@ -50,9 +50,9 @@ async def generate_novel():
"Fill the empty nodes with your own ideas. Be creative! Use your own words!"
"I will tip you $100,000 if you write a good novel."
)
-novel_node = await ActionNode.from_pydantic(Novel).fill(context=instruction, llm=LLM())
+novel_node = await ActionNode.from_pydantic(Novel).fill(req=instruction, llm=LLM())
chap_node = await ActionNode.from_pydantic(Chapters).fill(
-    context=f"### instruction\n{instruction}\n### novel\n{novel_node.content}", llm=LLM()
+    req=f"### instruction\n{instruction}\n### novel\n{novel_node.content}", llm=LLM()
)
print(chap_node.instruct_content)

View file

@ -90,7 +90,7 @@ class Action(SerializationMixin, ContextMixin, BaseModel):
msgs = args[0]
context = "## History Messages\n"
context += "\n".join([f"{idx}: {i}" for idx, i in enumerate(reversed(msgs))])
-return await self.node.fill(context=context, llm=self.llm)
+return await self.node.fill(req=context, llm=self.llm)
async def run(self, *args, **kwargs):
"""Run action"""

View file

@ -18,6 +18,8 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions.action_outcls_registry import register_action_outcls
from metagpt.const import MARKDOWN_TITLE_PREFIX, USE_CONFIG_TIMEOUT
+from metagpt.exp_pool import exp_cache
+from metagpt.exp_pool.serializers import ActionNodeSerializer
from metagpt.llm import BaseLLM
from metagpt.logs import logger
from metagpt.provider.postprocess.llm_output_postprocess import llm_output_postprocess
@ -465,9 +467,11 @@ class ActionNode:
return self
+@exp_cache(serializer=ActionNodeSerializer())
async def fill(
    self,
-    context,
+    *,
+    req,
llm,
schema="json",
mode="auto",
@ -478,7 +482,7 @@ class ActionNode:
):
"""Fill the node(s) with mode.
-:param context: Everything we should know when filling node.
+:param req: Everything we should know when filling node.
:param llm: Large Language Model with pre-defined system message.
:param schema: json/markdown, determine example and output format.
- raw: free form text
@ -497,7 +501,7 @@ class ActionNode:
:return: self
"""
self.set_llm(llm)
-self.set_context(context)
+self.set_context(req)
if self.schema:
schema = self.schema
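The net effect of this hunk for callers: `context` becomes `req` and is keyword-only (note the bare `*`), which is exactly what the many one-line call-site changes below implement. A before/after sketch:

```python
# Before this commit:
node = await DESIGN_API_NODE.fill(context=context, llm=self.llm, schema=self.prompt_schema)

# After: req must be passed by keyword; positional calls now raise TypeError.
node = await DESIGN_API_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
```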

View file

@ -178,12 +178,12 @@ class WriteDesign(Action):
)
async def _new_system_design(self, context):
-node = await DESIGN_API_NODE.fill(context=context, llm=self.llm, schema=self.prompt_schema)
+node = await DESIGN_API_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
return node
async def _merge(self, prd_doc, system_design_doc):
context = NEW_REQ_TEMPLATE.format(old_design=system_design_doc.content, context=prd_doc.content)
-node = await REFINED_DESIGN_NODE.fill(context=context, llm=self.llm, schema=self.prompt_schema)
+node = await REFINED_DESIGN_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
system_design_doc.content = node.instruct_content.model_dump_json()
return system_design_doc
@ -254,25 +254,29 @@ class WriteDesign(Action):
extra_info=to_markdown_code_block(extra_info),
prd=to_markdown_code_block(prd_content),
)
-if not legacy_design_filename:
-    node = await self._new_system_design(context=context)
-    design = Document(content=node.instruct_content.model_dump_json())
-else:
-    old_design_content = await aread(filename=legacy_design_filename)
-    design = await self._merge(
-        prd_doc=Document(content=context), system_design_doc=Document(content=old_design_content)
-    )
+async with DocsReporter(enable_llm_stream=True) as reporter:
+    await reporter.async_report({"type": "design"}, "meta")
+    if not legacy_design_filename:
+        node = await self._new_system_design(context=context)
+        design = Document(content=node.instruct_content.model_dump_json())
+    else:
+        old_design_content = await aread(filename=legacy_design_filename)
+        design = await self._merge(
+            prd_doc=Document(content=context), system_design_doc=Document(content=old_design_content)
+        )
-if not output_pathname:
-    output_pathname = Path(output_pathname) / "docs" / "sytem_design.json"
-    output_pathname.mkdir(parents=True, exist_ok=True)
-elif not Path(output_pathname).is_absolute():
-    output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
-output_pathname = Path(output_pathname)
-await awrite(filename=output_pathname, data=design.content)
-output_filename = output_pathname.parent / f"{output_pathname.stem}-class-diagram"
-await self._save_data_api_design(design_doc=design, output_filename=output_filename)
-output_filename = output_pathname.parent / f"{output_pathname.stem}-sequence-diagram"
-await self._save_seq_flow(design_doc=design, output_filename=output_filename)
-await save_json_to_markdown(content=design.content, output_filename=output_pathname.with_suffix(".md"))
+    if not output_pathname:
+        output_pathname = Path(output_pathname) / "docs" / "sytem_design.json"
+        output_pathname.mkdir(parents=True, exist_ok=True)
+    elif not Path(output_pathname).is_absolute():
+        output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
+    output_pathname = Path(output_pathname)
+    await awrite(filename=output_pathname, data=design.content)
+    output_filename = output_pathname.parent / f"{output_pathname.stem}-class-diagram"
+    await self._save_data_api_design(design_doc=design, output_filename=output_filename)
+    output_filename = output_pathname.parent / f"{output_pathname.stem}-sequence-diagram"
+    await self._save_seq_flow(design_doc=design, output_filename=output_filename)
+    md_output_filename = output_pathname.with_suffix(".md")
+    await save_json_to_markdown(content=design.content, output_filename=md_output_filename)
+    await reporter.async_report(md_output_filename, "path")

View file

@ -30,6 +30,12 @@ from metagpt.logs import logger
from metagpt.utils.report import NotebookReporter
INSTALL_KEEPLEN = 500
+INI_CODE = """import warnings
+import logging
+root_logger = logging.getLogger()
+root_logger.setLevel(logging.ERROR)
+warnings.filterwarnings('ignore')"""
class RealtimeOutputNotebookClient(NotebookClient):
@ -79,6 +85,12 @@ class ExecuteNbCode(Action):
)
self.reporter = NotebookReporter()
self.set_nb_client()
+self.init_called = False
+async def init_code(self):
+    if not self.init_called:
+        await self.run(INI_CODE)
+        self.init_called = True
def set_nb_client(self):
self.nb_client = RealtimeOutputNotebookClient(
@ -175,9 +187,12 @@ class ExecuteNbCode(Action):
is_success = False
output_text = remove_escape_and_color_codes(output_text)
+if is_success:
+    output_text = remove_log_and_warning_lines(output_text)
# The useful information of the exception is at the end,
# the useful information of normal output is at the beginning.
-output_text = output_text[:keep_len] if is_success else output_text[-keep_len:]
+if '<!DOCTYPE html>' not in output_text:
+    output_text = output_text[:keep_len] if is_success else output_text[-keep_len:]
parsed_output.append(output_text)
return is_success, ",".join(parsed_output)
@ -268,6 +283,18 @@ class ExecuteNbCode(Action):
return outputs, success
+def remove_log_and_warning_lines(input_str: str) -> str:
+    delete_lines = ["[warning]", "warning:", "[cv]", "[info]"]
+    result = "\n".join(
+        [
+            line
+            for line in input_str.split("\n")
+            if not any(dl in line.lower() for dl in delete_lines)
+        ]
+    ).strip()
+    return result
def remove_escape_and_color_codes(input_str: str):
# Use regular expressions to get rid of escape characters and color codes in jupyter notebook output.
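A quick check of the new `remove_log_and_warning_lines` helper (import path assumed to be this file, `metagpt/actions/di/execute_nb_code.py`; matching is case-insensitive substring matching per `delete_lines`):

```python
from metagpt.actions.di.execute_nb_code import remove_log_and_warning_lines  # assumed path

sample = "[INFO] loading data\nWARNING: deprecated API\nEpoch 1/10 loss=0.42\n[CV] fold 1 done"
print(remove_log_and_warning_lines(sample))
# -> "Epoch 1/10 loss=0.42"
```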

View file

@ -57,7 +57,7 @@ class WriteAnalysisCode(Action):
code = await self._debug_with_reflection(context=context, working_memory=working_memory)
else:
rsp = await self.llm.aask(context, system_msgs=[INTERPRETER_SYSTEM_MSG], **kwargs)
-code = CodeParser.parse_code(text=rsp)
+code = CodeParser.parse_code(text=rsp, lang="python")
return code
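Pinning `lang="python"` makes the parser target python-fenced blocks instead of the first fence of any language; a sketch under that assumption:

```python
from metagpt.utils.common import CodeParser

rsp = "Notes:\n```json\n{}\n```\nCode:\n```python\nprint('hi')\n```"
code = CodeParser.parse_code(text=rsp, lang="python")
print(code)  # expected: print('hi')
```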

View file

@ -22,4 +22,4 @@ class GenerateQuestions(Action):
name: str = "GenerateQuestions"
async def run(self, context) -> ActionNode:
-return await QUESTIONS.fill(context=context, llm=self.llm)
+return await QUESTIONS.fill(req=context, llm=self.llm)

View file

@ -22,4 +22,4 @@ class PrepareInterview(Action):
name: str = "PrepareInterview"
async def run(self, context):
-return await QUESTIONS.fill(context=context, llm=self.llm)
+return await QUESTIONS.fill(req=context, llm=self.llm)

View file

@ -151,12 +151,12 @@ class WriteTasks(Action):
return task_doc
async def _run_new_tasks(self, context: str):
-node = await PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
+node = await PM_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
return node
async def _merge(self, system_design_doc, task_doc) -> Document:
context = NEW_REQ_TEMPLATE.format(context=system_design_doc.content, old_task=task_doc.content)
-node = await REFINED_PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
+node = await REFINED_PM_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
task_doc.content = node.instruct_content.model_dump_json()
return task_doc
@ -180,16 +180,20 @@ class WriteTasks(Action):
if design_filename:
content = await aread(filename=design_filename)
context += to_markdown_code_block(content)
-node = await self._run_new_tasks(context)
-file_content = node.instruct_content.model_dump_json()
-if not output_pathname:
-    output_pathname = Path(output_pathname) / "docs" / "project_schedule.json"
-    output_pathname.mkdir(parents=True, exist_ok=True)
-elif not Path(output_pathname).is_absolute():
-    output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
-output_pathname = Path(output_pathname)
-await awrite(filename=output_pathname, data=file_content)
-await save_json_to_markdown(content=file_content, output_filename=output_pathname.with_suffix(".md"))
+async with DocsReporter(enable_llm_stream=True) as reporter:
+    await reporter.async_report({"type": "task"}, "meta")
+    node = await self._run_new_tasks(context)
+    file_content = node.instruct_content.model_dump_json()
+    if not output_pathname:
+        output_pathname = Path(output_pathname) / "docs" / "project_schedule.json"
+        output_pathname.mkdir(parents=True, exist_ok=True)
+    elif not Path(output_pathname).is_absolute():
+        output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
+    output_pathname = Path(output_pathname)
+    await awrite(filename=output_pathname, data=file_content)
+    md_output_filename = output_pathname.with_suffix(".md")
+    await save_json_to_markdown(content=file_content, output_filename=md_output_filename)
+    await reporter.async_report(md_output_filename, "path")
return f'Project Schedule filename: "{str(output_pathname)}"'

View file

@ -578,7 +578,7 @@ class WriteCodeAN(Action):
async def run(self, context):
self.llm.system_prompt = "You are an outstanding engineer and can implement any code"
-return await WRITE_MOVE_NODE.fill(context=context, llm=self.llm, schema="json")
+return await WRITE_MOVE_NODE.fill(req=context, llm=self.llm, schema="json")
async def main():

View file

@ -229,7 +229,7 @@ class WriteCodePlanAndChange(Action):
code=await self.get_old_codes(),
)
logger.info("Writing code plan and change..")
-return await WRITE_CODE_PLAN_AND_CHANGE_NODE.fill(context=context, llm=self.llm, schema="json")
+return await WRITE_CODE_PLAN_AND_CHANGE_NODE.fill(req=context, llm=self.llm, schema="json")
async def get_old_codes(self) -> str:
old_codes = await self.repo.srcs.get_all()

View file

@ -211,7 +211,7 @@ class WritePRD(Action):
context = CONTEXT_TEMPLATE.format(requirements=requirement, project_name=project_name)
exclude = [PROJECT_NAME.key] if project_name else []
node = await WRITE_PRD_NODE.fill(
-    context=context, llm=self.llm, exclude=exclude, schema=self.prompt_schema
+    req=context, llm=self.llm, exclude=exclude, schema=self.prompt_schema
) # schema=schema
return node
@ -238,7 +238,7 @@ class WritePRD(Action):
async def _is_bugfix(self, context: str) -> bool:
if not self.repo.code_files_exists():
return False
-node = await WP_ISSUE_TYPE_NODE.fill(context, self.llm)
+node = await WP_ISSUE_TYPE_NODE.fill(req=context, llm=self.llm)
return node.get("issue_type") == "BUG"
async def get_related_docs(self, req: Document, docs: list[Document]) -> list[Document]:
@ -248,14 +248,14 @@ class WritePRD(Action):
async def _is_related(self, req: Document, old_prd: Document) -> bool:
context = NEW_REQ_TEMPLATE.format(old_prd=old_prd.content, requirements=req.content)
-node = await WP_IS_RELATIVE_NODE.fill(context, self.llm)
+node = await WP_IS_RELATIVE_NODE.fill(req=context, llm=self.llm)
return node.get("is_relative") == "YES"
async def _merge(self, req: Document, related_doc: Document) -> Document:
if not self.project_name:
self.project_name = Path(self.project_path).name
prompt = NEW_REQ_TEMPLATE.format(requirements=req.content, old_prd=related_doc.content)
-node = await REFINED_PRD_NODE.fill(context=prompt, llm=self.llm, schema=self.prompt_schema)
+node = await REFINED_PRD_NODE.fill(req=prompt, llm=self.llm, schema=self.prompt_schema)
related_doc.content = node.instruct_content.model_dump_json()
await self._rename_workspace(node)
return related_doc
@ -300,23 +300,27 @@ class WritePRD(Action):
user_requirement=to_markdown_code_block(val=user_requirement),
extra_info=to_markdown_code_block(val=extra_info),
)
-req = Document(content=content)
-if not legacy_prd_filename:
-    node = await self._new_prd(requirement=req.content)
-    new_prd = Document(content=node.instruct_content.model_dump_json())
-else:
-    content = await aread(filename=legacy_prd_filename)
-    old_prd = Document(content=content)
-    new_prd = await self._merge(req=req, related_doc=old_prd)
+async with DocsReporter(enable_llm_stream=True) as reporter:
+    await reporter.async_report({"type": "prd"}, "meta")
+    req = Document(content=content)
+    if not legacy_prd_filename:
+        node = await self._new_prd(requirement=req.content)
+        new_prd = Document(content=node.instruct_content.model_dump_json())
+    else:
+        content = await aread(filename=legacy_prd_filename)
+        old_prd = Document(content=content)
+        new_prd = await self._merge(req=req, related_doc=old_prd)
-if not output_pathname:
-    output_pathname = DEFAULT_WORKSPACE_ROOT / "docs" / "prd.json"
-    output_pathname.mkdir(parents=True, exist_ok=True)
-elif not Path(output_pathname).is_absolute():
-    output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
-output_pathname = Path(output_pathname)
-await awrite(filename=output_pathname, data=new_prd.content)
-competitive_analysis_filename = output_pathname.parent / f"{output_pathname.stem}-competitive-analysis"
-await self._save_competitive_analysis(prd_doc=new_prd, output_filename=Path(competitive_analysis_filename))
-await save_json_to_markdown(content=new_prd.content, output_filename=output_pathname.with_suffix(".md"))
+    if not output_pathname:
+        output_pathname = DEFAULT_WORKSPACE_ROOT / "docs" / "prd.json"
+        output_pathname.mkdir(parents=True, exist_ok=True)
+    elif not Path(output_pathname).is_absolute():
+        output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
+    output_pathname = Path(output_pathname)
+    await awrite(filename=output_pathname, data=new_prd.content)
+    competitive_analysis_filename = output_pathname.parent / f"{output_pathname.stem}-competitive-analysis"
+    await self._save_competitive_analysis(prd_doc=new_prd, output_filename=Path(competitive_analysis_filename))
+    md_output_filename = output_pathname.with_suffix(".md")
+    await save_json_to_markdown(content=new_prd.content, output_filename=md_output_filename)
+    await reporter.async_report(md_output_filename, "path")
return f'PRD filename: "{str(output_pathname)}"'

View file

@ -36,4 +36,4 @@ class WriteReview(Action):
name: str = "WriteReview"
async def run(self, context):
-return await WRITE_REVIEW_NODE.fill(context=context, llm=self.llm, schema="json")
+return await WRITE_REVIEW_NODE.fill(req=context, llm=self.llm, schema="json")

View file

@ -13,6 +13,7 @@ from pydantic import BaseModel, model_validator
from metagpt.configs.browser_config import BrowserConfig
from metagpt.configs.embedding_config import EmbeddingConfig
+from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.configs.mermaid_config import MermaidConfig
from metagpt.configs.redis_config import RedisConfig
@ -71,6 +72,9 @@ class Config(CLIParams, YamlModel):
enable_longterm_memory: bool = False
code_review_k_times: int = 2
+# Experience Pool Parameters
+exp_pool: ExperiencePoolConfig = ExperiencePoolConfig()
# Will be removed in the future
metagpt_tti_url: str = ""
language: str = "English"

View file

@ -0,0 +1,9 @@
from pydantic import Field
from metagpt.utils.yaml_model import YamlModel
class ExperiencePoolConfig(YamlModel):
enable_read: bool = Field(default=False, description="Enable to read from experience pool.")
enable_write: bool = Field(default=False, description="Enable to write to experience pool.")
persist_path: str = Field(default=".chroma_exp_data", description="The persist path for experience pool.")
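A minimal sketch of how this model lines up with the `exp_pool` block added to the example config earlier in this diff (YamlModel fields map one-to-one onto the YAML keys):

```python
from metagpt.configs.exp_pool_config import ExperiencePoolConfig

cfg = ExperiencePoolConfig()  # defaults: reads and writes disabled
assert cfg.persist_path == ".chroma_exp_data"

cfg = ExperiencePoolConfig(enable_read=True, enable_write=True)
```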

View file

@ -20,12 +20,6 @@ import metagpt
def get_metagpt_package_root():
"""Get the root directory of the installed package."""
package_root = Path(metagpt.__file__).parent.parent
-for i in (".git", ".project_root", ".gitignore"):
-    if (package_root / i).exists():
-        break
-else:
-    package_root = Path.cwd()
logger.info(f"Package root set to {str(package_root)}")
return package_root
@ -40,6 +34,12 @@ def get_metagpt_root():
else:
# Fallback to package root if no environment variable is set
project_root = get_metagpt_package_root()
+for i in (".git", ".project_root", ".gitignore"):
+    if (project_root / i).exists():
+        break
+else:
+    project_root = Path.cwd()
return project_root
@ -151,4 +151,4 @@ METAGPT_REPORTER_DEFAULT_URL = os.environ.get("METAGPT_REPORTER_URL", "")
AGENT = "agent"
# SWE agent
-SWE_SETUP_PATH = METAGPT_ROOT / "metagpt/tools/swe_agent_commands/setup_default.sh"
+SWE_SETUP_PATH = get_metagpt_package_root() / "metagpt/tools/swe_agent_commands/setup_default.sh"

View file

@ -27,7 +27,7 @@ class MGXEnv(Environment):
def publish_message(self, message: Message, user_defined_recipient: str = "", publicer: str = "") -> bool:
"""let the team leader take over message publishing"""
-tl = self.get_role("Tim")  # TeamLeader's name is Tim
+tl = self.get_role("Mike")  # TeamLeader's name is Mike
if user_defined_recipient:
# human user's direct chat message to a certain role
@ -91,7 +91,7 @@ class MGXEnv(Environment):
async def reply_to_human(self, content: str, sent_from: Role = None) -> str:
# NOTE: Can be overwritten in remote setting
-return content
+return "The monitor has verified the message, confirmation acknowledged. Refrain from resending duplicate messages."
def message_within_software_sop(self, message: Message) -> bool:
# Engineer, QaEngineer can be end of the SOP. Their msg requires routing outside.

View file

@ -0,0 +1,6 @@
"""Experience pool init."""
from metagpt.exp_pool.manager import exp_manager
from metagpt.exp_pool.decorator import exp_cache
__all__ = ["exp_manager", "exp_cache"]

View file

@ -0,0 +1,7 @@
"""Context builders init."""
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.simple import SimpleContextBuilder
from metagpt.exp_pool.context_builders.role_zero import RoleZeroContextBuilder
__all__ = ["BaseContextBuilder", "SimpleContextBuilder", "RoleZeroContextBuilder"]

View file

@ -0,0 +1,30 @@
"""Action Node context builder."""
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
ACTION_NODE_CONTEXT_TEMPLATE = """
{req}
### Experiences
-----
{exps}
-----
## Instruction
Consider **Experiences** to generate a better answer.
"""
class ActionNodeContextBuilder(BaseContextBuilder):
async def build(self, req: Any) -> str:
"""Builds the action node context string.
If there are no experiences, returns the original `req`;
otherwise returns context with `req` and formatted experiences.
"""
exps = self.format_exps()
return ACTION_NODE_CONTEXT_TEMPLATE.format(req=req, exps=exps) if exps else req
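A sketch of the two branches of `build` (the module path is assumed from this diff's layout, since only the base, simple, and role-zero builders are exported in `__init__.py`; `Experience` is constructed as in the manager demo above):

```python
import asyncio
from metagpt.exp_pool.context_builders.action_node import ActionNodeContextBuilder  # assumed path
from metagpt.exp_pool.schema import EntryType, Experience

async def demo():
    builder = ActionNodeContextBuilder()
    print(await builder.build("my req"))  # no experiences: req returned unchanged
    builder.exps = [Experience(req="r", resp="a", entry_type=EntryType.MANUAL)]
    print(await builder.build("my req"))  # experiences present: wrapped in the template

asyncio.run(demo())
```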

View file

@ -0,0 +1,41 @@
"""Base context builder."""
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Experience
EXP_TEMPLATE = """Given the request: {req}, We can get the response: {resp}, which scored: {score}."""
class BaseContextBuilder(BaseModel, ABC):
model_config = ConfigDict(arbitrary_types_allowed=True)
exps: list[Experience] = []
@abstractmethod
async def build(self, req: Any) -> Any:
"""Build context from req.
Do not modify `req`. If modification is necessary, use copy.deepcopy to create a copy first.
"""
def format_exps(self) -> str:
"""Format experiences into a numbered list of strings.
Example:
1. Given the request: req1, We can get the response: resp1, which scored: 8.
2. Given the request: req2, We can get the response: resp2, which scored: 9.
Returns:
str: The formatted experiences as a string.
"""
result = []
for i, exp in enumerate(self.exps, start=1):
score_val = exp.metric.score.val if exp.metric and exp.metric.score else "N/A"
result.append(f"{i}. " + EXP_TEMPLATE.format(req=exp.req, resp=exp.resp, score=score_val))
return "\n".join(result)
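What `format_exps` produces, concretely (a sketch using the schema types imported in `init_exp_pool.py` above; an experience without a metric falls back to "N/A"):

```python
from metagpt.exp_pool.context_builders import SimpleContextBuilder
from metagpt.exp_pool.schema import EntryType, Experience, Metric, Score

builder = SimpleContextBuilder()
builder.exps = [
    Experience(req="req1", resp="resp1", entry_type=EntryType.MANUAL,
               metric=Metric(score=Score(val=8, reason="solid"))),
    Experience(req="req2", resp="resp2", entry_type=EntryType.MANUAL),  # no metric
]
print(builder.format_exps())
# 1. Given the request: req1, We can get the response: resp1, which scored: 8.
# 2. Given the request: req2, We can get the response: resp2, which scored: N/A.
```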

View file

@ -0,0 +1,56 @@
"""RoleZero context builder."""
import copy
import re
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
class RoleZeroContextBuilder(BaseContextBuilder):
async def build(self, req: Any) -> list[dict]:
"""Builds the role zero context string.
Note:
1. `req` is expected to be a list of message dicts, e.g., [{...}, {"role": "user", "content": "context"}].
2. Returns the original `req` if it is empty.
3. Creates a copy of req and replaces the example content in the copied req with actual experiences.
"""
if not req:
return req
exps = self.format_exps()
if not exps:
return req
req_copy = copy.deepcopy(req)
req_copy[-1]["content"] = self.replace_example_content(req_copy[-1].get("content", ""), exps)
return req_copy
def replace_example_content(self, text: str, new_example_content: str) -> str:
return self.replace_content_between_markers(text, "# Example", "# Instruction", new_example_content)
@staticmethod
def replace_content_between_markers(text: str, start_marker: str, end_marker: str, new_content: str) -> str:
"""Replace the content between `start_marker` and `end_marker` in the text with `new_content`.
Args:
text (str): The original text.
new_content (str): The new content to replace the old content.
start_marker (str): The marker indicating the start of the content to be replaced, such as '# Example'.
end_marker (str): The marker indicating the end of the content to be replaced, such as '# Instruction'.
Returns:
str: The text with the content replaced.
"""
pattern = re.compile(f"({start_marker}\n)(.*?)(\n{end_marker})", re.DOTALL)
def replacement(match):
return f"{match.group(1)}{new_content}\n{match.group(3)}"
replaced_text = pattern.sub(replacement, text)
return replaced_text
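For reference, a quick sketch of what `replace_content_between_markers` does to a prompt (the sample text is illustrative; the marker names follow the CMD_PROMPT layout this builder assumes):

```python
# Sketch: swap the example section of a prompt for retrieved experiences.
from metagpt.exp_pool.context_builders import RoleZeroContextBuilder

text = "# Example\nold example content\n# Instruction\ndo the task"
new_text = RoleZeroContextBuilder.replace_content_between_markers(
    text, "# Example", "# Instruction", "1. Given the request: ..., which scored: 10."
)
print(new_text)
# # Example
# 1. Given the request: ..., which scored: 10.
#
# # Instruction
# do the task
```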

View file

@ -0,0 +1,26 @@
"""Simple context builder."""
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
SIMPLE_CONTEXT_TEMPLATE = """
## Context
### Experiences
-----
{exps}
-----
## User Requirement
{req}
## Instruction
Consider **Experiences** to generate a better answer.
"""
class SimpleContextBuilder(BaseContextBuilder):
async def build(self, req: Any) -> str:
return SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps=self.format_exps())

View file

@ -0,0 +1,211 @@
"""Experience Decorator."""
import asyncio
import functools
from typing import Any, Callable, Optional, TypeVar
from pydantic import BaseModel, ConfigDict, model_validator
from metagpt.config2 import config
from metagpt.exp_pool.context_builders import BaseContextBuilder, SimpleContextBuilder
from metagpt.exp_pool.manager import ExperienceManager, exp_manager
from metagpt.exp_pool.perfect_judges import BasePerfectJudge, SimplePerfectJudge
from metagpt.exp_pool.schema import Experience, Metric, QueryType, Score
from metagpt.exp_pool.scorers import BaseScorer, SimpleScorer
from metagpt.exp_pool.serializers import BaseSerializer, SimpleSerializer
from metagpt.logs import logger
from metagpt.utils.async_helper import NestAsyncio
from metagpt.utils.exceptions import handle_exception
ReturnType = TypeVar("ReturnType")
def exp_cache(
_func: Optional[Callable[..., ReturnType]] = None,
query_type: QueryType = QueryType.SEMANTIC,
manager: Optional[ExperienceManager] = None,
scorer: Optional[BaseScorer] = None,
perfect_judge: Optional[BasePerfectJudge] = None,
context_builder: Optional[BaseContextBuilder] = None,
serializer: Optional[BaseSerializer] = None,
tag: Optional[str] = None,
):
"""Decorator to get a perfect experience, otherwise, it executes the function, and create a new experience.
Note:
1. This can be applied to both synchronous and asynchronous functions.
2. The function must have a `req` parameter, and it must be provided as a keyword argument.
3. If `config.exp_pool.enable_read` is False, the decorator will just directly execute the function.
4. If `config.exp_pool.enable_write` is False, the decorator will skip evaluating and saving the experience.
Args:
_func: Just to make the decorator more flexible, for example, it can be used directly with @exp_cache by default, without the need for @exp_cache().
query_type: The type of query to be used when fetching experiences.
manager: How to fetch, evaluate and save experiences, etc. Defaults to `exp_manager`.
scorer: Evaluates experiences. Defaults to `SimpleScorer()`.
perfect_judge: Determines if an experience is perfect. Defaults to `SimplePerfectJudge()`.
context_builder: Builds the context from exps and the function parameters. Defaults to `SimpleContextBuilder()`.
serializer: Serializes the request and the function's return value for storage, deserializes the stored response back to the function's return value. Defaults to `SimpleSerializer()`.
tag: An optional tag for the experience. Defaults to `ClassName.method_name` or `function_name`.
"""
def decorator(func: Callable[..., ReturnType]) -> Callable[..., ReturnType]:
if not config.exp_pool.enable_read:
return func
@functools.wraps(func)
async def get_or_create(args: Any, kwargs: Any) -> ReturnType:
handler = ExpCacheHandler(
func=func,
args=args,
kwargs=kwargs,
query_type=query_type,
exp_manager=manager,
exp_scorer=scorer,
exp_perfect_judge=perfect_judge,
context_builder=context_builder,
serializer=serializer,
tag=tag,
)
await handler.fetch_experiences()
if exp := await handler.get_one_perfect_exp():
return exp
await handler.execute_function()
if config.exp_pool.enable_write:
await handler.process_experience()
return handler._raw_resp
return ExpCacheHandler.choose_wrapper(func, get_or_create)
return decorator(_func) if _func else decorator
class ExpCacheHandler(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
func: Callable
args: Any
kwargs: Any
query_type: QueryType = QueryType.SEMANTIC
exp_manager: Optional[ExperienceManager] = None
exp_scorer: Optional[BaseScorer] = None
exp_perfect_judge: Optional[BasePerfectJudge] = None
context_builder: Optional[BaseContextBuilder] = None
serializer: Optional[BaseSerializer] = None
tag: Optional[str] = None
_exps: list[Experience] = None
_req: str = ""
_resp: str = ""
_raw_resp: Any = None
_score: Score = None
@model_validator(mode="after")
def initialize(self):
"""Initialize default values for optional parameters if they are None.
This is necessary because the decorator might pass None, which would override the default values set by Field.
"""
self._validate_params()
self.exp_manager = self.exp_manager or exp_manager
self.exp_scorer = self.exp_scorer or SimpleScorer()
self.exp_perfect_judge = self.exp_perfect_judge or SimplePerfectJudge()
self.context_builder = self.context_builder or SimpleContextBuilder()
self.serializer = self.serializer or SimpleSerializer()
self.tag = self.tag or self._generate_tag()
self._req = self.serializer.serialize_req(**self.kwargs)
return self
async def fetch_experiences(self):
"""Fetch experiences by query_type."""
self._exps = await self.exp_manager.query_exps(self._req, query_type=self.query_type, tag=self.tag)
async def get_one_perfect_exp(self) -> Optional[Any]:
"""Get a potentially perfect experience, and resolve resp."""
for exp in self._exps:
if await self.exp_perfect_judge.is_perfect_exp(exp, self._req, *self.args, **self.kwargs):
logger.info(f"Get one perfect experience: {exp.req[:20]}...")
return self.serializer.deserialize_resp(exp.resp)
return None
async def execute_function(self):
"""Execute the function, and save resp."""
self._raw_resp = await self._execute_function()
self._resp = self.serializer.serialize_resp(self._raw_resp)
@handle_exception
async def process_experience(self):
"""Process experience.
Evaluates and saves experience.
Use `handle_exception` to ensure robustness; failures must not stop subsequent operations.
"""
await self.evaluate_experience()
self.save_experience()
async def evaluate_experience(self):
"""Evaluate the experience, and save the score."""
self._score = await self.exp_scorer.evaluate(self._req, self._resp)
def save_experience(self):
"""Save the new experience."""
exp = Experience(req=self._req, resp=self._resp, tag=self.tag, metric=Metric(score=self._score))
self.exp_manager.create_exp(exp)
@staticmethod
def choose_wrapper(func, wrapped_func):
"""Choose how to run wrapped_func based on whether the function is asynchronous."""
async def async_wrapper(*args, **kwargs):
return await wrapped_func(args, kwargs)
def sync_wrapper(*args, **kwargs):
NestAsyncio.apply_once()
return asyncio.get_event_loop().run_until_complete(wrapped_func(args, kwargs))
return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
def _validate_params(self):
if "req" not in self.kwargs:
raise ValueError("`req` must be provided as a keyword argument.")
def _generate_tag(self) -> str:
"""Generates a tag for the self.func.
"ClassName.method_name" if the first argument is a class instance, otherwise just "function_name".
"""
if self.args and hasattr(self.args[0], "__class__"):
cls_name = type(self.args[0]).__name__
return f"{cls_name}.{self.func.__name__}"
return self.func.__name__
async def _build_context(self) -> str:
self.context_builder.exps = self._exps
return await self.context_builder.build(self.kwargs["req"])
async def _execute_function(self):
self.kwargs["req"] = await self._build_context()
if asyncio.iscoroutinefunction(self.func):
return await self.func(*self.args, **self.kwargs)
return self.func(*self.args, **self.kwargs)
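A hedged usage sketch of the decorator (`translate` below is a hypothetical function; caching only engages when `exp_pool.enable_read`/`enable_write` are turned on in the config, otherwise the decorator returns the function unchanged):

```python
# Sketch: exp_cache on an async function. `req` must be a keyword argument,
# otherwise ExpCacheHandler._validate_params raises ValueError.
from metagpt.exp_pool import exp_cache

@exp_cache
async def translate(req: str) -> str:
    # Stands in for an expensive LLM call.
    return f"translated: {req}"

# On a hit with a perfect experience the stored response is returned directly;
# otherwise the function runs and the new experience is scored and saved.
# result = await translate(req="hello world")
```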

metagpt/exp_pool/manager.py Normal file
View file

@ -0,0 +1,116 @@
"""Experience Manager."""
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict
from metagpt.config2 import Config, config
from metagpt.exp_pool.schema import (
DEFAULT_COLLECTION_NAME,
DEFAULT_SIMILARITY_TOP_K,
Experience,
QueryType,
)
from metagpt.logs import logger
from metagpt.utils.exceptions import handle_exception
if TYPE_CHECKING:
from llama_index.vector_stores.chroma import ChromaVectorStore
class ExperienceManager(BaseModel):
"""ExperienceManager manages the lifecycle of experiences, including CRUD and optimization.
Args:
config (Config): Configuration for managing experiences.
_storage (SimpleEngine): Engine to handle the storage and retrieval of experiences.
_vector_store (ChromaVectorStore): The actual place where vectors are stored.
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
config: Config = config
_storage: Any = None
_vector_store: Any = None
@property
def storage(self):
if self._storage is None:
try:
from metagpt.rag.engines import SimpleEngine
from metagpt.rag.schema import ChromaRetrieverConfig, LLMRankerConfig
except ImportError:
raise ImportError("To use the experience pool, you need to install the rag module.")
retriever_configs = [
ChromaRetrieverConfig(
persist_path=self.config.exp_pool.persist_path,
collection_name=DEFAULT_COLLECTION_NAME,
similarity_top_k=DEFAULT_SIMILARITY_TOP_K,
)
]
ranker_configs = [LLMRankerConfig(top_n=DEFAULT_SIMILARITY_TOP_K)]
self._storage: SimpleEngine = SimpleEngine.from_objs(
retriever_configs=retriever_configs, ranker_configs=ranker_configs
)
logger.debug(f"exp_pool config: {self.config.exp_pool}")
return self._storage
@property
def vector_store(self):
if not self._vector_store:
self._vector_store: ChromaVectorStore = self.storage._retriever._vector_store
return self._vector_store
@handle_exception
def create_exp(self, exp: Experience):
"""Adds an experience to the storage if writing is enabled.
Args:
exp (Experience): The experience to add.
"""
if not self.config.exp_pool.enable_write:
return
self.storage.add_objs([exp])
@handle_exception(default_return=[])
async def query_exps(self, req: str, tag: str = "", query_type: QueryType = QueryType.SEMANTIC) -> list[Experience]:
"""Retrieves and filters experiences.
Args:
req (str): The query string to retrieve experiences.
tag (str): Optional tag to filter the experiences by.
query_type (QueryType): Defaults to semantic (vector) matching; use exact to match identical requests only.
Returns:
list[Experience]: A list of experiences that match the args.
"""
if not self.config.exp_pool.enable_read:
return []
nodes = await self.storage.aretrieve(req)
exps: list[Experience] = [node.metadata["obj"] for node in nodes]
# TODO: filter by metadata
if tag:
exps = [exp for exp in exps if exp.tag == tag]
if query_type == QueryType.EXACT:
exps = [exp for exp in exps if exp.req == req]
return exps
def get_exps_count(self) -> int:
"""Get the total number of experiences."""
return self.vector_store._collection.count()
exp_manager = ExperienceManager()
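A short sketch of using the module-level `exp_manager` directly (the request/response strings are illustrative; it assumes the optional RAG dependencies are installed and `enable_read`/`enable_write` are set to true, otherwise both calls are no-ops):

```python
# Sketch: write one experience, then query it back by exact match.
import asyncio

from metagpt.exp_pool import exp_manager
from metagpt.exp_pool.schema import Experience, QueryType

exp_manager.create_exp(Experience(req="How to sort a list?", resp="Use sorted()."))

async def main():
    exps = await exp_manager.query_exps("How to sort a list?", query_type=QueryType.EXACT)
    print(f"{len(exps)} matching experience(s)")

asyncio.run(main())
```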

View file

@ -0,0 +1,6 @@
"""Perfect judges init."""
from metagpt.exp_pool.perfect_judges.base import BasePerfectJudge
from metagpt.exp_pool.perfect_judges.simple import SimplePerfectJudge
__all__ = ["BasePerfectJudge", "SimplePerfectJudge"]

View file

@ -0,0 +1,20 @@
"""Base perfect judge."""
from abc import ABC, abstractmethod
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Experience
class BasePerfectJudge(BaseModel, ABC):
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
async def is_perfect_exp(self, exp: Experience, serialized_req: str, *args, **kwargs) -> bool:
"""Determine whether the experience is perfect.
Args:
exp (Experience): The experience to evaluate.
serialized_req (str): The serialized request to compare against the experience's request.
"""

View file

@ -0,0 +1,27 @@
"""Simple perfect judge."""
from pydantic import ConfigDict
from metagpt.exp_pool.perfect_judges.base import BasePerfectJudge
from metagpt.exp_pool.schema import MAX_SCORE, Experience
class SimplePerfectJudge(BasePerfectJudge):
model_config = ConfigDict(arbitrary_types_allowed=True)
async def is_perfect_exp(self, exp: Experience, serialized_req: str, *args, **kwargs) -> bool:
"""Determine whether the experience is perfect.
Args:
exp (Experience): The experience to evaluate.
serialized_req (str): The serialized request to compare against the experience's request.
Returns:
bool: True if the serialized request matches the experience's request and the experience's score is perfect, False otherwise.
"""
if not exp.metric or not exp.metric.score:
return False
return serialized_req == exp.req and exp.metric.score.val == MAX_SCORE
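In other words, only an identical request paired with a full score of `MAX_SCORE` counts as perfect. A tiny sketch with made-up data:

```python
# Sketch: SimplePerfectJudge accepts only exact-request, max-score experiences.
import asyncio

from metagpt.exp_pool.perfect_judges import SimplePerfectJudge
from metagpt.exp_pool.schema import MAX_SCORE, Experience, Metric, Score

exp = Experience(req="q", resp="a", metric=Metric(score=Score(val=MAX_SCORE)))
judge = SimplePerfectJudge()
print(asyncio.run(judge.is_perfect_exp(exp, "q")))   # True
print(asyncio.run(judge.is_perfect_exp(exp, "q2")))  # False: different request
```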

View file

@ -0,0 +1,72 @@
"""Experience schema."""
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field
MAX_SCORE = 10
DEFAULT_COLLECTION_NAME = "experience_pool"
DEFAULT_SIMILARITY_TOP_K = 2
class QueryType(str, Enum):
"""Type of query experiences."""
EXACT = "exact"
SEMANTIC = "semantic"
class ExperienceType(str, Enum):
"""Experience Type."""
SUCCESS = "success"
FAILURE = "failure"
INSIGHT = "insight"
class EntryType(Enum):
"""Experience Entry Type."""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
class Score(BaseModel):
"""Score in Metric."""
val: int = Field(default=1, description="Value of the score, between 1 and 10; higher is better.")
reason: str = Field(default="", description="Reason for the value.")
class Metric(BaseModel):
"""Experience Metric."""
time_cost: float = Field(default=0.000, description="Time cost, the unit is milliseconds.")
money_cost: float = Field(default=0.000, description="Money cost, the unit is US dollars.")
score: Score = Field(default=None, description="Score, with value and reason.")
class Trajectory(BaseModel):
"""Experience Trajectory."""
plan: str = Field(default="", description="The plan.")
action: str = Field(default="", description="Action for the plan.")
observation: str = Field(default="", description="Output of the action.")
reward: int = Field(default=0, description="Measure the action.")
class Experience(BaseModel):
"""Experience."""
req: str = Field(..., description="")
resp: str = Field(..., description="The type is string/json/code.")
metric: Optional[Metric] = Field(default=None, description="Metric.")
exp_type: ExperienceType = Field(default=ExperienceType.SUCCESS, description="The type of experience.")
entry_type: EntryType = Field(default=EntryType.AUTOMATIC, description="Type of entry: Manual or Automatic.")
tag: str = Field(default="", description="Tagging experience.")
traj: Optional[Trajectory] = Field(default=None, description="Trajectory.")
def rag_key(self):
return self.req
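A small sketch of constructing an entry with these models (the field values are illustrative):

```python
# Sketch: the defaults mark a new entry as an automatic success; rag_key()
# exposes the request as the text to be embedded.
from metagpt.exp_pool.schema import EntryType, Experience, ExperienceType, Metric, Score

exp = Experience(
    req="Write a 2048 game",
    resp='{"code": "..."}',
    metric=Metric(score=Score(val=9, reason="Playable and complete")),
    tag="Engineer.write_code",
)
assert exp.exp_type == ExperienceType.SUCCESS
assert exp.entry_type == EntryType.AUTOMATIC
assert exp.rag_key() == "Write a 2048 game"
```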

View file

@ -0,0 +1,6 @@
"""Scorers init."""
from metagpt.exp_pool.scorers.base import BaseScorer
from metagpt.exp_pool.scorers.simple import SimpleScorer
__all__ = ["BaseScorer", "SimpleScorer"]

View file

@ -0,0 +1,15 @@
"""Base scorer."""
from abc import ABC, abstractmethod
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Score
class BaseScorer(BaseModel, ABC):
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
async def evaluate(self, req: str, resp: str) -> Score:
"""Evaluates the quality of a response relative to a given request."""

View file

@ -0,0 +1,65 @@
"""Simple scorer."""
import json
from pydantic import Field
from metagpt.exp_pool.schema import Score
from metagpt.exp_pool.scorers.base import BaseScorer
from metagpt.llm import LLM
from metagpt.provider.base_llm import BaseLLM
from metagpt.utils.common import CodeParser
SIMPLE_SCORER_TEMPLATE = """
Role: You are a highly efficient assistant, tasked with evaluating a response to a given request. The response is generated by a large language model (LLM).
I will provide you with a request and a corresponding response. Your task is to assess this response and provide a score from a human perspective.
## Context
### Request
{req}
### Response
{resp}
## Format Example
```json
{{
"val": "the value of the score, int from 1 to 10, higher is better.",
"reason": "an explanation supporting the score."
}}
```
## Instructions
- Understand the request and response given by the user.
- Evaluate the response based on its quality relative to the given request.
- Provide a score from 1 to 10, where 10 is the best.
- Provide a reason supporting your score.
## Constraint
Format: Just print the result in json format like **Format Example**.
## Action
Follow instructions, generate output and make sure it follows the **Constraint**.
"""
class SimpleScorer(BaseScorer):
llm: BaseLLM = Field(default_factory=LLM)
async def evaluate(self, req: str, resp: str) -> Score:
"""Evaluates the quality of a response relative to a given request, as scored by an LLM.
Args:
req (str): The request.
resp (str): The response.
Returns:
Score: An object containing the score (1-10) and the reasoning.
"""
prompt = SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp)
resp = await self.llm.aask(prompt)
resp_json = json.loads(CodeParser.parse_code(resp, lang="json"))
return Score(**resp_json)
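The scorer relies on the LLM returning a fenced JSON block shaped like the Format Example; a sketch of that parsing step (the inline `llm_output` stands in for a real `llm.aask` result, and the manual strip is a stand-in for `CodeParser.parse_code`):

```python
# Sketch: turn the model's fenced JSON answer into a Score object.
import json

from metagpt.exp_pool.schema import Score

llm_output = '```json\n{"val": 8, "reason": "Accurate and concise."}\n```'
payload = llm_output.strip("`").removeprefix("json\n")  # stand-in for CodeParser.parse_code
score = Score(**json.loads(payload))
print(score.val, score.reason)  # 8 Accurate and concise.
```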

View file

@ -0,0 +1,9 @@
"""Serializers init."""
from metagpt.exp_pool.serializers.base import BaseSerializer
from metagpt.exp_pool.serializers.simple import SimpleSerializer
from metagpt.exp_pool.serializers.action_node import ActionNodeSerializer
from metagpt.exp_pool.serializers.role_zero import RoleZeroSerializer
__all__ = ["BaseSerializer", "SimpleSerializer", "ActionNodeSerializer", "RoleZeroSerializer"]

View file

@ -0,0 +1,36 @@
"""ActionNode Serializer."""
from __future__ import annotations
from typing import TYPE_CHECKING, Type
# Import ActionNode only for type checking to avoid circular imports
if TYPE_CHECKING:
from metagpt.actions.action_node import ActionNode
from metagpt.exp_pool.serializers.simple import SimpleSerializer
class ActionNodeSerializer(SimpleSerializer):
def serialize_resp(self, resp: ActionNode) -> str:
return resp.instruct_content.model_dump_json()
def deserialize_resp(self, resp: str) -> ActionNode:
"""Customized deserialization, it will be triggered when a perfect experience is found.
ActionNode cannot be serialized, it throws an error 'cannot pickle 'SSLContext' object'.
"""
class InstructContent:
def __init__(self, json_data):
self.json_data = json_data
def model_dump_json(self):
return self.json_data
from metagpt.actions.action_node import ActionNode
action_node = ActionNode(key="", expected_type=Type[str], instruction="", example="")
action_node.instruct_content = InstructContent(resp)
return action_node
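A sketch of the round trip (the stored JSON string below is illustrative; it assumes a full metagpt install so `ActionNode` can be imported):

```python
# Sketch: deserialize_resp rebuilds a minimal ActionNode whose instruct_content
# can replay the stored JSON via model_dump_json().
from metagpt.exp_pool.serializers import ActionNodeSerializer

stored = '{"title": "2048 game"}'  # what serialize_resp wrote earlier
node = ActionNodeSerializer().deserialize_resp(stored)
assert node.instruct_content.model_dump_json() == stored
```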

View file

@ -0,0 +1,29 @@
"""Base serializer."""
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, ConfigDict
class BaseSerializer(BaseModel, ABC):
model_config = ConfigDict(arbitrary_types_allowed=True)
@abstractmethod
def serialize_req(self, **kwargs) -> str:
"""Serializes the request for storage.
Do not modify kwargs. If modification is necessary, use copy.deepcopy to create a copy first.
Note that copy.deepcopy may raise errors, such as TypeError: cannot pickle '_thread.RLock' object.
"""
@abstractmethod
def serialize_resp(self, resp: Any) -> str:
"""Serializes the function's return value for storage.
Do not modify resp. The rest is the same as `serialize_req`.
"""
@abstractmethod
def deserialize_resp(self, resp: str) -> Any:
"""Deserializes the stored response back to the function's return value"""

View file

@ -0,0 +1,58 @@
"""RoleZero Serializer."""
import copy
import json
from metagpt.exp_pool.serializers.simple import SimpleSerializer
class RoleZeroSerializer(SimpleSerializer):
def serialize_req(self, **kwargs) -> str:
"""Serialize the request for database storage, ensuring it is a string.
Only extracts the necessary content from `req` because `req` may be very lengthy and could cause embedding errors.
Args:
req (list[dict]): The request to be serialized. Example:
[
{"role": "user", "content": "..."},
{"role": "assistant", "content": "..."},
{"role": "user", "content": "context"},
]
Returns:
str: The serialized request as a JSON string.
"""
req = kwargs.get("req", [])
if not req:
return ""
filtered_req = self._filter_req(req)
if state_data := kwargs.get("state_data"):
filtered_req.append({"role": "user", "content": state_data})
return json.dumps(filtered_req)
def _filter_req(self, req: list[dict]) -> list[dict]:
"""Filter the `req` to include only necessary items.
Args:
req (list[dict]): The original request.
Returns:
list[dict]: The filtered request.
"""
filtered_req = [copy.deepcopy(item) for item in req if self._is_useful_content(item["content"])]
return filtered_req
def _is_useful_content(self, content: str) -> bool:
"""Currently, only the content of the file is considered, and more judgments can be added later."""
if "Command Editor.read executed: file_path" in content:
return True
return False
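A sketch of the filtering (the message contents are made up; only the Editor.read output survives, plus the appended `state_data` message):

```python
# Sketch: serialize_req keeps file-read messages and drops the rest.
import json

from metagpt.exp_pool.serializers import RoleZeroSerializer

req = [
    {"role": "user", "content": "general chit-chat, dropped"},
    {"role": "user", "content": "Command Editor.read executed: file_path=src/game.js ..."},
]
serialized = RoleZeroSerializer().serialize_req(req=req, state_data="# Current Plan ...")
print(json.loads(serialized))  # 2 items: the file read + the state_data message
```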

View file

@ -0,0 +1,22 @@
"""Simple Serializer."""
from typing import Any
from metagpt.exp_pool.serializers.base import BaseSerializer
class SimpleSerializer(BaseSerializer):
def serialize_req(self, **kwargs) -> str:
"""Just use `str` to convert the request object into a string."""
return str(kwargs.get("req", ""))
def serialize_resp(self, resp: Any) -> str:
"""Just use `str` to convert the response object into a string."""
return str(resp)
def deserialize_resp(self, resp: str) -> Any:
"""Just return the string response as it is."""
return resp

View file

@ -5,7 +5,7 @@
import json
import re
from unidiff import PatchedFile, PatchSet
from unidiff import PatchSet
from metagpt.actions.action import Action
from metagpt.ext.cr.utils.cleaner import (
@ -19,6 +19,7 @@ from metagpt.utils.common import parse_json_code_block
CODE_REVIEW_PROMPT_TEMPLATE = """
NOTICE
Let's think and work step by step.
With the given pull-request(PR) Patch, and referenced Points(Code Standards), you should compare each point with the code one-by-one.
Line numbers have been added at the start of each line of the Patch code for readability, but the review should focus on newly added code inside the `Patch` (lines starting with a line number and '+').
@ -52,12 +53,13 @@ CodeReview guidelines:
- Don't suggest to add docstring unless it's necessary indeed.
- If the same code error occurs multiple times, it cannot be omitted, and all places need to be identified. But don't duplicate the same comment at the same place!
- Every line of code in the patch needs to be carefully checked; nothing may be skipped out of laziness. It is necessary to find out all the places.
- The `comment` and `point_id` in the Output must correspond to and belong to the same one `Point`.
Just print the PR Patch comments in json format like **Output Format**.
"""
CODE_REVIEW_COMFIRM_SYSTEM_PROMPT = """
You are a professional engineer with Java stack, and good at code review comment result judgement.
You are a professional engineer with a {code_language} stack, and good at judging code review comment results. Let's think and work step by step.
"""
CODE_REVIEW_COMFIRM_TEMPLATE = """
@ -76,7 +78,7 @@ CODE_REVIEW_COMFIRM_TEMPLATE = """
## Your Task:
1. First, check if the code meets the requirements and does not violate any defects. If it meets the requirements and does not violate any defects, print `False` and do not proceed with further judgment.
2. If the check in step 1 does not print `False`, proceed to further judgment. Based on the "Reference Example for Judgment" provided, determine if the "Code" and "Code Review Comments" match. If they match, print `True`; otherwise, print `False`.
2. Based on the `Reference Example for Judgment` provided, determine if the `Code` and `Code Review Comments` match. If they match, print `True`; otherwise, print `False`.
Note: Your output should only be `True` or `False` without any explanations.
"""
@ -89,25 +91,35 @@ class CodeReview(Action):
new_comments = []
logger.debug(f"original comments: {comments}")
for cmt in comments:
for p in points:
if int(cmt.get("point_id", -1)) == p.id:
code_start_line = cmt.get("code_start_line")
code_end_line = cmt.get("code_end_line")
code = get_code_block_from_patch(patch, code_start_line, code_end_line)
try:
if cmt.get("commented_file").endswith(".py"):
points = [p for p in points if p.language == "Python"]
elif cmt.get("commented_file").endswith(".java"):
points = [p for p in points if p.language == "Java"]
else:
continue
for p in points:
point_id = int(cmt.get("point_id", -1))
if point_id == p.id:
code_start_line = cmt.get("code_start_line")
code_end_line = cmt.get("code_end_line")
code = get_code_block_from_patch(patch, code_start_line, code_end_line)
new_comments.append(
{
"commented_file": cmt.get("commented_file"),
"code": code,
"code_start_line": code_start_line,
"code_end_line": code_end_line,
"comment": cmt.get("comment"),
"point_id": p.id,
"point": p.text,
"point_detail": p.detail,
}
)
break
new_comments.append(
{
"commented_file": cmt.get("commented_file"),
"code": code,
"code_start_line": code_start_line,
"code_end_line": code_end_line,
"comment": cmt.get("comment"),
"point_id": p.id,
"point": p.text,
"point_detail": p.detail,
}
)
break
except Exception:
pass
logger.debug(f"new_comments: {new_comments}")
return new_comments
@ -132,51 +144,49 @@ class CodeReview(Action):
code = get_code_block_from_patch(
patch, str(max(1, int(code_start_line) - 5)), str(int(code_end_line) + 5)
)
code_language = "Java"
code_file_ext = cmt.get("commented_file", ".java").split(".")[-1]
if code_file_ext == "java":  # split(".")[-1] drops the leading dot, so compare without it
code_language = "Java"
elif code_file_ext == "py":
code_language = "Python"
prompt = CODE_REVIEW_COMFIRM_TEMPLATE.format(
code=code,
comment=cmt.get("comment"),
desc=point.text,
example=point.yes_example + "\n" + point.no_example,
)
resp = await self.llm.aask(prompt, system_msgs=[CODE_REVIEW_COMFIRM_SYSTEM_PROMPT])
system_prompt = [CODE_REVIEW_COMFIRM_SYSTEM_PROMPT.format(code_language=code_language)]
resp = await self.llm.aask(prompt, system_msgs=system_prompt)
if "True" in resp or "true" in resp:
new_comments.append(cmt)
logger.info(f"original comments num: {len(comments)}, confirmed comments num: {len(new_comments)}")
return new_comments
async def cr_by_full_points(self, patch: PatchSet, points: list[Point]):
async def cr_by_points(self, patch: PatchSet, points: list[Point]):
comments = []
points_str = "\n".join([f"{p.id} {p.text}" for p in points])
for patched_file in patch:
if not patched_file:
continue
if patched_file.path.endswith(".py"):
points_str = "\n".join([f"{p.id} {p.text}" for p in points if p.language == "Python"])
points = [p for p in points if p.language == "Python"]
elif patched_file.path.endswith(".java"):
points_str = "\n".join([f"{p.id} {p.text}" for p in points if p.language == "Java"])
points = [p for p in points if p.language == "Java"]
else:
continue
if len(str(patched_file).splitlines()) >= 50:
cr_by_segment_points_comments = await self.cr_by_segment_points(
patched_file=patched_file, points=points
)
comments += cr_by_segment_points_comments
continue
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments += json.loads(json_str)
return comments
async def cr_by_segment_points(self, patched_file: PatchedFile, points: list[Point]):
comments = []
group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
for group_point in group_points:
points_str = "\n".join([f"{p.id} {p.text}" for p in group_point])
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments_batch = json.loads(json_str)
comments += comments_batch
group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
for group_point in group_points:
points_str = "id description\n"
points_str += "\n".join([f"{p.id} {p.text}" for p in group_point])
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments_batch = json.loads(json_str)
if comments_batch:
patched_file_path = patched_file.path
for c in comments_batch:
c["commented_file"] = patched_file_path
comments += comments_batch
return comments
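The grouping above simply windows the points three at a time so each prompt stays small; for instance:

```python
# Sketch of the batching idiom used in cr_by_points.
points = list(range(7))
group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
print(group_points)  # [[0, 1, 2], [3, 4, 5], [6]]
```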
@ -185,7 +195,7 @@ class CodeReview(Action):
patch: PatchSet = add_line_num_on_patch(patch)
result = []
comments = await self.cr_by_full_points(patch=patch, points=points)
comments = await self.cr_by_points(patch=patch, points=points)
if len(comments) != 0:
comments = self.format_comments(comments, points, patch)
comments = await self.confirm_comments(patch=patch, comments=comments, points=points)

View file

@ -81,17 +81,18 @@ class ModifyCode(Action):
}
resp = None
for patched_file in patch:
patch_target_file_name = str(patched_file.target_file).split("/", maxsplit=1)[-1]
if patch_target_file_name not in grouped_comments:
patch_target_file_name = str(patched_file.path).split("/")[-1]
if patched_file.path not in grouped_comments:
continue
comments_prompt = ""
index = 1
for grouped_comment in grouped_comments[patch_target_file_name]:
for grouped_comment in grouped_comments[patched_file.path]:
comments_prompt += f"""
<comment{index}>
{grouped_comment}
</comment{index}>\n
"""
index += 1
prompt = MODIFY_CODE_PROMPT.format(patch=patched_file, comments=comments_prompt)
output_dir = (
Path(output_dir)

File diff suppressed because it is too large.

View file

@ -1,44 +1,26 @@
CMD_PROMPT = """
# Data Structure
class Task(BaseModel):
task_id: str = ""
dependent_task_ids: list[str] = []
instruction: str = ""
task_type: str = ""
assignee: str = "David"
from metagpt.strategy.task_type import TaskType
# Available Commands
{available_commands}
# Current Plan
{plan_status}
# Example
{example}
# Instructions
Based on the context, write a plan or modify an existing plan to achieve the goal. A plan consists of one to 3 tasks.
If plan is created, you should track the progress and update the plan accordingly, such as finish_current_task, append_task, reset_task, replace_task, etc.
Pay close attention to new user message, review the conversation history, use reply_to_human to respond to new user requirement.
Note:
1. If you keep encountering errors, unexpected situations, or you are not sure how to proceed, use ask_human to ask for help.
2. Carefully review your progress on the current task; if your actions so far have not fulfilled the task instruction, you should continue with the current task. Otherwise, finish the current task.
3. Each time you finish a task, use reply_to_human to report your progress.
Pay close attention to the Example provided, you can reuse the example for your current situation if it fits.
You may use any of the available commands to create a plan or update the plan. You may output multiple commands; they will be executed sequentially.
If you finish current task, you will automatically take the next task in the existing plan, use finish_task, DON'T append a new task.
# Your commands in a json array, in the following output format, always output a json array, if there is nothing to do, use the pass command:
Some text indicating your thoughts, such as how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands.
```json
[
{{
"command_name": str,
"args": {{"arg_name": arg_value, ...}}
}},
...
]
```
Notice: your output JSON data section must start with **```json [**
BROWSER_INSTRUCTION = """
4. Carefully choose to use or not use the browser tool to assist you in web tasks.
- When no click action is required, there is no need to use the browser tool to navigate to the webpage before scraping.
- If you need detailed HTML content, write code to get it rather than using the browser tool.
- Make sure the command_name is actually among the Available Commands when you use the browser tool.
"""
TASK_TYPE_DESC = "\n".join([f"- **{tt.type_name}**: {tt.value.desc}" for tt in TaskType])
CODE_STATUS = """
**Code written**:
{code}
**Execution status**: {status}
**Execution result**: {result}
"""
BROWSER_INFO = """
Here are the ordered web actions in the browser environment; note that you cannot use the browser tool in the current environment.
{browser_actions}
The latest url is the one you should use to view the page. If view page has been done, directly use the variable and html content in executing result.
"""

View file

@ -11,6 +11,7 @@ EXTRA_INSTRUCTION = """
11. Write out EVERY CODE DETAIL, DON'T LEAVE TODO.
12. To modify code in a file, read the entire file, make changes, and update the file with the complete code, ensuring that no line numbers are included in the final write.
13. When a system design or project schedule is provided, at the end of the plan, add a CodeReview Task for each file; for example, if there are three files, add three CodeReview Tasks. For each CodeReview Task, just call ReviewAndRewriteCode.run.
14. When you are making a plan, it is highly recommended to plan all the coding tasks and review tasks in your first response.
"""

View file

@ -5,10 +5,12 @@ When presented a current task, tackle the task using the available commands.
Pay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to new user requirement.
Note:
1. If you keep encountering errors, unexpected situations, or you are not sure how to proceed, use RoleZero.ask_human to ask for help.
2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task.
2. Carefully review your progress on the current task; if your actions so far have not fulfilled the task instruction, you should continue with the current task. Otherwise, finish the current task via Plan.finish_current_task explicitly.
3. Each time you finish a task, use RoleZero.reply_to_human to report your progress.
4. Don't forget to append task first when all existing tasks are finished and new tasks are required.
5. Avoid repeating tasks you have already completed, and end the loop when all requirements are met.
"""
# To ensure compatibility with hard-coded experience, do not add any other content between "# Example" and "# Available Commands".
CMD_PROMPT = """
# Data Structure
class Task(BaseModel):
@ -17,6 +19,9 @@ class Task(BaseModel):
instruction: str = ""
task_type: str = ""
assignee: str = ""
# Available Task Types
{task_type_desc}
# Available Commands
{available_commands}
@ -37,9 +42,10 @@ Special Command: Use {{"command_name": "end"}} to do nothing or indicate complet
Pay close attention to the Example provided, you can reuse the example for your current situation if it fits.
You may use any of the available commands to create a plan or update the plan. You may output multiple commands; they will be executed sequentially.
If you finish current task, you will automatically take the next task in the existing plan, use Plan.finish_task, DON'T append a new task.
Pay close attention to what you have done. Do not simply repeat your previous actions.
# Your commands in a json array, in the following output format. If there is nothing to do, use the pass or end command:
Some text indicating your thoughts, such as how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands. You must output ONE and ONLY ONE json array. DON'T output multiple json arrays with thoughts between them.
# Your commands in a json array, in the following output format with correct command_name and args. If there is nothing to do, use the pass or end command:
Some text indicating your thoughts before JSON is required, such as what tasks have been completed, what tasks are next, how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands. You must output ONE and ONLY ONE json array. DON'T output multiple json arrays with thoughts between them.
```json
[
{{
@ -49,6 +55,7 @@ Some text indicating your thoughts, such as how you should update the plan statu
...
]
```
Notice: your output JSON data must be a command list.
Notice: your output JSON data section must start with **```json [**
"""
@ -58,9 +65,9 @@ JSON_REPAIR_PROMPT = """
## Output Format
```json
Formatted JSON data
```
Help check if there are any formatting issues with the JSON data? If so, please help format it
Please check whether there are any formatting issues with the JSON data; if so, help format it.
"""
QUICK_THINK_PROMPT = """

View file

@ -7,7 +7,7 @@ https://github.com/princeton-nlp/SWE-agent/tree/main/config/configs
SWE_AGENT_SYSTEM_TEMPLATE = """
SETTING: You are an autonomous programmer, and you're working directly in the environment line with a special interface.
The special interface consists of a file editor that shows you {WINDOW} lines of a file at a time.
The special interface consists of a file editor that shows you 100 lines of a file at a time.
Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION. Pay attention to the original indentation when replacing the function.
If you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.
@ -50,7 +50,8 @@ MINIMAL_EXAMPLE = """
## Example of an actions trajectory
User Requirement and Issue: Fix the bug in the repo. Because the environment is not available, you DO NOT need to run and modify any existing test case files or add new test case files to ensure that the bug is fixed.
### Read and understand issue(Require):
### Read and understand issue:
Thought: Firstly, I need to review the detailed information of this issue in order to understand the problem that needs fixing.
{{
"command_name": "Browser.goto",
"args": {{
@ -60,43 +61,99 @@ User Requirement and Issue: Fix the bug in the repo. Because the environment is
->
### Locate issue(Require): Locate the issue in the code by searching for the relevant file, function, or class and open the file to view the code.
Thought: I need to change into the repo directory
{{
"command_name": "Bash.run",
"args": {{
"cmd": "cd /workspace/django__django_3.0"
"cmd": "cd /workspace/MetaGPT"
}}
}}
->
Bash.run(cmd='search_dir_and_preview ASCIIUsernameValidator')
Thought: Let's start by searching for the `openai_api.py` file.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "open /workspace/django__django_3.0/django/contrib/auth/validators.py"
"cmd": "find_file 'openai_api.py'"
}}
}}
->
Thought: We have located the `openai_api.py` file. Let's open it to apply the necessary changes.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "open '/workspace/MetaGPT/provider/openai_api.py'"
}}
}}
->
### Fix the Bug(Require): Fix the bug in the code by editing the relevant function, class or code snippet.
Thought: Now that I've found the bug, let's fix it by edit.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "edit 10:20 <<EOF\n regex = r'\A[\w.@+-]+\Z'\n message = _( \n 'Enter a valid username. This value may contain only English letters, ' \n 'numbers, and @/./+/-/_ characters.'\n )\n flags = re.ASCII\n\n@deconstructible\nclass UnicodeUsernameValidator(validators.RegexValidator):\n regex = r'\A[\w.@+-]+\Z'\nEOF"
"cmd": "edit 93:95 <<EOF\n usage = None\n collected_messages = []\n async for chunk in response:\n if chunk.usage is not None:\n usage = CompletionUsage(**chunk.usage)\n chunk_message = chunk.choices[0].delta.content or '' if chunk.choices else '' # extract the message\n finish_reason = (\n chunk.choices[0].finish_reason if chunk.choices and hasattr(chunk.choices[0], 'finish_reason') else None\n )\n log_llm_stream(chunk_message)\nEOF"
}}
}}
->
Thought: Due to an error related to an undefined name 'Image', we need to address this issue even though it is not directly related to our work. Let's try importing the package to fix it.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "edit 14:14 <<EOF\nfrom PIL.Image import Image\nEOF"
}}
}}
->
### Submit the Changes(Require): Submit the changes to the repository.
### Save the Changes (Required): After all changes have been made, save them to the repository.
> You must choose one of the following two methods.
#### Just save the changes locally, it only need one action.
Thought: The bug has been fixed. Let's submit the changes.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "submit"
}}
}}
Bash.run(cmd='submit')
->
#### Save the changes and commit them to the remote repository.
##### Push the changes from the local repository to the remote repository.
Thought: All changes have been saved, let's push the code to the remote repository.
{{
"command_name": "end",
"command_name": "git_push",
"args": {{
"local_path": "/workspace/MetaGPT",
"app_name": "github",
"comments": "Fix Issue #1275: produced TypeError: openai.types.completion_usage.CompletionUsage() argument after ** must be a mapping, not NoneType"",
"new_branch": "test-fix"
}}
}}
->
##### Create a pull request (Optional): Merge the changes from the new branch into the master branch.
Thought: Now that the changes have been pushed to the remote repository, due to the user's requirement, let's create a pull request to merge the changes into the master branch.
[{{
"command_name": "git_create_pull",
"args": {{
"base": "master",
"head": "test-fix",
"base_repo_name": "garylin2099/MetaGPT",
"head_repo_name": "seeker-jie/MetaGPT",
"app_name": "github",
"title": "Fix Issue #1275: produced TypeError: openai.types.completion_usage.CompletionUsage() argument after ** must be a mapping, not NoneType"",
"body": "This pull request addresses issue #1275 by ensuring that chunk.usage is not None before passing it to CompletionUsage."
}}
}}]
->
### Finally
Thought: All tasks have been done, let's end the conversation.
{{
"command_name": "end"
}}
"""
@ -162,7 +219,10 @@ IMPORTANT_TIPS = """
- If a search command fails, modify the search criteria and check for typos or incorrect paths, then try again.
- Based on feedback of observation or bash command in trajectory to guide adjustments in your search strategy.
13. If the task results in succeed, fail, or NO PROGRESS, output `submit`.
13. Save the code change:
- If you need to submit changes to the remote repository, first use the regular git commit command to save the changes locally, then select a command from the `Available Commands: [git_push, git_create_pull]` to submit the changes to the remote repository.
- If you don't need to submit code changes to the remote repository, use the command Bash.run('submit') to commit the changes locally.
14. If provided an issue link, you MUST go to the issue page using Browser tool to understand the issue before starting your fix.
@ -185,5 +245,4 @@ The current bash state is:
(Current directory: {{working_dir}})
Avoid repeating the same command. Instead, please think about the current situation and provide the next bash command to execute in JSON format:"
"""

View file

@ -28,7 +28,10 @@ your code
```
"""
REFLECTION_SYSTEM_MSG = """You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation."""
REFLECTION_SYSTEM_MSG = """
You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation.
When a ModuleNotFoundError occurs, always install the required package, using the Terminal tool if available.
"""
DEBUG_REFLECTION_EXAMPLE = '''
[previous impl]:

View file

@ -53,3 +53,9 @@ The current task is about converting image into webpage code. please note the fo
- Single-Step Code Generation: Execute the entire code generation process in a single step, encompassing HTML, CSS, and JavaScript. Avoid fragmenting the code generation into multiple separate steps to maintain consistency and simplify the development workflow.
- Save webpages: Be sure to use the save method provided.
"""
# Prompt for taking on "web_scraping" tasks
WEB_SCRAPING_PROMPT = """
- Remember to view and print the necessary HTML content in a separate task to understand the structure first before scraping data, e.g. `html_content = await view_page_element_to_scrape(...)\nprint(html_content)`.
- Since the data required by the user may not correspond directly to the actual HTML element names, you should first thoroughly analyze the HTML structure and the meaning of each element in the execution result. Ensure the `class_` in your code is derived directly from the actual HTML structure, not from your prior knowledge. To ensure this, analyse the most suitable location of the `class_` in the actual HTML content before writing code.
"""

View file

@ -1,151 +1,111 @@
from __future__ import annotations
import json
from typing import Literal
import re
from typing import List
from pydantic import model_validator
from pydantic import Field, model_validator
from metagpt.actions import Action
from metagpt.actions.di.execute_nb_code import ExecuteNbCode
from metagpt.actions.di.write_analysis_code import WriteAnalysisCode
from metagpt.logs import logger
from metagpt.prompts.di.data_analyst import CMD_PROMPT
from metagpt.prompts.di.role_zero import JSON_REPAIR_PROMPT
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.schema import Message, TaskResult
from metagpt.strategy.experience_retriever import KeywordExpRetriever
from metagpt.strategy.planner import Planner
from metagpt.strategy.thinking_command import (
Command,
prepare_command_prompt,
run_commands,
)
from metagpt.tools.tool_recommend import BM25ToolRecommender
from metagpt.utils.common import CodeParser
from metagpt.utils.report import ThoughtReporter
from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output, RepairType
from metagpt.prompts.di.data_analyst import BROWSER_INSTRUCTION, TASK_TYPE_DESC, CODE_STATUS, BROWSER_INFO
from metagpt.prompts.di.role_zero import ROLE_INSTRUCTION
from metagpt.roles.di.role_zero import RoleZero
from metagpt.schema import TaskResult, Message
from metagpt.strategy.experience_retriever import ExpRetriever, KeywordExpRetriever
from metagpt.tools.tool_recommend import BM25ToolRecommender, ToolRecommender
from metagpt.tools.tool_registry import register_tool
class DataAnalyst(DataInterpreter):
@register_tool(include_functions=["write_and_exec_code"])
class DataAnalyst(RoleZero):
name: str = "David"
profile: str = "DataAnalyst"
goal: str = "Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git and github operation, etc."
react_mode: Literal["react"] = "react"
max_react_loop: int = 20 # used for react mode
task_result: TaskResult = None
available_commands: list[Command] = [
Command.APPEND_TASK,
Command.RESET_TASK,
Command.REPLACE_TASK,
Command.FINISH_CURRENT_TASK,
# Command.PUBLISH_MESSAGE,
Command.ASK_HUMAN,
Command.REPLY_TO_HUMAN,
# Command.PASS,
]
commands: list[dict] = [] # issued commands to be executed
user_requirement: str = ""
instruction: str = ROLE_INSTRUCTION + BROWSER_INSTRUCTION
task_type_desc: str = TASK_TYPE_DESC
tools: list[str] = ["Plan", "DataAnalyst", "RoleZero", "Browser"]
custom_tools: list[str] = ["machine learning", "web scraping", "Terminal"]
custom_tool_recommender: ToolRecommender = None
experience_retriever: ExpRetriever = KeywordExpRetriever()
use_reflection: bool = True
write_code: WriteAnalysisCode = Field(default_factory=WriteAnalysisCode, exclude=True)
execute_code: ExecuteNbCode = Field(default_factory=ExecuteNbCode, exclude=True)
@model_validator(mode="after")
def set_plan_and_tool(self) -> "DataInterpreter":
# We force using this parameter for DataAnalyst
assert self.react_mode == "react"
assert self.auto_run
assert self.use_plan
def set_custom_tool(self):
if self.custom_tools and not self.custom_tool_recommender:
self.custom_tool_recommender = BM25ToolRecommender(tools=self.custom_tools)
# Roughly the same part as DataInterpreter.set_plan_and_tool
self._set_react_mode(react_mode=self.react_mode, max_react_loop=self.max_react_loop, auto_run=self.auto_run)
if self.tools and not self.tool_recommender:
self.tool_recommender = BM25ToolRecommender(tools=self.tools)
self.set_actions([WriteAnalysisCode])
def _update_tool_execution(self):
self.tool_execution_map.update({
"DataAnalyst.write_and_exec_code": self.write_and_exec_code,
})
# HACK: Init Planner, control it through dynamic thinking; Consider formalizing as a react mode
self.planner = Planner(goal="", working_memory=self.rc.working_memory, auto_run=True)
async def parse_browser_actions(self, memory: List[Message]) -> List[Message]:
memory = await super().parse_browser_actions(memory)
browser_actions = []
for index, msg in enumerate(memory):
if msg.cause_by == "browser":
browser_url = re.search('URL: (.*?)\\n', msg.content).group(1)
pattern = re.compile(r"Command Browser\.(\w+) executed")
browser_actions.append({
'command': pattern.match(memory[index - 1].content).group(1),
'current url': browser_url
})
if browser_actions:
browser_actions = BROWSER_INFO.format(browser_actions=browser_actions)
self.rc.working_memory.add(Message(content=browser_actions, role="user", cause_by="browser"))
return memory
return self
async def write_and_exec_code(self):
"""Write a code block for current task and execute it in an interactive notebook environment."""
counter = 0
success = False
await self.execute_code.init_code()
async def _think(self) -> bool:
"""Useful in 'react' mode. Use LLM to decide whether and what to do next."""
self._set_state(0)
example = ""
if not self.planner.plan.goal:
self.user_requirement = self.get_memories()[-1].content
self.planner.plan.goal = self.user_requirement
example = KeywordExpRetriever().retrieve(self.user_requirement)
# plan info
plan_status = self.planner.get_plan_status()
plan_status = self.planner.plan.model_dump(include=["goal", "tasks"])
# for task in plan_status["tasks"]:
# task.pop("code")
# task.pop("result")
prompt = CMD_PROMPT.format(
plan_status=plan_status,
example=example,
available_commands=prepare_command_prompt(self.available_commands),
)
context = self.llm.format_msg(self.working_memory.get() + [Message(content=prompt, role="user")])
# print(*context, sep="\n" + "*" * 5 + "\n")
async with ThoughtReporter(enable_llm_stream=True):
rsp = await self.llm.aask(context)
# tool info
if self.custom_tool_recommender:
plan = self.planner.plan
fixed = ["Terminal"] if "Terminal" in self.custom_tools else None
tool_info = await self.custom_tool_recommender.get_recommended_tool_info(fixed=fixed, plan=plan)
else:
tool_info = ""
# Temporary workaround: once the role zero version is complete, the code inside these markers can be replaced directly
# ------------- start ---------------
try:
commands = CodeParser.parse_code(block=None, lang="json", text=rsp)
commands = json.loads(repair_llm_raw_output(output=commands, req_keys=[None], repair_type=RepairType.JSON))
except json.JSONDecodeError as e:
commands = await self.llm.aask(msg=JSON_REPAIR_PROMPT.format(json_data=rsp))
commands = json.loads(CodeParser.parse_code(block=None, lang="json", text=commands))
except Exception as e:
tb = traceback.format_exc()
print(tb)
while not success and counter < 3:
### write code ###
logger.info(f"ready to WriteAnalysisCode")
use_reflection = (counter > 0 and self.use_reflection) # only use reflection after the first trial
# Fault tolerance for LLM output that does not follow the required format
if isinstance(commands, dict):
commands = commands["commands"] if "commands" in commands else [commands]
# ------------- end ---------------
code = await self.write_code.run(
user_requirement=self.planner.plan.goal,
plan_status=plan_status,
tool_info=tool_info,
working_memory=self.rc.working_memory.get(),
use_reflection=use_reflection,
)
self.rc.working_memory.add(Message(content=code, role="assistant", cause_by=WriteAnalysisCode))
self.rc.working_memory.add(Message(content=rsp, role="assistant"))
await run_commands(self, commands, self.rc.working_memory)
return bool(self.rc.todo)
### execute code ###
result, success = await self.execute_code.run(code)
print(result)
async def _act(self) -> Message:
"""Useful in 'react' mode. Return a Message conforming to Role._act interface."""
logger.info(f"ready to take on task {self.planner.plan.current_task}")
self.rc.working_memory.add(Message(content=result, role="user", cause_by=ExecuteNbCode))
# TODO: Consider an appropriate location to insert task experience formally
experience = KeywordExpRetriever().retrieve(self.planner.plan.current_task.instruction, exp_type="task")
if experience and experience not in [msg.content for msg in self.rc.working_memory.get()]:
exp_msg = Message(content=experience, role="assistant")
self.rc.working_memory.add(exp_msg)
### process execution result ###
counter += 1
if success:
task_result = TaskResult(code=code, result=result, is_success=success)
self.planner.current_task.update_task_result(task_result)
code, result, is_success = await self._write_and_exec_code()
self.planner.plan.current_task.is_success = (
is_success # mark is_success, determine is_finished later in thinking
)
# FIXME: task result is always overwritten by the last act, whereas it can be made up of multiple acts
self.task_result = TaskResult(code=code, result=result, is_success=is_success)
return Message(content="Task completed", role="assistant", sent_from=self._setting, cause_by=WriteAnalysisCode)
async def _react(self) -> Message:
# NOTE: Diff 1: Each time landing here means observing news, set todo to allow news processing in _think
self._set_state(0)
actions_taken = 0
rsp = Message(content="No actions taken yet", cause_by=Action) # will be overwritten after Role _act
while actions_taken < self.rc.max_react_loop:
# NOTE: Diff 2: Keep observing within _react, news will go into memory, allowing adapting to new info
# add news from self._observe, the one called in self.run, consider removing when switching from working_memory to memory
self.working_memory.add_batch(self.rc.news)
await self._observe()
# add news from this self._observe, we need twice because _observe rewrites rc.news
self.working_memory.add_batch(self.rc.news)
# think
has_todo = await self._think()
if not has_todo:
break
# act
logger.debug(f"{self._setting}: {self.rc.state=}, will do {self.rc.todo}")
rsp = await self._act()
actions_taken += 1
return rsp # return output from the last action
status = 'Success' if success else 'Failed'
output = CODE_STATUS.format(code=code, status=status, result=result)
if success:
output += 'The code written has been executed successfully.'
self.rc.working_memory.clear()
return output

View file

@ -10,6 +10,9 @@ from pydantic import model_validator
from metagpt.actions import Action, UserRequirement
from metagpt.actions.di.run_command import RunCommand
from metagpt.exp_pool import exp_cache
from metagpt.exp_pool.context_builders import RoleZeroContextBuilder
from metagpt.exp_pool.serializers import RoleZeroSerializer
from metagpt.logs import logger
from metagpt.prompts.di.role_zero import (
CMD_PROMPT,
@ -41,6 +44,7 @@ class RoleZero(Role):
system_msg: list[str] = None # Use None to conform to the default value at llm.aask
cmd_prompt: str = CMD_PROMPT
instruction: str = ROLE_INSTRUCTION
task_type_desc: str = None
# React Mode
react_mode: Literal["react"] = "react"
@ -142,27 +146,50 @@ class RoleZero(Role):
tool_info = json.dumps({tool.name: tool.schemas for tool in tools})
### Make Decision Dynamically ###
instruction = self.instruction.strip()
prompt = self.cmd_prompt.format(
plan_status=plan_status,
current_task=current_task,
example=example,
available_commands=tool_info,
instruction=self.instruction.strip(),
task_type_desc=self.task_type_desc,
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
memory = self.rc.memory.get(self.memory_k)
memory = await self.parse_browser_actions(memory)
req = self.llm.format_msg(memory + [UserMessage(content=prompt)])
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "react"})
state_data = dict(
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
self.command_rsp = await self.llm_cached_aask(req=req, system_msgs=self.system_msg, state_data=state_data)
self.rc.memory.add(AIMessage(content=self.command_rsp))
return True
@exp_cache(context_builder=RoleZeroContextBuilder(), serializer=RoleZeroSerializer())
async def llm_cached_aask(self, *, req: list[dict], system_msgs: list[str], **kwargs) -> str:
"""Use `exp_cache` to automatically manage experiences.
The `RoleZeroContextBuilder` attempts to add experiences to `req`.
The `RoleZeroSerializer` extracts essential parts of `req` for the experience pool, trimming lengthy entries to retain only necessary parts.
"""
return await self.llm.aask(req, system_msgs=system_msgs)
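For reference, a minimal usage sketch of `exp_cache` based on the behavior exercised by the decorator tests later in this diff (the `answer` function and the `llm` object are hypothetical placeholders):
```python
from metagpt.exp_pool import exp_cache
from metagpt.exp_pool.context_builders import SimpleContextBuilder

@exp_cache(context_builder=SimpleContextBuilder())
async def answer(req: str) -> str:
    # On a perfect cache hit the body is skipped and the stored response is returned;
    # on a miss the result is scored and written back to the pool.
    return await llm.aask(req)  # `llm` is a placeholder for any BaseLLM
```
Note that `req` must be passed as a keyword argument; the cache handler expects a `req` kwarg and raises `ValueError` otherwise (see `test_validate_params` below).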
async def parse_browser_actions(self, memory: List[Message]) -> List[Message]:
if not self.browser.is_empty_page:
pattern = re.compile(r"Command Browser\.(\w+) executed")
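# scan memory from newest to oldest; insert the current page view right after the most recent Browser command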
for index, msg in zip(range(len(memory), 0, -1), memory[::-1]):
if pattern.match(msg.content):
memory.insert(index, UserMessage(cause_by="browser", content=await self.browser.view()))
break
context = self.llm.format_msg(memory + [UserMessage(content=prompt)])
# print(*context, sep="\n" + "*" * 5 + "\n")
async with ThoughtReporter(enable_llm_stream=True):
self.command_rsp = await self.llm.aask(context, system_msgs=self.system_msg)
self.rc.memory.add(AIMessage(content=self.command_rsp))
return True
return memory
async def _act(self) -> Message:
if self.use_fixed_sop:
@ -217,12 +244,13 @@ class RoleZero(Role):
# routing
memory = self.get_memories(k=4)
context = self.llm.format_msg(memory + [UserMessage(content=QUICK_THINK_PROMPT)])
# print(context)
rsp = await self.llm.aask(context)
if "yes" in rsp.lower():
# llm call with the original context
answer = await self.llm.aask(self.llm.format_msg(memory))
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "quick"})
answer = await self.llm.aask(self.llm.format_msg(memory))
self.rc.memory.add(AIMessage(content=answer, cause_by=RunCommand))
await self.reply_to_human(content=answer)
rsp_msg = AIMessage(
@ -246,6 +274,8 @@ class RoleZero(Role):
"""
try:
commands = CodeParser.parse_code(block=None, lang="json", text=self.command_rsp)
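# some LLM outputs drop the opening "[" of the command list; restore it before JSON parsing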
if commands.endswith("]") and not commands.startswith("["):
commands = "[" + commands
commands = json.loads(repair_llm_raw_output(output=commands, req_keys=[None], repair_type=RepairType.JSON))
except json.JSONDecodeError:
logger.warning(f"Failed to parse JSON for: {self.command_rsp}. Trying to repair...")
@ -266,13 +296,15 @@ class RoleZero(Role):
async def _run_commands(self, commands) -> str:
outputs = []
for cmd in commands:
output = f"Command {cmd['command_name']} executed"
# handle special command first
if await self._run_special_command(cmd):
if self._is_special_command(cmd):
special_command_output = await self._run_special_command(cmd)
outputs.append(output + ":" + special_command_output)
continue
# run command as specified by tool_execution_map
if cmd["command_name"] in self.tool_execution_map:
tool_obj = self.tool_execution_map[cmd["command_name"]]
output = f"Command {cmd['command_name']} executed"
try:
if inspect.iscoroutinefunction(tool_obj):
tool_output = await tool_obj(**cmd["args"])
@ -293,19 +325,24 @@ class RoleZero(Role):
return outputs
async def _run_special_command(self, cmd) -> bool:
def _is_special_command(self, cmd) -> bool:
return cmd["command_name"] in self.special_tool_commands
async def _run_special_command(self, cmd) -> str:
"""command requiring special check or parsing"""
is_special_cmd = cmd["command_name"] in self.special_tool_commands
command_output = ""
if cmd["command_name"] == "Plan.finish_current_task" and not self.planner.plan.is_plan_finished():
# task_result = TaskResult(code=str(commands), result=outputs, is_success=is_success)
# self.planner.plan.current_task.update_task_result(task_result=task_result)
self.planner.plan.finish_current_task()
command_output = "Current task is finished. "
elif cmd["command_name"] == "end":
self._set_state(-1)
command_output = "Everything Done"
return is_special_cmd
return command_output
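To make the new flow concrete, a small sketch of what `_run_commands` now produces for special commands (the payload is hypothetical but mirrors the prompt examples elsewhere in this diff):
```python
# Hypothetical command list as parsed from an LLM response.
commands = [
    {"command_name": "Plan.finish_current_task", "args": {}},
    {"command_name": "end", "args": {}},
]
# _is_special_command returns True for both, so _run_commands appends:
#   "Command Plan.finish_current_task executed:Current task is finished. "
#   "Command end executed:Everything Done"
```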
def _get_plan_status(self) -> Tuple[str, str]:
plan_status = self.planner.plan.model_dump(include=["goal", "tasks"])

View file

@ -1,5 +1,4 @@
import json
import os
from pydantic import Field
@ -10,6 +9,7 @@ from metagpt.prompts.di.swe_agent import (
SWE_AGENT_SYSTEM_TEMPLATE,
)
from metagpt.roles.di.role_zero import RoleZero
from metagpt.tools.libs.git import git_create_pull, git_push
from metagpt.tools.libs.terminal import Bash
@ -17,50 +17,46 @@ class SWEAgent(RoleZero):
name: str = "Swen"
profile: str = "Issue Solver"
goal: str = "Resolve GitHub issue"
_bash_window_size: int = 100
_system_msg: str = SWE_AGENT_SYSTEM_TEMPLATE
system_msg: list[str] = [_system_msg.format(WINDOW=_bash_window_size)]
system_msg: list[str] = [SWE_AGENT_SYSTEM_TEMPLATE]
_instruction: str = NEXT_STEP_TEMPLATE
tools: list[str] = ["Bash", "Browser:goto,scroll"]
tools: list[str] = [
"Bash",
"Browser:goto,scroll",
"RoleZero",
"git_push",
"git_create_pull",
]
terminal: Bash = Field(default_factory=Bash, exclude=True)
output_diff: str = ""
max_react_loop: int = 40
run_eval: bool = False
async def _think(self) -> bool:
self._update_system_msg()
self._format_instruction()
await self._format_instruction()
res = await super()._think()
if self.run_eval:
await self._parse_commands_for_eval()
return res
def _update_system_msg(self):
"""
Sets the system message for the SWE agent.
def _update_tool_execution(self):
self.tool_execution_map.update(
{
"Bash.run": self.terminal.run,
"git_push": git_push,
"git_create_pull": git_create_pull,
}
)
Sets the `_bash_window_size` from the environment variable `WINDOW` if it exists.
Formats the `_system_msg` template with the current `_bash_window_size`.
"""
if os.getenv("WINDOW"):
self._bash_window_size = int(os.getenv("WINDOW"))
self.system_msg = [self._system_msg.format(WINDOW=self._bash_window_size)]
def _format_instruction(self):
async def _format_instruction(self):
"""
Formats the instruction message for the SWE agent.
Runs the "state" command in the terminal, parses its output as JSON,
and uses it to format the `_instruction` template.
"""
state_output = self.terminal.run("state")
state_output = await self.terminal.run("state")
bash_state = json.loads(state_output)
self.instruction = self._instruction.format(
WINDOW=self._bash_window_size, examples=MINIMAL_EXAMPLE, **bash_state
).strip()
return self.instruction
self.instruction = self._instruction.format(**bash_state).strip()
async def _parse_commands_for_eval(self):
"""
@ -81,7 +77,7 @@ class SWEAgent(RoleZero):
if "end" != cmd.get("command_name", ""):
return
try:
diff_output = self.terminal.run("git diff --cached")
diff_output = await self.terminal.run("git diff --cached")
clear_diff = extract_patch(diff_output)
logger.info(f"Diff output: \n{clear_diff}")
if clear_diff:
@ -90,8 +86,5 @@ class SWEAgent(RoleZero):
except Exception as e:
logger.error(f"Error during submission: {e}")
def _update_tool_execution(self):
self.tool_execution_map.update({"Bash.run": self.terminal.run})
def _retrieve_experience(self) -> str:
return MINIMAL_EXAMPLE

View file

@ -15,7 +15,7 @@ from metagpt.tools.tool_registry import register_tool
@register_tool(include_functions=["publish_team_message"])
class TeamLeader(RoleZero):
name: str = "Tim"
name: str = "Mike"
profile: str = "Team Leader"
goal: str = "Manage a team to assist users"
system_msg: list[str] = [SYSTEM_PROMPT]

View file

@ -464,8 +464,8 @@ class Task(BaseModel):
self.is_finished = False
def update_task_result(self, task_result: TaskResult):
self.code = task_result.code
self.result = task_result.result
self.code = self.code + "\n" + task_result.code
self.result = self.result + "\n" + task_result.result
self.is_success = task_result.is_success
@ -669,10 +669,14 @@ class Plan(BaseModel):
"""
return [task for task in self.tasks if task.is_finished]
def append_task(self, task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str):
def append_task(self, task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str, task_type: str = ""):
"""Append a new task with task_id (number) to the end of existing task sequences. If dependent_task_ids is not empty, the task will depend on the tasks with the ids in the list."""
new_task = Task(
task_id=task_id, dependent_task_ids=dependent_task_ids, instruction=instruction, assignee=assignee
task_id=task_id,
dependent_task_ids=dependent_task_ids,
instruction=instruction,
assignee=assignee,
task_type=task_type
)
return self._append_task(new_task)
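A hedged usage sketch of the extended signature (the plan instance, ids, instruction, and assignee are made up):
```python
plan.append_task(
    task_id="4",
    dependent_task_ids=["3"],
    instruction="Review game.js against the system design.",
    assignee="Alex",
    task_type="code review",  # new optional field, defaults to ""
)
```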

View file

@ -520,7 +520,7 @@ Explanation: The requirement is about software development. Assign each tasks to
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "I have assigned the tasks to the team members. Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly",
"content": "I have assigned the tasks to the team members. Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly"
}
},
{
@ -585,7 +585,7 @@ Explanation: You received a message from Alice, the Product Manager, that she ha
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture.",
"content": "Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture."
}
},
{
@ -602,7 +602,7 @@ Explanation: The user is asking for a general update on the project status. Give
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "The team is currently working on ... We have completed ...",
"content": "The team is currently working on ... We have completed ..."
}
},
{
@ -610,6 +610,17 @@ Explanation: The user is asking for a general update on the project status. Give
}
]
```
## example 4
OBSERVATION: the current task is none and all tasks are finished.
Explanation: The last task was "Plan.finish_current_task" or "RoleZero.reply_to_human" and the current task is now none, which means everything is done. Just output the command "end".
```json
[
{
"command_name": "end"
}
]
```
"""
@ -629,6 +640,10 @@ class KeywordExpRetriever(ExpRetriever):
return DEPLOY_EXAMPLE
elif "issue" in context.lower():
return FIX_ISSUE_EXAMPLE
elif "https:" in context.lower() or "http:" in context.lower():
if "search" in context.lower() or "click" in context.lower():
return WEB_SCRAPING_EXAMPLE
return WEB_SCRAPING_EXAMPLE_SIMPLE
elif exp_type == "task":
if "diagnose" in context.lower():
return SEARCH_SYMBOL_EXAMPLE
@ -789,13 +804,13 @@ Explanation: I will first need to read the system design document and the projec
{
"command_name": "Editor.read",
"args": {
"path": "/tmp/docs/project_schedule.json"
"path": "/tmp/project_schedule.json"
}
},
{
"command_name": "Editor.read",
"args": {
"path": "/tmp/docs/system_design.json"
"path": "/tmp/system_design.json"
}
}
]
@ -890,3 +905,177 @@ Explanation: to review the code, call ReviewAndRewriteCode.run.
]
```
"""
WEB_SCRAPING_EXAMPLE = """
## action 1
User Requirement: Scrape and list the restaurant names on the first page by searching for the keyword `beef` on the website https://www.yelp.com/.
Explanation: The requirement is to scrape data from a website and extract information about restaurants. The process involves searching for restaurants with a specific keyword, retrieving and presenting the data in a structured format.
```json
[
{
"command_name": "Plan.append_task",
"args": {
"task_id": "1",
"dependent_task_ids": [],
"instruction": "Navigate to the yelp website.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "2",
"dependent_task_ids": ["1"],
"instruction": "Search for restaurants with the keyword 'beef'.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "3",
"dependent_task_ids": ["2"],
"instruction": "View the html content of the search result page before scrap data to understand the structure.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "4",
"dependent_task_ids": ["3"],
"instruction": "Parse the html content to scrape the restaurant names and print it.",
"assignee": "David"
}
}
]
```
## action 2
Explanation: To search for restaurants, I will now go to the website https://www.yelp.com/ first.
```json
[
{
"command_name": "Browser.goto",
"args": {
"url": "https://www.yelp.com/"
}
}
]
```
## action 3
Explanation: Since the Browser has successfully navigated to the website and I find that the element id of the search box is 53, I will finish the current task and then use the Browser tool to type the keyword `beef` in the search box and press enter.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "Browser.type",
"args": {
"element_id": 53,
"content": "beef",
"press_enter_after": true
}
}
]
```
## action 4
Explanation: Since the Browser has successfully searched for the keyword `beef`, I will finish the current task and then write code to view the html content of the page.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
## action 5
Explanation: Since I have successfully viewed the html content in the context, I will first finish the current task and then write code to parse the html content and extract the restaurant names.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
...
"""
WEB_SCRAPING_EXAMPLE_SIMPLE = """
## action 1
User Requirement: List the restaurant names on the website https://www.yelp.com/search?find_desc=beef&find_loc=New+York%2C+NY.
Explanation: The requirement is to scrape data from a website and extract information about restaurants. The process involves retrieving and presenting the data in a structured format.
```json
[
{
"command_name": "Plan.append_task",
"args": {
"task_id": "1",
"dependent_task_ids": [],
"instruction": "View the html content of the page before scrap data to understand the structure.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "2",
"dependent_task_ids": ["1"],
"instruction": "Parse the html content to scrape the restaurant names and print it.",
"assignee": "David"
}
}
]
```
## action 2
Explanation: To scrape data from the website, I will first view the html content of the page.
```json
[
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
## action 3
Explanation: Since I have successfully viewed the html content in the context, I will first finish the current task and then write code to parse the html content and extract the restaurant names.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
...
"""

View file

@ -40,8 +40,14 @@ PLAN_STATUS = """
## Current Task
{current_task}
## Finished Section of Current Task
### code
{current_task_code}
### execution result
{current_task_result}
## Task Guidance
Write complete code for 'Current Task'. And avoid duplicating code from 'Finished Tasks', such as repeated import of packages, reading data, etc.
Write code for the incomplete sections of 'Current Task', and avoid duplicating code from 'Finished Tasks' and 'Finished Section of Current Task', such as repeated imports of packages, reading data, etc.
Specifically, {guidance}
"""
@ -173,6 +179,8 @@ class Planner(BaseModel):
code_written=code_written,
task_results=task_results,
current_task=self.current_task.instruction,
current_task_code=self.current_task.code if self.current_task.code else "",
current_task_result=self.current_task.result if self.current_task.result else "",
guidance=guidance,
)

View file

@ -39,7 +39,7 @@ class NaiveSolver(BaseSolver):
self.graph.topological_sort()
for key in self.graph.execution_order:
op = self.graph.nodes[key]
await op.fill(self.context, self.llm, mode="root")
await op.fill(req=self.context, llm=self.llm, mode="root")
class TOTSolver(BaseSolver):

View file

@ -8,7 +8,7 @@ from metagpt.prompts.task_type import (
FEATURE_ENGINEERING_PROMPT,
IMAGE2WEBPAGE_PROMPT,
MODEL_EVALUATE_PROMPT,
MODEL_TRAIN_PROMPT,
MODEL_TRAIN_PROMPT, WEB_SCRAPING_PROMPT,
)
@ -62,6 +62,7 @@ class TaskType(Enum):
WEBSCRAPING = TaskTypeDef(
name="web scraping",
desc="For scraping data from web pages.",
guidance=WEB_SCRAPING_PROMPT,
)
EMAIL_LOGIN = TaskTypeDef(
name="email login",

View file

@ -5,11 +5,11 @@
# @File : __init__.py
# @Desc :
from metagpt.tools.libs import (
# data_preprocess,
# feature_engineering,
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
# web_scraping,
web_scraping,
# email_login,
terminal,
editor,
@ -20,11 +20,11 @@ from metagpt.tools.libs import (
from metagpt.tools.libs.env import get_env, set_get_env_entry, default_get_env, get_env_description
_ = (
# data_preprocess,
# feature_engineering,
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
# web_scraping,
web_scraping,
# email_login,
terminal,
editor,

View file

@ -24,6 +24,7 @@ from metagpt.utils.a11y_tree import (
scroll_page,
type_text,
)
from metagpt.utils.proxy_env import get_proxy_from_env
from metagpt.utils.report import BrowserReporter
@ -72,7 +73,7 @@ class Browser:
self.page: Optional[Page] = None
self.accessibility_tree: list = []
self.headless: bool = True
self.proxy = None
self.proxy = get_proxy_from_env()
self.is_empty_page = True
self.reporter = BrowserReporter()
@ -120,7 +121,7 @@ class Browser:
await scroll_page(self.page, direction)
return await self._wait_page()
async def goto(self, url: str, timeout: float = 30000):
async def goto(self, url: str, timeout: float = 90000):
"""Navigate to a specific URL."""
if self.page is None:
await self.start()
@ -161,7 +162,7 @@ class Browser:
await self._wait_until_page_idle(page)
self.accessibility_tree = await get_accessibility_tree(page)
await self.reporter.async_report(page, "page")
return f"SUCCESS, URL: {page.url}"
return f"SUCCESS, URL: {page.url} have been loaded."
def _register_page_event(self, page: Page):
page.last_busy_time = time.time()

View file

@ -3,6 +3,7 @@ from pathlib import Path
from typing import Optional
import aiofiles
from bs4 import BeautifulSoup
from unidiff import PatchSet
import metagpt.ext.cr
@ -29,7 +30,7 @@ class CodeReview:
Args:
patch_path: The local path of the patch file or the url of the pull request. Example: "/data/xxx-pr-1.patch", "https://github.com/xx/XX/pull/1362"
cr_output_file: Output file path where code review comments will be saved. Example: "cr/xxx-pr-1.json"
cr_point_file: File path for specifying code review points. Defaults to a predefined file.
cr_point_file: File path for specifying code review points. If not specified, a predefined points file is used.
"""
patch = await self._get_patch_content(patch_path)
cr_point_file = cr_point_file if cr_point_file else Path(metagpt.ext.cr.__file__).parent / "points.json"
@ -45,7 +46,7 @@ class CodeReview:
)
comments = await CodeReview_().run(patch, cr_points)
cr_output_path.parent.mkdir(exist_ok=True, parents=True)
async with aiofiles.open(cr_output_path, "w") as f:
async with aiofiles.open(cr_output_path, "w", encoding="utf-8") as f:
await f.write(json.dumps(comments, ensure_ascii=False))
await reporter.async_report(cr_output_path)
@ -65,7 +66,7 @@ class CodeReview:
output_dir: File path where code review comments are stored.
"""
patch = await self._get_patch_content(patch_path)
async with aiofiles.open(cr_file, "r") as f:
async with aiofiles.open(cr_file, "r", encoding="utf-8") as f:
comments = json.loads(await f.read())
await ModifyCode(pr="").run(patch, comments, output_dir)
return f"The fixed patch files store in {output_dir}"
@ -75,12 +76,14 @@ class CodeReview:
# async with aiohttp.ClientSession(trust_env=True) as client:
# async with client.get(f"{patch_path}.diff", ) as resp:
# patch_file_content = await resp.text()
browser = Browser()
browser.proxy = {"server": "http://127.0.0.1:20172"}
async with browser:
async with Browser() as browser:
await browser.goto(f"{patch_path}.diff")
patch_file_content = await browser.page.content()
if patch_file_content.startswith("<html>"):
soup = BeautifulSoup(patch_file_content, "html.parser")
pre = soup.find("pre")
if pre:
patch_file_content = pre.text
else:
async with aiofiles.open(patch_path) as f:
patch_file_content = await f.read()

View file

@ -38,7 +38,7 @@ class Editor:
# self.resource.report(path, "path")
def read(self, path: str) -> FileBlock:
"""Read the whole content of a file."""
"""Read the whole content of a file. It is strongly advised to utilize absolute paths"""
with open(path, "r") as f:
self.resource.report(path, "path")
lines = f.readlines()

View file

@ -11,10 +11,10 @@ from github.PullRequest import PullRequest
from metagpt.tools.tool_registry import register_tool
@register_tool(tags=["software development", "git", "Commit the changes and push to remote git repository."])
@register_tool(tags=["software development", "git", "Push to remote git repository."])
async def git_push(
local_path: Union[str, Path],
access_token: str,
app_name: str,
comments: str = "Commit",
new_branch: str = "",
) -> "GitBranch":
@ -22,38 +22,41 @@ async def git_push(
Pushes changes from a local Git repository to its remote counterpart.
Args:
local_path (Union[str, Path]): The path to the local Git repository.
access_token (str): The access token for authentication. Use `get_env` to get access token.
comments (str, optional): The commit message to use. Defaults to "Commit".
local_path (Union[str, Path]): The absolute path to the local Git repository.
app_name (str): The name of the platform hosting the repository (e.g., "github", "gitlab", "bitbucket").
comments (str, optional): Comments to be associated with the push. Defaults to "Commit".
new_branch (str, optional): The name of the new branch to create and push changes to.
If not provided, changes will be pushed to the current branch. Defaults to "".
Returns:
GitBranch: The branch to which the changes were pushed.
Raises:
ValueError: If the provided local_path does not point to a valid Git repository.
Example:
>>> url = "https://github.com/iorisa/snake-game.git"
>>> local_path = await git_clone(url=url)
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github") # Read access token from environment variables.
>>> comments = "Archive"
>>> app_name = "github"
>>> comments = "Commit"
>>> new_branch = "feature/new"
>>> branch = await git_push(local_path=local_path, access_token=access_token, comments=comments, new_branch=new_branch)
>>> branch = await git_push(local_path=local_path, app_name=app_name, comments=comments, new_branch=new_branch)
>>> base = branch.base
>>> head = branch.head
>>> repo_name = branch.repo_name
>>> print(f"base branch:'{base}', head branch:'{head}', repo_name:'{repo_name}'")
base branch:'master', head branch:'feature/new', repo_name:'iorisa/snake-game'
"""
from metagpt.tools.libs import get_env
from metagpt.utils.git_repository import GitRepository
if not GitRepository.is_git_dir(local_path):
raise ValueError("Invalid local git repository")
repo = GitRepository(local_path=local_path, auto_init=False)
# Read access token from environment variables.
access_token = await get_env(key="access_token", app_name=app_name)
branch = await repo.push(new_branch=new_branch, comments=comments, access_token=access_token)
return branch
@ -62,9 +65,9 @@ async def git_push(
async def git_create_pull(
base: str,
head: str,
app_name: str,
base_repo_name: str,
access_token: str,
head_repo_name: Optional[str] = None,
head_repo_name: Optional[str] = None,
title: Optional[str] = None,
body: Optional[str] = None,
issue: Optional[Issue] = None,
@ -73,46 +76,16 @@ async def git_create_pull(
Creates a pull request on a Git repository. Prefer this tool over the Browser for creating pull requests.
Args:
base (str): The base branch of the pull request.
head (str): The head branch of the pull request.
base_repo_name (str): The full repository name (user/repo) where the pull request will be created.
access_token (str): The access token for authentication. Use `get_env` to get access token.
head_repo_name (Optional[str], optional): The full repository name (user/repo) where the pull request will merge from. Defaults to None.
title (Optional[str], optional): The title of the pull request. Defaults to None.
body (Optional[str], optional): The body of the pull request. Defaults to None.
issue (Optional[Issue], optional): The related issue of the pull request. Defaults to None.
base (str): The name of the base branch where the pull request will be merged.
head (str): The name of the branch that contains the changes for the pull request.
app_name (str): The name of the platform hosting the repository (e.g., "github", "gitlab", "bitbucket").
base_repo_name (str): The full name of the target repository (in the format "user/repo") where the pull request will be created.
head_repo_name (Optional[str]): The full name of the source repository (in the format "user/repo") from which the changes will be pulled.
title (Optional[str]): The title of the pull request. Defaults to None.
body (Optional[str]): The description or body content of the pull request. Defaults to None.
issue (Optional[Issue]): An optional issue related to the pull request. Defaults to None.
Example:
>>> # push and create pull
>>> url = "https://github.com/iorisa/snake-game.git"
>>> local_path = await git_clone(url=url)
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github")
>>> comments = "Archive"
>>> new_branch = "feature/new"
>>> branch = await git_push(local_path=local_path, access_token=access_token, comments=comments, new_branch=new_branch)
>>> base = branch.base
>>> head = branch.head
>>> repo_name = branch.repo_name
>>> print(f"base branch:'{base}', head branch:'{head}', repo_name:'{repo_name}'")
base branch:'master', head branch:'feature/new', repo_name:'iorisa/snake-game'
>>> title = "feat: modify http lib",
>>> body = "Change HTTP library used to send requests"
>>> pr = await git_create_pull(
>>> base_repo_name=repo_name,
>>> base=base,
>>> head=head,
>>> title=title,
>>> body=body,
>>> access_token=access_token,
>>> )
>>> if isinstance(pr, PullRequest):
>>> print(pr)
PullRequest("feat: modify http lib")
>>> if isinstance(pr, str):
>>> print(f"Visit this url to create a new pull request: '{pr}'")
Visit this url to create a new pull request: 'https://github.com/iorisa/snake-game/compare/master...feature/new'
>>> # create pull request
>>> base_repo_name = "geekan/MetaGPT"
>>> head_repo_name = "ioris/MetaGPT"
@ -120,8 +93,7 @@ async def git_create_pull(
>>> head = "feature/http"
>>> title = "feat: modify http lib",
>>> body = "Change HTTP library used to send requests"
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github")
>>> app_name = "github"
>>> pr = await git_create_pull(
>>> base_repo_name=base_repo_name,
>>> head_repo_name=head_repo_name,
@ -129,7 +101,7 @@ async def git_create_pull(
>>> head=head,
>>> title=title,
>>> body=body,
>>> access_token=access_token,
>>> app_name=app_name,
>>> )
>>> if isinstance(pr, PullRequest):
>>> print(pr)
@ -141,8 +113,11 @@ async def git_create_pull(
Returns:
PullRequest: The created pull request.
"""
from metagpt.tools.libs import get_env
from metagpt.utils.git_repository import GitRepository
access_token = await get_env(key="access_token", app_name=app_name)
return await GitRepository.create_pull(
base=base,
head=head,

View file

@ -1,8 +1,10 @@
import subprocess
import threading
from queue import Queue
import asyncio
from asyncio import Queue
from asyncio.subprocess import PIPE, STDOUT
from typing import Optional
from metagpt.const import DEFAULT_WORKSPACE_ROOT, SWE_SETUP_PATH
from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.report import END_MARKER_VALUE, TerminalReporter
@ -19,62 +21,54 @@ class Terminal:
def __init__(self):
self.shell_command = ["bash"] # FIXME: should consider windows support later
self.command_terminator = "\n"
# Start a persistent shell process
self.process = subprocess.Popen(
self.shell_command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
executable="/bin/bash",
)
self.stdout_queue = Queue()
self.stdout_queue = Queue(maxsize=1000)
self.observer = TerminalReporter()
self.process: Optional[asyncio.subprocess.Process] = None
self._check_state()
async def _start_process(self):
# Start a persistent shell process
self.process = await asyncio.create_subprocess_exec(
*self.shell_command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, executable="bash"
)
await self._check_state()
def _check_state(self):
"""Check the state of the terminal, e.g. the current directory of the terminal process. Useful for agent to understand."""
print("The terminal is at:", self.run_command("pwd"))
async def _check_state(self):
"""
Check the state of the terminal, e.g. the current directory of the terminal process. Useful for agent to understand.
"""
output = await self.run_command("pwd")
logger.info("The terminal is at:", output)
def run_command(self, cmd: str, daemon=False) -> str:
async def run_command(self, cmd: str, daemon=False) -> str:
"""
Executes a specified command in the terminal and streams the output back in real time.
This command maintains state across executions, such as the current directory,
allowing for sequential commands to be contextually aware. The output from the
command execution is placed into `stdout_queue`, which can be consumed as needed.
allowing for sequential commands to be contextually aware.
Args:
cmd (str): The command to execute in the terminal.
daemon (bool): If True, executes the command in a background thread, allowing
the main program to continue execution. The command's output is
collected asynchronously in daemon mode and placed into `stdout_queue`.
daemon (bool): If True, executes the command in an asynchronous task, allowing
the main program to continue execution.
Returns:
str: The command's output or an empty string if `daemon` is True. Remember that
when `daemon` is True, the output is collected into `stdout_queue` and must
be consumed from there.
Note:
If `stdout_queue` is not periodically consumed, it could potentially grow indefinitely,
consuming memory. Ensure that there's a mechanism in place to consume this queue,
especially during long-running or output-heavy command executions.
when `daemon` is True, use the `get_stdout_output` method to get the output.
"""
if self.process is None:
await self._start_process()
# Send the command
self.process.stdin.write((cmd + self.command_terminator).encode())
self.process.stdin.write(
(f'echo "{END_MARKER_VALUE}"{self.command_terminator}').encode() # write EOF
f'echo "{END_MARKER_VALUE}"{self.command_terminator}'.encode() # write EOF
) # Unique marker to signal command end
self.process.stdin.flush()
await self.process.stdin.drain()
if daemon:
threading.Thread(target=self._read_and_process_output, args=(cmd,), daemon=True).start()
asyncio.create_task(self._read_and_process_output(cmd, daemon=True))
return ""
else:
return self._read_and_process_output(cmd)
return await self._read_and_process_output(cmd)
def execute_in_conda_env(self, cmd: str, env, daemon=False) -> str:
async def execute_in_conda_env(self, cmd: str, env, daemon=False) -> str:
"""
Executes a given command within a specified Conda environment automatically without
the need for manual activation. Users just need to provide the name of the Conda
@ -84,7 +78,7 @@ class Terminal:
cmd (str): The command to execute within the Conda environment.
env (str, optional): The name of the Conda environment to activate before executing the command.
If not specified, the command will run in the current active environment.
daemon (bool): If True, the command is run in a background thread, similar to `run_command`,
daemon (bool): If True, the command is run in an asynchronous task, similar to `run_command`,
affecting error logging and handling in the same manner.
Returns:
@ -96,19 +90,34 @@ class Terminal:
to ensure the specified environment is active for the command's execution.
"""
cmd = f"conda run -n {env} {cmd}"
return self.run_command(cmd, daemon=daemon)
return await self.run_command(cmd, daemon=daemon)
def _read_and_process_output(self, cmd):
with self.observer as observer:
async def get_stdout_output(self) -> str:
"""
Retrieves all collected output from background running commands and returns it as a string.
Returns:
str: The collected output from background running commands, returned as a string.
"""
output_lines = []
while not self.stdout_queue.empty():
line = await self.stdout_queue.get()
output_lines.append(line)
return "\n".join(output_lines)
async def _read_and_process_output(self, cmd, daemon=False) -> str:
async with self.observer as observer:
cmd_output = []
observer.report(cmd + self.command_terminator, "cmd")
# report the command
await observer.async_report(cmd + self.command_terminator, "cmd")
# report the command
# Read the output until the unique marker is found.
# We read bytes directly from stdout instead of text because when reading text,
# '\r' is changed to '\n', resulting in excessive output.
tmp = b""
while True:
output = tmp + self.process.stdout.read(1)
output = tmp + await self.process.stdout.read(1)
if not output:
continue
*lines, tmp = output.splitlines(True)
for line in lines:
line = line.decode()
@ -116,20 +125,20 @@ class Terminal:
if ix >= 0:
line = line[0:ix]
if line:
observer.report(line, "output")
await observer.async_report(line, "output")
# report stdout in real-time
cmd_output.append(line)
return "".join(cmd_output)
# log stdout in real-time
observer.report(line, "output")
await observer.async_report(line, "output")
cmd_output.append(line)
self.stdout_queue.put(line)
if daemon:
await self.stdout_queue.put(line)
def close(self):
async def close(self):
"""Close the persistent shell process."""
self.process.stdin.close()
self.process.terminate()
self.process.wait()
await self.process.wait()
@register_tool(include_functions=["run"])
@ -142,10 +151,13 @@ class Bash(Terminal):
def __init__(self):
"""init"""
super().__init__()
self.run_command(f"cd {DEFAULT_WORKSPACE_ROOT}")
self.run_command(f"source {SWE_SETUP_PATH}")
self.start_flag = False
def run(self, cmd) -> str:
async def start(self):
await self.run_command(f"cd {DEFAULT_WORKSPACE_ROOT}")
await self.run_command(f"source {SWE_SETUP_PATH}")
async def run(self, cmd) -> str:
"""
Executes a bash command.
@ -184,9 +196,6 @@ class Bash(Terminal):
Arguments:
filename (str): The name of the file to create.
- submit
Submits your current code. it can only be executed once, the last action before the `end`.
- search_dir_and_preview <search_term> [<dir>]
Searches for search_term in all files in dir and gives their code preview
with line numbers. If dir is not provided, searches in the current directory.
@ -220,6 +229,13 @@ class Bash(Terminal):
end_line (int): The line number to end the edit at (inclusive), starting from 1.
replacement_text (str): The text to replace the current selection with, must conform to PEP8 standards.
- submit
Submits your current code locally. It can only be executed once, as the last action before `end`.
Note: Make sure to use these functions as per their defined arguments and behaviors.
"""
return self.run_command(cmd)
if not self.start_flag:
await self.start()
self.start_flag = True
return await self.run_command(cmd)
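Because the whole call chain is now asynchronous, callers must await every command. A minimal usage sketch under that assumption (the shell commands are illustrative):
```python
import asyncio

from metagpt.tools.libs.terminal import Terminal

async def main():
    terminal = Terminal()
    # Foreground run: blocks until the END_MARKER echo is seen, then returns the output.
    print(await terminal.run_command("pwd"))
    # Daemon run: returns immediately; output lines accumulate in stdout_queue.
    await terminal.run_command("sleep 1 && echo done", daemon=True)
    await asyncio.sleep(2)
    print(await terminal.get_stdout_output())
    await terminal.close()

asyncio.run(main())
```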

View file

@ -8,13 +8,15 @@ from metagpt.utils.parse_html import simplify_html
@register_tool(tags=["web scraping"])
async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bool = False) -> None:
"""view the HTML content of current page to understand the structure. When executed, the content will be printed out
async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bool = False) -> str:
"""view the HTML content of current page to understand the structure.
Args:
url (str): The URL of the web page to scrape.
requirement (str): Providing a clear and detailed requirement helps in focusing the inspection on the desired elements.
keep_links (bool): Whether to keep the hyperlinks in the HTML content. Set to True if links are required
Returns:
str: The HTML content of the page.
"""
async with Browser() as browser:
await browser.goto(url)
@ -36,7 +38,7 @@ async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bo
html = "\n".join(i.text for i in nodes)
mem_fs.rm_file(filename)
print(html)
return html
# async def get_elements_outerhtml(self, element_ids: list[int]):

View file

@ -17,4 +17,3 @@ source $REPO_ROOT_DIR/metagpt/tools/swe_agent_commands/search.sh
source $REPO_ROOT_DIR/metagpt/tools/swe_agent_commands/edit_linting.sh
export SWE_CMD_WORK_DIR="$REPO_ROOT_DIR/workspace/swe_agent_workdir"
#sudo chmod 777 $REPO_ROOT_DIR/workspace/swe_agent_workdir

View file

@ -104,11 +104,13 @@ class ToolRecommender(BaseModel):
return ranked_tools
async def get_recommended_tool_info(self, **kwargs) -> str:
async def get_recommended_tool_info(self, fixed: list[str] = None, **kwargs) -> str:
"""
Wrap recommended tools with their info in a string, which can be used directly in a prompt.
"""
recommended_tools = await self.recommend_tools(**kwargs)
if fixed:
recommended_tools.extend([self.tools[tool_name] for tool_name in fixed if tool_name in self.tools])
if not recommended_tools:
return ""
tool_schemas = {tool.name: tool.schemas for tool in recommended_tools}
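A hedged usage sketch of the new `fixed` parameter (the recommender instance and tool name are illustrative):
```python
# Always include the Editor tool in the prompt-ready info, regardless of ranking.
tool_info = await recommender.get_recommended_tool_info(fixed=["Editor"])
```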

View file

@ -41,7 +41,7 @@ class WebPage(BaseModel):
def get_slim_soup(self, keep_links: bool = False):
soup = _get_soup(self.html)
keep_attrs = ["class"]
keep_attrs = ["class", "id"]
if keep_links:
keep_attrs.append("href")

View file

@ -0,0 +1,19 @@
import os
def get_proxy_from_env():
proxy_config = {}
server = None
for i in ("ALL_PROXY", "all_proxy", "HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"):
if os.environ.get(i):
server = os.environ.get(i)
if server:
proxy_config["server"] = server
no_proxy = os.environ.get("NO_PROXY") or os.environ.get("no_proxy")
if no_proxy:
proxy_config["bypass"] = no_proxy
if not proxy_config:
proxy_config = None
return proxy_config
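Note that later entries in the tuple override earlier ones, so a scheme-specific variable takes precedence over `ALL_PROXY`. A quick usage sketch (the env values are illustrative):
```python
import os

os.environ["HTTPS_PROXY"] = "http://127.0.0.1:7890"
os.environ["NO_PROXY"] = "localhost,127.0.0.1"

print(get_proxy_from_env())
# -> {'server': 'http://127.0.0.1:7890', 'bypass': 'localhost,127.0.0.1'}
```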

View file

@ -266,10 +266,6 @@ class ThoughtReporter(ObjectReporter):
block: Literal[BlockType.THOUGHT] = BlockType.THOUGHT
async def __aenter__(self):
await self.async_report({})
return await super().__aenter__()
class FileReporter(ResourceReporter):
"""File resource callback for reporting complete file paths.

View file

@ -31,9 +31,9 @@ TOKEN_COSTS = {
"gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03}, # TODO add extra image price calculator
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4o": {"prompt": 0.005, "completion": 0.015},
"gpt-4o-2024-05-13": {"prompt": 0.005, "completion": 0.015},
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
"glm-3-turbo": {"prompt": 0.0007, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
"glm-4": {"prompt": 0.014, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
@ -147,11 +147,14 @@ FIREWORKS_GRADE_TOKEN_COSTS = {
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
TOKEN_MAX = {
"gpt-4o-2024-05-13": 128000,
"gpt-4o": 128000,
"gpt-4-0125-preview": 128000,
"gpt-4-turbo-preview": 128000,
"gpt-4-1106-preview": 128000,
"gpt-4-vision-preview": 128000,
"gpt-4-1106-vision-preview": 128000,
"gpt-4-turbo": 128000,
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,

View file

@ -91,10 +91,10 @@ async def test_action_node_two_layer():
assert node_b in root.children.values()
# FIXME: ADD MARKDOWN SUPPORT. NEED TO TUNE MARKDOWN SYMBOL FIRST.
answer1 = await root.fill(context="what's the answer to 123+456?", schema="json", strgy="simple", llm=LLM())
answer1 = await root.fill(req="what's the answer to 123+456?", schema="json", strgy="simple", llm=LLM())
assert "579" in answer1.content
answer2 = await root.fill(context="what's the answer to 123+456?", schema="json", strgy="complex", llm=LLM())
answer2 = await root.fill(req="what's the answer to 123+456?", schema="json", strgy="complex", llm=LLM())
assert "579" in answer2.content
@ -112,7 +112,7 @@ async def test_action_node_review():
with pytest.raises(RuntimeError):
_ = await node_a.review()
_ = await node_a.fill(context=None, llm=LLM())
_ = await node_a.fill(req=None, llm=LLM())
setattr(node_a.instruct_content, key, "game snake") # wrong content to review
review_comments = await node_a.review(review_mode=ReviewMode.AUTO)
@ -126,7 +126,7 @@ async def test_action_node_review():
with pytest.raises(RuntimeError):
_ = await node.review()
_ = await node.fill(context=None, llm=LLM())
_ = await node.fill(req=None, llm=LLM())
review_comments = await node.review(review_mode=ReviewMode.AUTO)
assert len(review_comments) == 1
@ -151,7 +151,7 @@ async def test_action_node_revise():
with pytest.raises(RuntimeError):
_ = await node_a.review()
_ = await node_a.fill(context=None, llm=LLM())
_ = await node_a.fill(req=None, llm=LLM())
setattr(node_a.instruct_content, key, "game snake") # wrong content to revise
revise_contents = await node_a.revise(revise_mode=ReviseMode.AUTO)
assert len(revise_contents) == 1
@ -164,7 +164,7 @@ async def test_action_node_revise():
with pytest.raises(RuntimeError):
_ = await node.revise()
_ = await node.fill(context=None, llm=LLM())
_ = await node.fill(req=None, llm=LLM())
setattr(node.instruct_content, key, "game snake")
revise_contents = await node.revise(revise_mode=ReviseMode.AUTO)
assert len(revise_contents) == 1
@ -257,7 +257,7 @@ async def test_action_node_with_image(mocker):
invoice_path = Path(__file__).parent.joinpath("..", "..", "data", "invoices", "invoice-2.png")
img_base64 = encode_image(invoice_path)
mocker.patch("metagpt.provider.openai_api.OpenAILLM._cons_kwargs", _cons_kwargs)
node = await invoice.fill(context="", llm=LLM(), images=[img_base64])
node = await invoice.fill(req="", llm=LLM(), images=[img_base64])
assert node.instruct_content.invoice

View file

@ -38,7 +38,7 @@ async def test_write_design_an(mocker):
mocker.patch("metagpt.actions.design_api_an.REFINED_DESIGN_NODE.fill", return_value=root)
prompt = NEW_REQ_TEMPLATE.format(old_design=DESIGN_SAMPLE, context=dict_to_markdown(REFINED_PRD_JSON))
node = await REFINED_DESIGN_NODE.fill(prompt, llm)
node = await REFINED_DESIGN_NODE.fill(req=prompt, llm=llm)
assert "Refined Implementation Approach" in node.instruct_content.model_dump()
assert "Refined File list" in node.instruct_content.model_dump()

View file

@ -42,7 +42,7 @@ async def test_project_management_an(mocker):
root.instruct_content.model_dump = mock_task_json
mocker.patch("metagpt.actions.project_management_an.PM_NODE.fill", return_value=root)
node = await PM_NODE.fill(dict_to_markdown(REFINED_DESIGN_JSON), llm)
node = await PM_NODE.fill(req=dict_to_markdown(REFINED_DESIGN_JSON), llm=llm)
assert "Logic Analysis" in node.instruct_content.model_dump()
assert "Task list" in node.instruct_content.model_dump()
@ -59,7 +59,7 @@ async def test_project_management_an_inc(mocker):
mocker.patch("metagpt.actions.project_management_an.REFINED_PM_NODE.fill", return_value=root)
prompt = NEW_REQ_TEMPLATE.format(old_task=TASK_SAMPLE, context=dict_to_markdown(REFINED_DESIGN_JSON))
node = await REFINED_PM_NODE.fill(prompt, llm)
node = await REFINED_PM_NODE.fill(req=prompt, llm=llm)
assert "Refined Logic Analysis" in node.instruct_content.model_dump()
assert "Refined Task list" in node.instruct_content.model_dump()

View file

@ -39,7 +39,7 @@ async def test_write_prd_an(mocker):
requirements=NEW_REQUIREMENT_SAMPLE,
old_prd=PRD_SAMPLE,
)
node = await REFINED_PRD_NODE.fill(prompt, llm)
node = await REFINED_PRD_NODE.fill(req=prompt, llm=llm)
assert "Refined Requirements" in node.instruct_content.model_dump()
assert "Refined Product Goals" in node.instruct_content.model_dump()

View file

@ -0,0 +1,32 @@
import pytest
from metagpt.exp_pool.context_builders.base import (
EXP_TEMPLATE,
BaseContextBuilder,
Experience,
)
from metagpt.exp_pool.schema import Metric, Score
class TestBaseContextBuilder:
class ConcreteContextBuilder(BaseContextBuilder):
async def build(self, *args, **kwargs):
pass
@pytest.fixture
def context_builder(self):
return self.ConcreteContextBuilder()
def test_format_exps(self, context_builder):
exp1 = Experience(req="req1", resp="resp1", metric=Metric(score=Score(val=8)))
exp2 = Experience(req="req2", resp="resp2", metric=Metric(score=Score(val=9)))
context_builder.exps = [exp1, exp2]
result = context_builder.format_exps()
expected = "\n".join(
[
f"1. {EXP_TEMPLATE.format(req='req1', resp='resp1', score=8)}",
f"2. {EXP_TEMPLATE.format(req='req2', resp='resp2', score=9)}",
]
)
assert result == expected

View file

@ -0,0 +1,51 @@
import pytest
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.role_zero import RoleZeroContextBuilder
class TestRoleZeroContextBuilder:
@pytest.fixture
def context_builder(self):
return RoleZeroContextBuilder()
@pytest.mark.asyncio
async def test_build_empty_req(self, context_builder):
result = await context_builder.build(req=[])
assert result == []
@pytest.mark.asyncio
async def test_build_no_experiences(self, context_builder, mocker):
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="")
req = [{"content": "Original content"}]
result = await context_builder.build(req=req)
assert result == req
@pytest.mark.asyncio
async def test_build_with_experiences(self, context_builder, mocker):
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="Formatted experiences")
mocker.patch.object(RoleZeroContextBuilder, "replace_example_content", return_value="Updated content")
req = [{"content": "Original content 1"}]
result = await context_builder.build(req=req)
assert result == [{"content": "Updated content"}]
def test_replace_example_content(self, context_builder, mocker):
mocker.patch.object(RoleZeroContextBuilder, "replace_content_between_markers", return_value="Replaced content")
result = context_builder.replace_example_content("Original text", "New example content")
assert result == "Replaced content"
context_builder.replace_content_between_markers.assert_called_once_with(
"Original text", "# Example", "# Instruction", "New example content"
)
def test_replace_content_between_markers(self):
text = "Start\n# Example\nOld content\n# Instruction\nEnd"
new_content = "New content"
result = RoleZeroContextBuilder.replace_content_between_markers(text, "# Example", "# Instruction", new_content)
expected = "Start\n# Example\nNew content\n\n# Instruction\nEnd"
assert result == expected
def test_replace_content_between_markers_no_match(self):
text = "Start\nNo markers\nEnd"
new_content = "New content"
result = RoleZeroContextBuilder.replace_content_between_markers(text, "# Example", "# Instruction", new_content)
assert result == text
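These tests pin the behavior down tightly, so a minimal implementation sketch consistent with them could look like the following (the real method on `RoleZeroContextBuilder` may differ):
```python
import re

def replace_content_between_markers(text: str, start_marker: str, end_marker: str, new_content: str) -> str:
    # Keep both marker lines, replace what lies between them, and return
    # the text unchanged when the markers are not found.
    pattern = re.compile(rf"({re.escape(start_marker)}\n).*?(\n{re.escape(end_marker)})", re.DOTALL)
    new_text, count = pattern.subn(lambda m: f"{m.group(1)}{new_content}\n{m.group(2)}", text)
    return new_text if count else text
```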

View file

@ -0,0 +1,47 @@
import pytest
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.simple import (
SIMPLE_CONTEXT_TEMPLATE,
SimpleContextBuilder,
)
class TestSimpleContextBuilder:
@pytest.fixture
def context_builder(self):
return SimpleContextBuilder()
@pytest.mark.asyncio
async def test_build_with_experiences(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method
mock_exps = "Mocked experiences"
mocker.patch.object(BaseContextBuilder, "format_exps", return_value=mock_exps)
req = "Test request"
result = await context_builder.build(req=req)
expected = SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps=mock_exps)
assert result == expected
@pytest.mark.asyncio
async def test_build_without_experiences(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method to return an empty string
mocker.patch.object(BaseContextBuilder, "format_exps", return_value="")
req = "Test request"
result = await context_builder.build(req=req)
expected = SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps="")
assert result == expected
@pytest.mark.asyncio
async def test_build_without_req(self, mocker, context_builder: SimpleContextBuilder):
# Mock the format_exps method
mock_exps = "Mocked experiences"
mocker.patch.object(BaseContextBuilder, "format_exps", return_value=mock_exps)
result = await context_builder.build(req="")
expected = SIMPLE_CONTEXT_TEMPLATE.format(req="", exps=mock_exps)
assert result == expected

View file

@ -0,0 +1,200 @@
import asyncio
import pytest
from metagpt.config2 import Config
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.exp_pool.context_builders import SimpleContextBuilder
from metagpt.exp_pool.decorator import ExpCacheHandler, exp_cache
from metagpt.exp_pool.manager import ExperienceManager
from metagpt.exp_pool.perfect_judges import SimplePerfectJudge
from metagpt.exp_pool.schema import Experience, QueryType, Score
from metagpt.exp_pool.scorers import SimpleScorer
from metagpt.rag.engines import SimpleEngine
class TestExpCacheHandler:
@pytest.fixture
def mock_func(self, mocker):
return mocker.AsyncMock()
@pytest.fixture
def mock_exp_manager(self, mocker):
manager = mocker.MagicMock(spec=ExperienceManager)
manager.storage = mocker.MagicMock(spec=SimpleEngine)
manager.config = mocker.MagicMock(spec=Config)
manager.config.exp_pool = ExperiencePoolConfig()
manager.query_exps = mocker.AsyncMock()
manager.create_exp = mocker.MagicMock()
return manager
@pytest.fixture
def mock_scorer(self, mocker):
scorer = mocker.MagicMock(spec=SimpleScorer)
scorer.evaluate = mocker.AsyncMock()
return scorer
@pytest.fixture
def mock_perfect_judge(self, mocker):
return mocker.MagicMock(spec=SimplePerfectJudge)
@pytest.fixture
def mock_context_builder(self, mocker):
return mocker.MagicMock(spec=SimpleContextBuilder)
@pytest.fixture
def exp_cache_handler(self, mock_func, mock_exp_manager, mock_scorer, mock_perfect_judge, mock_context_builder):
return ExpCacheHandler(
func=mock_func,
args=(),
kwargs={"req": "test_req"},
exp_manager=mock_exp_manager,
exp_scorer=mock_scorer,
exp_perfect_judge=mock_perfect_judge,
context_builder=mock_context_builder,
)
@pytest.mark.asyncio
async def test_fetch_experiences(self, exp_cache_handler, mock_exp_manager):
mock_exp_manager.query_exps.return_value = [Experience(req="test_req", resp="test_resp")]
await exp_cache_handler.fetch_experiences()
mock_exp_manager.query_exps.assert_called_once_with(
"test_req", query_type=QueryType.SEMANTIC, tag=exp_cache_handler.tag
)
assert len(exp_cache_handler._exps) == 1
@pytest.mark.asyncio
async def test_get_one_perfect_exp(self, exp_cache_handler, mock_perfect_judge):
exp = Experience(req="test_req", resp="perfect_resp")
exp_cache_handler._exps = [exp]
mock_perfect_judge.is_perfect_exp.return_value = True
result = await exp_cache_handler.get_one_perfect_exp()
assert result == "perfect_resp"
@pytest.mark.asyncio
async def test_execute_function(self, exp_cache_handler, mock_func, mock_context_builder):
mock_context_builder.build.return_value = "built_context"
mock_func.return_value = "function_result"
await exp_cache_handler.execute_function()
mock_context_builder.build.assert_called_once()
mock_func.assert_called_once_with(req="built_context")
assert exp_cache_handler._raw_resp == "function_result"
assert exp_cache_handler._resp == "function_result"
@pytest.mark.asyncio
async def test_process_experience(self, exp_cache_handler, mock_scorer, mock_exp_manager):
exp_cache_handler._resp = "test_resp"
mock_scorer.evaluate.return_value = Score(val=8)
await exp_cache_handler.process_experience()
mock_scorer.evaluate.assert_called_once()
mock_exp_manager.create_exp.assert_called_once()
@pytest.mark.asyncio
async def test_evaluate_experience(self, exp_cache_handler, mock_scorer):
exp_cache_handler._resp = "test_resp"
mock_scorer.evaluate.return_value = Score(val=9)
await exp_cache_handler.evaluate_experience()
assert exp_cache_handler._score.val == 9
def test_save_experience(self, exp_cache_handler, mock_exp_manager):
exp_cache_handler._req = "test_req"
exp_cache_handler._resp = "test_resp"
exp_cache_handler._score = Score(val=7)
exp_cache_handler.save_experience()
mock_exp_manager.create_exp.assert_called_once()
def test_choose_wrapper_async(self, mocker):
async def async_func():
pass
wrapper = ExpCacheHandler.choose_wrapper(async_func, mocker.AsyncMock())
assert asyncio.iscoroutinefunction(wrapper)
def test_choose_wrapper_sync(self, mocker):
def sync_func():
pass
wrapper = ExpCacheHandler.choose_wrapper(sync_func, mocker.AsyncMock())
assert not asyncio.iscoroutinefunction(wrapper)
def test_validate_params(self):
with pytest.raises(ValueError):
ExpCacheHandler(func=lambda x: x, args=(), kwargs={})
def test_generate_tag(self):
class TestClass:
def test_method(self):
pass
handler = ExpCacheHandler(func=TestClass().test_method, args=(TestClass(),), kwargs={"req": "test"})
assert handler._generate_tag() == "TestClass.test_method"
handler = ExpCacheHandler(func=lambda x: x, args=(), kwargs={"req": "test"})
assert handler._generate_tag() == "<lambda>"
class TestExpCache:
@pytest.fixture
def mock_exp_manager(self, mocker, mock_config):
manager = mocker.MagicMock(spec=ExperienceManager)
manager.storage = mocker.MagicMock(spec=SimpleEngine)
manager.config = mock_config
manager.query_exps = mocker.AsyncMock()
manager.create_exp = mocker.MagicMock()
return manager
@pytest.fixture
def mock_scorer(self, mocker):
scorer = mocker.MagicMock(spec=SimpleScorer)
scorer.evaluate = mocker.AsyncMock(return_value=Score())
return scorer
@pytest.fixture
def mock_perfect_judge(self, mocker):
return mocker.MagicMock(spec=SimplePerfectJudge)
@pytest.fixture
def mock_config(self, mocker):
return mocker.patch("metagpt.exp_pool.decorator.config")
@pytest.mark.asyncio
async def test_exp_cache_disabled(self, mock_config, mock_exp_manager):
mock_config.exp_pool.enable_read = False
@exp_cache(manager=mock_exp_manager)
async def test_func(req):
return "result"
result = await test_func(req="test")
assert result == "result"
mock_exp_manager.query_exps.assert_not_called()
@pytest.mark.asyncio
async def test_exp_cache_enabled_no_perfect_exp(self, mock_config, mock_exp_manager, mock_scorer):
mock_config.exp_pool.enable_read = True
mock_exp_manager.query_exps.return_value = []
@exp_cache(manager=mock_exp_manager, scorer=mock_scorer)
async def test_func(req):
return "computed_result"
result = await test_func(req="test")
assert result == "computed_result"
mock_exp_manager.query_exps.assert_called()
mock_exp_manager.create_exp.assert_called()
@pytest.mark.asyncio
async def test_exp_cache_enabled_with_perfect_exp(self, mock_config, mock_exp_manager, mock_perfect_judge):
mock_config.exp_pool.enable_read = True
perfect_exp = Experience(req="test", resp="perfect_result")
mock_exp_manager.query_exps.return_value = [perfect_exp]
mock_perfect_judge.is_perfect_exp.return_value = True
@exp_cache(manager=mock_exp_manager, perfect_judge=mock_perfect_judge)
async def test_func(req):
return "should_not_be_called"
result = await test_func(req="test")
assert result == "perfect_result"
mock_exp_manager.query_exps.assert_called_once()
mock_exp_manager.create_exp.assert_not_called()

View file

@ -0,0 +1,78 @@
import pytest
from metagpt.config2 import Config
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.configs.llm_config import LLMConfig
from metagpt.exp_pool.manager import Experience, ExperienceManager
from metagpt.exp_pool.schema import QueryType

class TestExperienceManager:
    @pytest.fixture
    def mock_config(self):
        return Config(llm=LLMConfig(), exp_pool=ExperiencePoolConfig(enable_write=True, enable_read=True))

    @pytest.fixture
    def mock_storage(self, mocker):
        engine = mocker.MagicMock()
        engine.add_objs = mocker.MagicMock()
        engine.aretrieve = mocker.AsyncMock(return_value=[])
        engine._retriever = mocker.MagicMock()
        engine._retriever._vector_store = mocker.MagicMock()
        engine._retriever._vector_store._collection = mocker.MagicMock()
        engine._retriever._vector_store._collection.count = mocker.MagicMock(return_value=10)
        return engine

    @pytest.fixture
    def exp_manager(self, mock_config, mock_storage):
        manager = ExperienceManager(config=mock_config)
        manager._storage = mock_storage
        return manager

    def test_vector_store_property(self, exp_manager):
        assert exp_manager.vector_store == exp_manager.storage._retriever._vector_store

    @pytest.mark.asyncio
    async def test_query_exps_with_exact_match(self, exp_manager, mocker):
        req = "exact query"
        exp1 = Experience(req=req, resp="response1")
        exp2 = Experience(req="different query", resp="response2")
        mock_node1 = mocker.MagicMock(metadata={"obj": exp1})
        mock_node2 = mocker.MagicMock(metadata={"obj": exp2})
        exp_manager.storage.aretrieve.return_value = [mock_node1, mock_node2]

        result = await exp_manager.query_exps(req, query_type=QueryType.EXACT)
        assert len(result) == 1
        assert result[0].req == req

    @pytest.mark.asyncio
    async def test_query_exps_with_tag_filter(self, exp_manager, mocker):
        tag = "test_tag"
        exp1 = Experience(req="query1", resp="response1", tag=tag)
        exp2 = Experience(req="query2", resp="response2", tag="other_tag")
        mock_node1 = mocker.MagicMock(metadata={"obj": exp1})
        mock_node2 = mocker.MagicMock(metadata={"obj": exp2})
        exp_manager.storage.aretrieve.return_value = [mock_node1, mock_node2]

        result = await exp_manager.query_exps("query", tag=tag)
        assert len(result) == 1
        assert result[0].tag == tag

    def test_get_exps_count(self, exp_manager):
        assert exp_manager.get_exps_count() == 10

    def test_create_exp_write_disabled(self, exp_manager, mock_config):
        mock_config.exp_pool.enable_write = False
        exp = Experience(req="test", resp="response")
        exp_manager.create_exp(exp)
        exp_manager.storage.add_objs.assert_not_called()

    @pytest.mark.asyncio
    async def test_query_exps_read_disabled(self, exp_manager, mock_config):
        mock_config.exp_pool.enable_read = False
        result = await exp_manager.query_exps("query")
        assert result == []
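
Editor's note: outside the mocks, the read/write flow exercised here might look as follows; every name comes from the imports above, though the wiring itself is a sketch rather than prescribed usage (create_exp may touch a real vector store):

import asyncio

from metagpt.config2 import Config
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.configs.llm_config import LLMConfig
from metagpt.exp_pool.manager import Experience, ExperienceManager
from metagpt.exp_pool.schema import QueryType


async def demo():
    config = Config(llm=LLMConfig(), exp_pool=ExperiencePoolConfig(enable_read=True, enable_write=True))
    manager = ExperienceManager(config=config)
    # No-op when exp_pool.enable_write is False, as asserted above.
    manager.create_exp(Experience(req="exact query", resp="response1"))
    # QueryType.EXACT keeps only experiences whose req matches verbatim.
    exps = await manager.query_exps("exact query", query_type=QueryType.EXACT)
    print(len(exps))


asyncio.run(demo())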

View file

@@ -0,0 +1,40 @@
import pytest
from metagpt.exp_pool.perfect_judges import SimplePerfectJudge
from metagpt.exp_pool.schema import MAX_SCORE, Experience, Metric, Score

class TestSimplePerfectJudge:
    @pytest.fixture
    def simple_perfect_judge(self):
        return SimplePerfectJudge()

    @pytest.mark.asyncio
    async def test_is_perfect_exp_perfect_match(self, simple_perfect_judge):
        exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE)))
        result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
        assert result is True

    @pytest.mark.asyncio
    async def test_is_perfect_exp_imperfect_score(self, simple_perfect_judge):
        exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE - 1)))
        result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
        assert result is False

    @pytest.mark.asyncio
    async def test_is_perfect_exp_mismatched_request(self, simple_perfect_judge):
        exp = Experience(req="test_request", resp="resp", metric=Metric(score=Score(val=MAX_SCORE)))
        result = await simple_perfect_judge.is_perfect_exp(exp, "different_request")
        assert result is False

    @pytest.mark.asyncio
    async def test_is_perfect_exp_no_metric(self, simple_perfect_judge):
        exp = Experience(req="test_request", resp="resp")
        result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
        assert result is False

    @pytest.mark.asyncio
    async def test_is_perfect_exp_no_score(self, simple_perfect_judge):
        exp = Experience(req="test_request", resp="resp", metric=Metric())
        result = await simple_perfect_judge.is_perfect_exp(exp, "test_request")
        assert result is False
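
Editor's note: these cases pin down the judge's contract: an experience is perfect only when the stored request matches the incoming one exactly and its score equals MAX_SCORE. An equivalent check, sketched from the assertions rather than from the library's implementation:

from metagpt.exp_pool.schema import MAX_SCORE, Experience


async def is_perfect(exp: Experience, req: str) -> bool:
    # Mirrors the behavior asserted above: exact request match plus a
    # maximal score; a missing metric or score counts as imperfect.
    if not exp.metric or not exp.metric.score:
        return False
    return exp.req == req and exp.metric.score.val == MAX_SCORE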

View file

@@ -0,0 +1,64 @@
import json
import pytest
from metagpt.exp_pool.schema import Score
from metagpt.exp_pool.scorers.simple import SIMPLE_SCORER_TEMPLATE, SimpleScorer
from metagpt.llm import BaseLLM

class TestSimpleScorer:
    @pytest.fixture
    def mock_llm(self, mocker):
        mock_llm = mocker.MagicMock(spec=BaseLLM)
        return mock_llm

    @pytest.fixture
    def simple_scorer(self, mock_llm):
        return SimpleScorer(llm=mock_llm)

    def test_init(self, mock_llm):
        scorer = SimpleScorer(llm=mock_llm)
        assert isinstance(scorer.llm, BaseLLM)

    @pytest.mark.asyncio
    async def test_evaluate(self, simple_scorer, mock_llm, mocker):
        # Mock request and response
        req = "What is the capital of France?"
        resp = "The capital of France is Paris."

        # Mock LLM response
        mock_llm_response = '{"val": 9, "reason": "Accurate and concise answer"}'
        mock_llm.aask.return_value = f"```json\n{mock_llm_response}\n```"

        # Mock CodeParser.parse_code
        mocker.patch("metagpt.utils.common.CodeParser.parse_code", return_value=mock_llm_response)

        # Test evaluate method
        result = await simple_scorer.evaluate(req, resp)

        # Assert LLM was called with correct prompt
        expected_prompt = SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp)
        mock_llm.aask.assert_called_once_with(expected_prompt)

        # Assert the result is correct
        assert isinstance(result, Score)
        assert result.val == 9
        assert result.reason == "Accurate and concise answer"

    @pytest.mark.asyncio
    async def test_evaluate_invalid_response(self, simple_scorer, mock_llm, mocker):
        # Mock request and response
        req = "What is the capital of France?"
        resp = "The capital of France is Paris."

        # Mock LLM response with invalid JSON
        mock_llm_response = "Invalid JSON"
        mock_llm.aask.return_value = f"```json\n{mock_llm_response}\n```"

        # Mock CodeParser.parse_code
        mocker.patch("metagpt.utils.common.CodeParser.parse_code", return_value=mock_llm_response)

        # Test evaluate method with invalid response
        with pytest.raises(json.JSONDecodeError):
            await simple_scorer.evaluate(req, resp)
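
Editor's note: the two cases imply the evaluate pipeline: format SIMPLE_SCORER_TEMPLATE, ask the LLM, strip the ```json fence, and validate the payload into a Score, propagating JSONDecodeError on bad output. A sketch under those assumptions; the manual fence stripping stands in for CodeParser.parse_code, which the tests patch out:

import json

from metagpt.exp_pool.schema import Score
from metagpt.exp_pool.scorers.simple import SIMPLE_SCORER_TEMPLATE


async def evaluate_like_simple_scorer(llm, req: str, resp: str) -> Score:
    raw = await llm.aask(SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp))
    # Strip a ```json ... ``` fence if present (requires Python 3.9+).
    payload = raw.strip().removeprefix("```json").removesuffix("```").strip()
    return Score(**json.loads(payload))  # raises json.JSONDecodeError on invalid JSON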

View file

@@ -0,0 +1,35 @@
from typing import Type
import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.exp_pool.serializers.action_node import ActionNodeSerializer

class TestActionNodeSerializer:
    @pytest.fixture
    def serializer(self):
        return ActionNodeSerializer()

    @pytest.fixture
    def action_node(self):
        class InstructContent:
            def __init__(self, json_data):
                self.json_data = json_data

            def model_dump_json(self):
                return self.json_data

        action_node = ActionNode(key="", expected_type=Type[str], instruction="", example="")
        action_node.instruct_content = InstructContent('{"key": "value"}')
        return action_node

    def test_serialize_resp(self, serializer: ActionNodeSerializer, action_node: ActionNode):
        serialized = serializer.serialize_resp(action_node)
        assert serialized == '{"key": "value"}'

    def test_deserialize_resp(self, serializer: ActionNodeSerializer):
        deserialized = serializer.deserialize_resp('{"key": "value"}')
        assert isinstance(deserialized, ActionNode)
        assert deserialized.instruct_content.model_dump_json() == '{"key": "value"}'
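
Editor's note: taken together, the two tests make serialize_resp and deserialize_resp inverses on the JSON payload. A round-trip using only the calls exercised above:

from metagpt.exp_pool.serializers.action_node import ActionNodeSerializer

serializer = ActionNodeSerializer()
node = serializer.deserialize_resp('{"key": "value"}')
# serialize_resp reads instruct_content.model_dump_json(), so the JSON
# survives the round trip unchanged.
assert serializer.serialize_resp(node) == '{"key": "value"}'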

View file

@@ -0,0 +1,46 @@
import json
import pytest
from metagpt.exp_pool.serializers import RoleZeroSerializer

class TestRoleZeroSerializer:
    @pytest.fixture
    def serializer(self) -> RoleZeroSerializer:
        return RoleZeroSerializer()

    @pytest.fixture
    def last_item(self) -> dict:
        return {
            "role": "user",
            "content": "# Current Plan\nsome plan\n# Current Plan\nsome plan\n# Instruction\nsome instruction",
        }

    @pytest.fixture
    def sample_req(self):
        return [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]

    def test_serialize_req_empty_input(self, serializer: RoleZeroSerializer):
        assert serializer.serialize_req(req=[]) == ""

    def test_serialize_req_with_content(self, serializer: RoleZeroSerializer, last_item: dict):
        req = [
            {"role": "user", "content": "Command Editor.read executed: file_path=test.py"},
            {"role": "assistant", "content": "Some other content"},
            last_item,
        ]
        expected_output = json.dumps([{"role": "user", "content": "Command Editor.read executed: file_path=test.py"}])
        assert serializer.serialize_req(req=req) == expected_output

    def test_filter_req(self, serializer: RoleZeroSerializer):
        req = [
            {"role": "user", "content": "Command Editor.read executed: file_path=test1.py"},
            {"role": "assistant", "content": "Some other content"},
            {"role": "user", "content": "Command Editor.read executed: file_path=test2.py"},
            {"role": "assistant", "content": "Final content"},
        ]
        filtered_req = serializer._filter_req(req)

        assert len(filtered_req) == 2
        assert filtered_req[0]["content"] == "Command Editor.read executed: file_path=test1.py"
        assert filtered_req[1]["content"] == "Command Editor.read executed: file_path=test2.py"

View file

@@ -0,0 +1,44 @@
import pytest
from metagpt.exp_pool.serializers.simple import SimpleSerializer

class TestSimpleSerializer:
    @pytest.fixture
    def serializer(self):
        return SimpleSerializer()

    def test_serialize_req(self, serializer: SimpleSerializer):
        # Test with different types of input
        assert serializer.serialize_req(req=123) == "123"
        assert serializer.serialize_req(req="test") == "test"
        assert serializer.serialize_req(req=[1, 2, 3]) == "[1, 2, 3]"
        assert serializer.serialize_req(req={"a": 1}) == "{'a': 1}"

    def test_serialize_resp(self, serializer: SimpleSerializer):
        # Test with different types of input
        assert serializer.serialize_resp(456) == "456"
        assert serializer.serialize_resp("response") == "response"
        assert serializer.serialize_resp([4, 5, 6]) == "[4, 5, 6]"
        assert serializer.serialize_resp({"b": 2}) == "{'b': 2}"

    def test_deserialize_resp(self, serializer: SimpleSerializer):
        # Test with different types of input
        assert serializer.deserialize_resp("789") == "789"
        assert serializer.deserialize_resp("test_response") == "test_response"
        assert serializer.deserialize_resp("[7, 8, 9]") == "[7, 8, 9]"
        assert serializer.deserialize_resp("{'c': 3}") == "{'c': 3}"

    def test_roundtrip(self, serializer: SimpleSerializer):
        # Test serialization and deserialization roundtrip
        original = "test_roundtrip"
        serialized = serializer.serialize_resp(original)
        deserialized = serializer.deserialize_resp(serialized)
        assert deserialized == original

    @pytest.mark.parametrize("input_value", [123, "test", [1, 2, 3], {"a": 1}, None])
    def test_serialize_req_types(self, serializer: SimpleSerializer, input_value):
        # Test serialize_req with various input types
        result = serializer.serialize_req(req=input_value)
        assert isinstance(result, str)
        assert result == str(input_value)
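
Editor's note: one caveat these cases make visible: both directions are plain str() conversion, so the roundtrip only preserves values that are already strings. For example:

from metagpt.exp_pool.serializers.simple import SimpleSerializer

serializer = SimpleSerializer()
# Non-string values come back as their str() form, not the original type.
assert serializer.deserialize_resp(serializer.serialize_resp([1, 2])) == "[1, 2]"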

View file

@@ -0,0 +1,54 @@
from metagpt.roles.di.data_analyst import DataAnalyst

HOUSE_PRICE_TRAIN_PATH = '/data/house-prices-advanced-regression-techniques/split_train.csv'
HOUSE_PRICE_EVAL_PATH = '/data/house-prices-advanced-regression-techniques/split_eval.csv'
HOUSE_PRICE_REQ = f"""
This is a house price dataset, your goal is to predict the sale price of a property based on its features. The target column is SalePrice. Perform data analysis, data preprocessing, feature engineering, and modeling to predict the target. Report RMSE between the logarithm of the predicted value and the logarithm of the observed sales price on the eval data. Train data path: '{HOUSE_PRICE_TRAIN_PATH}', eval data path: '{HOUSE_PRICE_EVAL_PATH}'.
"""

CALIFORNIA_HOUSING_REQ = """
Analyze the 'California-housing-dataset' using https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_california_housing.html#sklearn.datasets.fetch_california_housing to predict the median house value. You need to perform data preprocessing, feature engineering and finally modeling to predict the target. Use machine learning techniques such as linear regression (including ridge regression and lasso regression), random forest, CatBoost, LightGBM, XGBoost or other appropriate methods. You also need to report the MSE on the test dataset.
"""

# For web scraping tasks, please provide a url beginning with `https://` or `http://`
PAPER_LIST_REQ = """
Get data from `paperlist` table in https://papercopilot.com/statistics/iclr-statistics/iclr-2024-statistics/,
and save it to a csv file. Paper title must include `multiagent` or `large language model`.
**Notice: view the page element before writing scraping code**
"""

ECOMMERCE_REQ = """
Get products data from website https://scrapeme.live/shop/ and save it as a csv file.
The first page's product name, price, product URL, and image URL must be saved in the csv.
**Notice: view the page element before writing scraping code**
"""

NEWS_36KR_REQ = """Collect the financing information of all startups from the 36kr venture platform https://pitchhub.36kr.com/financing-flash. **Notice: this is a Chinese website**;
Below is a rough workflow; you will adjust the tasks in the current plan as appropriate based on the result of each step:
1. Scrape the page and save its html structure locally;
2. Print the 2000 characters of html content following the 7th occurrence of the *`快讯`* keyword, as a *sample of the 快讯 html content*;
3. Study the patterns in the *sample of the 快讯 html content* and design regular expressions to extract the title, link, and time of each *`快讯`*;
4. Filter the startup financing *`快讯`* from the last 3 days and print the first 5 as a list[dict];
5. Save all results to a local csv file.
**Notice: view the page element before writing scraping code**
"""

WIKIPEDIA_SEARCH_REQ = """
Search for `LLM` on https://www.wikipedia.org/ and print all the meaningful meanings of the entry.
"""

STACKOVERFLOW_CLICK_REQ = """
Click the Questions tag in https://stackoverflow.com/ and scrape question name, votes, answers and views num to csv in the first result page.
"""


async def main():
    di = DataAnalyst()
    await di.browser.start()
    await di.run(STACKOVERFLOW_CLICK_REQ)


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())

View file

@@ -55,15 +55,15 @@ async def run(instance, swe_result_dir):
        logger.info(f"Instance {instance['instance_id']} already exists, skipping execution.")
        return

    repo_path = TEST_REPO_DIR / instance["repo"].replace("-", "_").replace("/", "__") + "_" + instance["version"]
    repo_path = TEST_REPO_DIR / (instance["repo"].replace("-", "_").replace("/", "__") + "_" + instance["version"])

    # Preprocessing
    terminal = Terminal()
    terminal.run_command(f"cd {repo_path} && git reset --hard && git clean -n -d && git clean -f -d")
    terminal.run_command("BRANCH=$(git remote show origin | awk '/HEAD branch/ {print $NF}')")
    logger.info(terminal.run_command("echo $BRANCH"))
    logger.info(terminal.run_command('git checkout "$BRANCH"'))
    logger.info(terminal.run_command("git branch"))
    await terminal.run_command(f"cd {repo_path} && git reset --hard && git clean -n -d && git clean -f -d")
    await terminal.run_command("BRANCH=$(git remote show origin | awk '/HEAD branch/ {print $NF}')")
    logger.info(await terminal.run_command("echo $BRANCH"))
    logger.info(await terminal.run_command('git checkout "$BRANCH"'))
    logger.info(await terminal.run_command("git branch"))

    user_requirement_and_issue = INSTANCE_TEMPLATE.format(
        issue=instance["problem_statement"],

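Editor's note: this hunk and the test update below reflect the same API change: Terminal.run_command is now a coroutine, so every call site gains an await.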
View file

@@ -4,16 +4,17 @@ from metagpt.const import DATA_PATH, METAGPT_ROOT
from metagpt.tools.libs.terminal import Terminal


def test_terminal():
@pytest.mark.asyncio
async def test_terminal():
    terminal = Terminal()
    terminal.run_command(f"cd {METAGPT_ROOT}")
    output = terminal.run_command("pwd")
    await terminal.run_command(f"cd {METAGPT_ROOT}")
    output = await terminal.run_command("pwd")
    assert output.strip() == str(METAGPT_ROOT)

    # pwd now should be METAGPT_ROOT, cd data should land in DATA_PATH
    terminal.run_command("cd data")
    output = terminal.run_command("pwd")
    await terminal.run_command("cd data")
    output = await terminal.run_command("pwd")
    assert output.strip() == str(DATA_PATH)