feat: merge geekan/dev

This commit is contained in:
莘权 马 2024-01-27 13:38:14 +08:00
commit aa0909525e
62 changed files with 1733 additions and 154 deletions

View file

@ -15,6 +15,7 @@ from pydantic import BaseModel, ConfigDict, Field, model_validator
from metagpt.actions.action_node import ActionNode
from metagpt.context_mixin import ContextMixin
from metagpt.schema import (
CodePlanAndChangeContext,
CodeSummarizeContext,
CodingContext,
RunCodeContext,
@ -28,7 +29,9 @@ class Action(SerializationMixin, ContextMixin, BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
name: str = ""
i_context: Union[dict, CodingContext, CodeSummarizeContext, TestingContext, RunCodeContext, str, None] = ""
i_context: Union[
dict, CodingContext, CodeSummarizeContext, TestingContext, RunCodeContext, CodePlanAndChangeContext, str, None
] = ""
prefix: str = "" # aask*时会加上prefix作为system_message
desc: str = "" # for skill manager
node: ActionNode = Field(default=None, exclude=True)

View file

@ -12,7 +12,7 @@ import json
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from pydantic import BaseModel, create_model, model_validator
from pydantic import BaseModel, Field, create_model, model_validator
from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions.action_outcls_registry import register_action_outcls
@ -186,11 +186,27 @@ class ActionNode:
obj.add_children(nodes)
return obj
def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
def get_children_mapping_old(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
    """Return a flat {key: (expected_type, ...)} mapping of the direct children.

    Deprecated: superseded by get_children_mapping, which also walks nested
    children; kept only so existing callers keep working.
    """
    skip = set(exclude or [])
    mapping = {}
    for key, child in self.children.items():
        if key in skip:
            continue
        mapping[key] = (child.expected_type, ...)
    return mapping
def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
    """Return {dotted_key: (expected_type, ...)} for every descendant node.

    Walks the child tree; nested children are keyed by a dotted path such as
    "parent.child". A key listed in ``exclude`` is skipped together with its
    entire subtree, matching the recursive original.
    """
    skip = set(exclude or [])
    mapping: Dict[str, Tuple[Type, Any]] = {}
    stack = [(self, "")]
    while stack:
        node, prefix = stack.pop()
        for key, child in node.children.items():
            if key in skip:
                continue
            dotted = f"{prefix}{key}"
            mapping[dotted] = (child.expected_type, ...)
            stack.append((child, f"{dotted}."))
    return mapping
def get_self_mapping(self) -> Dict[str, Tuple[Type, Any]]:
    """Map this node's own key to its (expected_type, ...) tuple."""
    mapping = {}
    mapping[self.key] = (self.expected_type, ...)
    return mapping
@ -616,3 +632,62 @@ class ActionNode:
self.update_instruct_content(revise_contents)
return revise_contents
@classmethod
def from_pydantic(cls, model: Type[BaseModel], key: str = None):
    """
    Creates an ActionNode tree from a Pydantic model.

    Args:
        model (Type[BaseModel]): The Pydantic model to convert.
        key (str, optional): Key for the created root node. Defaults to the
            model's class name; recursion passes the field name so that
            nested models keep their field key.

    Returns:
        ActionNode: The root node of the created ActionNode tree.
    """
    key = key or model.__name__
    # Bug fix: use the resolved `key` here. The original hard-coded
    # model.__name__, which discarded the field name passed in when
    # recursing into nested models below.
    root_node = cls(key=key, expected_type=Type[model], instruction="", example="")

    for field_name, field_info in model.model_fields.items():
        # Extracting field details
        expected_type = field_info.annotation
        instruction = field_info.description or ""
        example = field_info.default

        # Check if the field is a Pydantic model itself.
        # isinstance guards against typing constructs (List[int], Dict[...]),
        # which are not classes and would make issubclass() raise TypeError.
        if isinstance(expected_type, type) and issubclass(expected_type, BaseModel):
            # Recursively process the nested model, keyed by the field name.
            child_node = cls.from_pydantic(expected_type, key=field_name)
        else:
            child_node = cls(key=field_name, expected_type=expected_type, instruction=instruction, example=example)

        root_node.add_child(child_node)

    return root_node
# Demo model: exercised by the __main__ block below to drive
# ActionNode.from_pydantic, including as a nested BaseModel field (Task.tool).
class ToolUse(BaseModel):
tool_name: str = Field(default="a", description="tool name", examples=[])
class Task(BaseModel):
    """Demo task model used by the ``__main__`` from_pydantic example."""

    # Bug fix: the default was the string "1" for an int-typed field; pydantic
    # does not validate defaults unless validate_default is set, so the type
    # mismatch would only surface later at serialization/usage time.
    task_id: int = Field(default=1, description="task id", examples=[1, 2, 3])
    name: str = Field(default="Get data from ...", description="task name", examples=[])
    # default_factory avoids declaring a shared mutable list as the default
    # (pydantic copies defaults, but the factory form is the idiomatic one).
    dependent_task_ids: List[int] = Field(default_factory=list, description="dependent task ids", examples=[1, 2, 3])
    tool: ToolUse = Field(default_factory=ToolUse, description="tool use", examples=[])
class Tasks(BaseModel):
    """Demo container model: a flat list of Task items."""

    # default_factory=list instead of a mutable [] default (idiomatic pydantic).
    tasks: List[Task] = Field(default_factory=list, description="tasks", examples=[])
# Demo: build an ActionNode tree from the nested pydantic models above and
# show both the pydantic JSON schemas and the compiled node prompt.
if __name__ == "__main__":
node = ActionNode.from_pydantic(Tasks)
print("Tasks")
print(Tasks.model_json_schema())
print("Task")
print(Task.model_json_schema())
print(node)
prompt = node.compile(context="")
# NOTE(review): create_children_class() is called for its side effect after
# compile(); presumably it materializes dynamic child classes — confirm it
# exists on ActionNode and whether the call order relative to compile matters.
node.create_children_class()
print(prompt)

View file

@ -14,7 +14,14 @@ from pathlib import Path
from typing import Optional
from metagpt.actions import Action, ActionOutput
from metagpt.actions.design_api_an import DESIGN_API_NODE
from metagpt.actions.design_api_an import (
DATA_STRUCTURES_AND_INTERFACES,
DESIGN_API_NODE,
PROGRAM_CALL_FLOW,
REFINED_DATA_STRUCTURES_AND_INTERFACES,
REFINED_DESIGN_NODE,
REFINED_PROGRAM_CALL_FLOW,
)
from metagpt.const import DATA_API_DESIGN_FILE_REPO, SEQ_FLOW_FILE_REPO
from metagpt.logs import logger
from metagpt.schema import Document, Documents, Message
@ -39,7 +46,7 @@ class WriteDesign(Action):
)
async def run(self, with_messages: Message, schema: str = None):
# Use `git status` to identify which PRD documents have been modified in the `docs/prds` directory.
# Use `git status` to identify which PRD documents have been modified in the `docs/prd` directory.
changed_prds = self.repo.docs.prd.changed_files
# Use `git status` to identify which design documents in the `docs/system_designs` directory have undergone
# changes.
@ -68,7 +75,7 @@ class WriteDesign(Action):
async def _merge(self, prd_doc, system_design_doc):
context = NEW_REQ_TEMPLATE.format(old_design=system_design_doc.content, context=prd_doc.content)
node = await DESIGN_API_NODE.fill(context=context, llm=self.llm)
node = await REFINED_DESIGN_NODE.fill(context=context, llm=self.llm)
system_design_doc.content = node.instruct_content.model_dump_json()
return system_design_doc
@ -92,7 +99,7 @@ class WriteDesign(Action):
async def _save_data_api_design(self, design_doc):
m = json.loads(design_doc.content)
data_api_design = m.get("Data structures and interfaces")
data_api_design = m.get(DATA_STRUCTURES_AND_INTERFACES.key) or m.get(REFINED_DATA_STRUCTURES_AND_INTERFACES.key)
if not data_api_design:
return
pathname = self.repo.workdir / DATA_API_DESIGN_FILE_REPO / Path(design_doc.filename).with_suffix("")
@ -101,7 +108,7 @@ class WriteDesign(Action):
async def _save_seq_flow(self, design_doc):
m = json.loads(design_doc.content)
seq_flow = m.get("Program call flow")
seq_flow = m.get(PROGRAM_CALL_FLOW.key) or m.get(REFINED_PROGRAM_CALL_FLOW.key)
if not seq_flow:
return
pathname = self.repo.workdir / Path(SEQ_FLOW_FILE_REPO) / Path(design_doc.filename).with_suffix("")

View file

@ -8,6 +8,7 @@
from typing import List
from metagpt.actions.action_node import ActionNode
from metagpt.logs import logger
from metagpt.utils.mermaid import MMC1, MMC2
IMPLEMENTATION_APPROACH = ActionNode(
@ -17,6 +18,15 @@ IMPLEMENTATION_APPROACH = ActionNode(
example="We will ...",
)
# Incremental-development counterpart of IMPLEMENTATION_APPROACH: asks the LLM
# to revise the original approach instead of writing one from scratch.
REFINED_IMPLEMENTATION_APPROACH = ActionNode(
key="Refined Implementation Approach",
expected_type=str,
instruction="Update and extend the original implementation approach to reflect the evolving challenges and "
"requirements due to incremental development. Outline the steps involved in the implementation process with the "
"detailed strategies.",
example="We will refine ...",
)

# Snake_case project name used to create the workspace directory.
PROJECT_NAME = ActionNode(
key="Project name", expected_type=str, instruction="The project name with underline", example="game_2048"
)
@ -28,6 +38,14 @@ FILE_LIST = ActionNode(
example=["main.py", "game.py"],
)
# Incremental-development counterpart of FILE_LIST.
REFINED_FILE_LIST = ActionNode(
    key="Refined File list",
    expected_type=List[str],
    # Bug fix: the first literal lacked a trailing space, so implicit string
    # concatenation produced "added.Ensure" in the prompt.
    instruction="Update and expand the original file list including only relative paths. Up to 2 files can be added. "
    "Ensure that the refined file list reflects the evolving structure of the project.",
    example=["main.py", "game.py", "new_feature.py"],
)
DATA_STRUCTURES_AND_INTERFACES = ActionNode(
key="Data structures and interfaces",
expected_type=str,
@ -37,6 +55,16 @@ DATA_STRUCTURES_AND_INTERFACES = ActionNode(
example=MMC1,
)
# Incremental-development counterpart of DATA_STRUCTURES_AND_INTERFACES.
REFINED_DATA_STRUCTURES_AND_INTERFACES = ActionNode(
    key="Refined Data structures and interfaces",
    expected_type=str,
    # Bug fix: missing trailing space made the adjacent literals concatenate
    # to "standards.Retain" in the prompt.
    instruction="Update and extend the existing mermaid classDiagram code syntax to incorporate new classes, "
    "methods (including __init__), and functions with precise type annotations. Delineate additional "
    "relationships between classes, ensuring clarity and adherence to PEP8 standards. "
    "Retain content that is not related to incremental development but important for consistency and clarity.",
    example=MMC1,
)
PROGRAM_CALL_FLOW = ActionNode(
key="Program call flow",
expected_type=str,
@ -45,6 +73,16 @@ PROGRAM_CALL_FLOW = ActionNode(
example=MMC2,
)
# Incremental-development counterpart of PROGRAM_CALL_FLOW.
REFINED_PROGRAM_CALL_FLOW = ActionNode(
    key="Refined Program call flow",
    expected_type=str,
    # Bug fix: two missing trailing spaces made the literals concatenate to
    # "theCRUD" and "introducedin" in the prompt.
    instruction="Extend the existing sequenceDiagram code syntax with detailed information, accurately covering the "
    "CRUD and initialization of each object. Ensure correct syntax usage and reflect the incremental changes introduced "
    "in the classes and API defined above. "
    "Retain content that is not related to incremental development but important for consistency and clarity.",
    example=MMC2,
)
ANYTHING_UNCLEAR = ActionNode(
key="Anything UNCLEAR",
expected_type=str,
@ -61,4 +99,24 @@ NODES = [
ANYTHING_UNCLEAR,
]
# Node set used when revising an existing design during incremental
# development; ANYTHING_UNCLEAR is shared with the fresh-design NODES list.
REFINED_NODES = [
REFINED_IMPLEMENTATION_APPROACH,
REFINED_FILE_LIST,
REFINED_DATA_STRUCTURES_AND_INTERFACES,
REFINED_PROGRAM_CALL_FLOW,
ANYTHING_UNCLEAR,
]

# Composite nodes consumed by WriteDesign: DESIGN_API_NODE for a fresh design,
# REFINED_DESIGN_NODE when merging new requirements into an existing one.
DESIGN_API_NODE = ActionNode.from_children("DesignAPI", NODES)
REFINED_DESIGN_NODE = ActionNode.from_children("RefinedDesignAPI", REFINED_NODES)
def main():
    """Log the compiled prompts for both design nodes (manual smoke test)."""
    for design_node in (DESIGN_API_NODE, REFINED_DESIGN_NODE):
        logger.info(design_node.compile(context=""))


if __name__ == "__main__":
    main()

View file

@ -48,5 +48,5 @@ class PrepareDocuments(Action):
# Write the newly added requirements from the main parameter idea to `docs/requirement.txt`.
doc = await self.repo.docs.save(filename=REQUIREMENT_FILENAME, content=with_messages[0].content)
# Send a Message notification to the WritePRD action, instructing it to process requirements using
# `docs/requirement.txt` and `docs/prds/`.
# `docs/requirement.txt` and `docs/prd/`.
return ActionOutput(content=doc.content, instruct_content=doc)

View file

@ -15,14 +15,14 @@ from typing import Optional
from metagpt.actions.action import Action
from metagpt.actions.action_output import ActionOutput
from metagpt.actions.project_management_an import PM_NODE
from metagpt.actions.project_management_an import PM_NODE, REFINED_PM_NODE
from metagpt.const import PACKAGE_REQUIREMENTS_FILENAME
from metagpt.logs import logger
from metagpt.schema import Document, Documents
NEW_REQ_TEMPLATE = """
### Legacy Content
{old_tasks}
{old_task}
### New Requirements
{context}
@ -77,8 +77,8 @@ class WriteTasks(Action):
return node
async def _merge(self, system_design_doc, task_doc) -> Document:
context = NEW_REQ_TEMPLATE.format(context=system_design_doc.content, old_tasks=task_doc.content)
node = await PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
context = NEW_REQ_TEMPLATE.format(context=system_design_doc.content, old_task=task_doc.content)
node = await REFINED_PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
task_doc.content = node.instruct_content.model_dump_json()
return task_doc

View file

@ -35,6 +35,20 @@ LOGIC_ANALYSIS = ActionNode(
],
)
# Incremental-development counterpart of LOGIC_ANALYSIS: merges the legacy
# analysis with the new requirements into one file/responsibility list.
REFINED_LOGIC_ANALYSIS = ActionNode(
key="Refined Logic Analysis",
expected_type=List[List[str]],
instruction="Review and refine the logic analysis by merging the Legacy Content and Incremental Content. "
"Provide a comprehensive list of files with classes/methods/functions to be implemented or modified incrementally. "
"Include dependency analysis, consider potential impacts on existing code, and document necessary imports.",
example=[
["game.py", "Contains Game class and ... functions"],
["main.py", "Contains main function, from game import Game"],
["new_feature.py", "Introduces NewFeature class and related functions"],
["utils.py", "Modifies existing utility functions to support incremental changes"],
],
)
TASK_LIST = ActionNode(
key="Task list",
expected_type=List[str],
@ -42,6 +56,15 @@ TASK_LIST = ActionNode(
example=["game.py", "main.py"],
)
# Incremental-development counterpart of TASK_LIST; its key ("Refined Task
# list") is what WriteCode.get_codes reads from incremental task documents.
REFINED_TASK_LIST = ActionNode(
key="Refined Task list",
expected_type=List[str],
instruction="Review and refine the combined task list after the merger of Legacy Content and Incremental Content, "
"and consistent with Refined File List. Ensure that tasks are organized in a logical and prioritized order, "
"considering dependencies for a streamlined and efficient development process. ",
example=["new_feature.py", "utils", "game.py", "main.py"],
)
FULL_API_SPEC = ActionNode(
key="Full API spec",
expected_type=str,
@ -54,9 +77,19 @@ SHARED_KNOWLEDGE = ActionNode(
key="Shared Knowledge",
expected_type=str,
instruction="Detail any shared knowledge, like common utility functions or configuration variables.",
example="'game.py' contains functions shared across the project.",
example="`game.py` contains functions shared across the project.",
)
# Incremental-development counterpart of SHARED_KNOWLEDGE.
REFINED_SHARED_KNOWLEDGE = ActionNode(
key="Refined Shared Knowledge",
expected_type=str,
instruction="Update and expand shared knowledge to reflect any new elements introduced. This includes common "
"utility functions, configuration variables for team collaboration. Retain content that is not related to "
"incremental development but important for consistency and clarity.",
example="`new_module.py` enhances shared utility functions for improved code reusability and collaboration.",
)
ANYTHING_UNCLEAR_PM = ActionNode(
key="Anything UNCLEAR",
expected_type=str,
@ -74,13 +107,25 @@ NODES = [
ANYTHING_UNCLEAR_PM,
]
# Node set for the incremental project-management pass; package and API-spec
# nodes are shared with the fresh NODES list, the rest are refined variants.
REFINED_NODES = [
REQUIRED_PYTHON_PACKAGES,
REQUIRED_OTHER_LANGUAGE_PACKAGES,
REFINED_LOGIC_ANALYSIS,
REFINED_TASK_LIST,
FULL_API_SPEC,
REFINED_SHARED_KNOWLEDGE,
ANYTHING_UNCLEAR_PM,
]

# Composite nodes consumed by WriteTasks: PM_NODE for a fresh task breakdown,
# REFINED_PM_NODE when merging a new design into existing tasks.
PM_NODE = ActionNode.from_children("PM_NODE", NODES)
REFINED_PM_NODE = ActionNode.from_children("REFINED_PM_NODE", REFINED_NODES)
def main():
    """Log the compiled prompts for both PM nodes (manual smoke test)."""
    for pm_node in (PM_NODE, REFINED_PM_NODE):
        logger.info(pm_node.compile(context=""))
if __name__ == "__main__":

View file

@ -16,6 +16,7 @@
class.
"""
import subprocess
from pathlib import Path
from typing import Tuple
from pydantic import Field
@ -150,11 +151,23 @@ class RunCode(Action):
return subprocess.run(cmd, check=check, cwd=cwd, env=env)
@staticmethod
def _install_requirements(working_directory, env):
    """Install the project's requirements.txt via pip, if present and non-empty.

    Args:
        working_directory: Directory containing requirements.txt; also used
            as the subprocess cwd.
        env: Environment mapping passed through to the pip subprocess.
    """
    # Defect fixed: this span carried a leftover duplicate header
    # `def _install_dependencies(...)` (renamed method), which would have
    # clobbered the real _install_dependencies defined below.
    file_path = Path(working_directory) / "requirements.txt"
    if not file_path.exists():
        return
    if file_path.stat().st_size == 0:
        # An empty requirements file would make pip fail needlessly.
        return
    install_command = ["python", "-m", "pip", "install", "-r", "requirements.txt"]
    # NOTE(review): bare "python" relies on PATH; sys.executable would be more
    # robust — confirm intended interpreter before changing.
    logger.info(" ".join(install_command))
    RunCode._install_via_subprocess(install_command, check=True, cwd=working_directory, env=env)
@staticmethod
def _install_pytest(working_directory, env):
    """Ensure pytest is available in the target environment via pip."""
    cmd = ["python", "-m", "pip", "install", "pytest"]
    logger.info(" ".join(cmd))
    RunCode._install_via_subprocess(cmd, check=True, cwd=working_directory, env=env)
@staticmethod
def _install_dependencies(working_directory, env):
    # Install project requirements first, then pytest (needed to run the
    # generated tests); both delegate to _install_via_subprocess.
    RunCode._install_requirements(working_directory, env)
    RunCode._install_pytest(working_directory, env)

View file

@ -26,9 +26,9 @@ ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenc
{system_design}
```
-----
# Tasks
# Task
```text
{tasks}
{task}
```
-----
{code_blocks}
@ -110,7 +110,7 @@ class SummarizeCode(Action):
format_example = FORMAT_EXAMPLE
prompt = PROMPT_TEMPLATE.format(
system_design=design_doc.content,
tasks=task_doc.content,
task=task_doc.content,
code_blocks="\n".join(code_blocks),
format_example=format_example,
)

View file

@ -21,10 +21,17 @@ from pydantic import Field
from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions.action import Action
from metagpt.const import BUGFIX_FILENAME
from metagpt.actions.project_management_an import REFINED_TASK_LIST, TASK_LIST
from metagpt.actions.write_code_plan_and_change_an import REFINED_TEMPLATE
from metagpt.const import (
BUGFIX_FILENAME,
CODE_PLAN_AND_CHANGE_FILENAME,
REQUIREMENT_FILENAME,
)
from metagpt.logs import logger
from metagpt.schema import CodingContext, Document, RunCodeResult
from metagpt.utils.common import CodeParser
from metagpt.utils.project_repo import ProjectRepo
PROMPT_TEMPLATE = """
NOTICE
@ -36,8 +43,8 @@ ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenc
## Design
{design}
## Tasks
{tasks}
## Task
{task}
## Legacy Code
```Code
@ -91,6 +98,9 @@ class WriteCode(Action):
bug_feedback = await self.repo.docs.get(filename=BUGFIX_FILENAME)
coding_context = CodingContext.loads(self.i_context.content)
test_doc = await self.repo.test_outputs.get(filename="test_" + coding_context.filename + ".json")
code_plan_and_change_doc = await self.repo.docs.code_plan_and_change.get(filename=CODE_PLAN_AND_CHANGE_FILENAME)
code_plan_and_change = code_plan_and_change_doc.content if code_plan_and_change_doc else ""
requirement_doc = await self.repo.docs.get(filename=REQUIREMENT_FILENAME)
summary_doc = None
if coding_context.design_doc and coding_context.design_doc.filename:
summary_doc = await self.repo.docs.code_summary.get(filename=coding_context.design_doc.filename)
@ -101,6 +111,10 @@ class WriteCode(Action):
if bug_feedback:
code_context = coding_context.code_doc.content
elif code_plan_and_change:
code_context = await self.get_codes(
coding_context.task_doc, exclude=self.i_context.filename, project_repo=self.repo, use_inc=True
)
else:
code_context = await self.get_codes(
coding_context.task_doc,
@ -108,15 +122,28 @@ class WriteCode(Action):
project_repo=self.repo.with_src_path(self.context.src_workspace),
)
prompt = PROMPT_TEMPLATE.format(
design=coding_context.design_doc.content if coding_context.design_doc else "",
tasks=coding_context.task_doc.content if coding_context.task_doc else "",
code=code_context,
logs=logs,
feedback=bug_feedback.content if bug_feedback else "",
filename=self.i_context.filename,
summary_log=summary_doc.content if summary_doc else "",
)
if code_plan_and_change:
prompt = REFINED_TEMPLATE.format(
user_requirement=requirement_doc.content if requirement_doc else "",
code_plan_and_change=code_plan_and_change,
design=coding_context.design_doc.content if coding_context.design_doc else "",
task=coding_context.task_doc.content if coding_context.task_doc else "",
code=code_context,
logs=logs,
feedback=bug_feedback.content if bug_feedback else "",
filename=self.i_context.filename,
summary_log=summary_doc.content if summary_doc else "",
)
else:
prompt = PROMPT_TEMPLATE.format(
design=coding_context.design_doc.content if coding_context.design_doc else "",
task=coding_context.task_doc.content if coding_context.task_doc else "",
code=code_context,
logs=logs,
feedback=bug_feedback.content if bug_feedback else "",
filename=self.i_context.filename,
summary_log=summary_doc.content if summary_doc else "",
)
logger.info(f"Writing {coding_context.filename}..")
code = await self.write_code(prompt)
if not coding_context.code_doc:
@ -127,20 +154,66 @@ class WriteCode(Action):
return coding_context
@staticmethod
async def get_codes(task_doc: Document, exclude: str, project_repo: ProjectRepo, use_inc: bool = False) -> str:
    """
    Get code snippets used as context when generating the `exclude` file.

    Args:
        task_doc (Document): Document object of the task file.
        exclude (str): The file to be generated; excluded from the code snippets.
        project_repo (ProjectRepo): ProjectRepo object of the project.
        use_inc (bool): Whether the scenario involves incremental development. Defaults to False.

    Returns:
        str: Joined code snippets for generating the `exclude` file.
    """
    if not task_doc:
        return ""
    if not task_doc.content:
        # Bug fix: repo getters are coroutines; without await, task_doc became
        # a coroutine object and json.loads(task_doc.content) crashed.
        task_doc = await project_repo.docs.task.get(filename=task_doc.filename)
    m = json.loads(task_doc.content)
    # Bug fix: the ternary branches were swapped — incremental runs must read
    # the "Refined Task list" key, normal runs the plain "Task list" key.
    code_filenames = m.get(REFINED_TASK_LIST.key, []) if use_inc else m.get(TASK_LIST.key, [])
    codes = []
    src_file_repo = project_repo.srcs

    if use_inc:
        # Incremental development: merge the current src files with the old
        # workspace created by the previous CodePlanAndChange step.
        src_files = src_file_repo.all_files
        old_file_repo = project_repo.git_repo.new_file_repository(relative_path=project_repo.old_workspace)
        old_files = old_file_repo.all_files
        # Union of files across the src and old workspaces.
        union_files_list = list(set(src_files) | set(old_files))
        for filename in union_files_list:
            if filename == exclude:
                # The file being generated: prepend its OLD version, marked as
                # "to be rewritten", so the model sees what must change.
                # main.py is skipped to keep the entry point lean.
                if filename in old_files and filename != "main.py":
                    doc = await old_file_repo.get(filename=filename)
                    codes.insert(0, f"-----Now, {filename} to be rewritten\n```{doc.content}```\n=====")
                # Already rewritten into the src workspace: skip it.
                continue
            doc = await src_file_repo.get(filename=filename)
            # Missing in the src workspace: skip it.
            if not doc:
                continue
            codes.append(f"----- {filename}\n```{doc.content}```")
    else:
        # Normal scenario: only the files listed by the task document.
        for filename in code_filenames:
            # Exclude the current file to get the snippets for generating it.
            if filename == exclude:
                continue
            doc = await src_file_repo.get(filename=filename)
            if not doc:
                continue
            codes.append(f"----- {filename}\n```{doc.content}```")

    return "\n".join(codes)

View file

@ -0,0 +1,210 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/12/26
@Author : mannaandpoem
@File : write_code_plan_and_change_an.py
"""
import os
from pydantic import Field
from metagpt.actions.action import Action
from metagpt.actions.action_node import ActionNode
from metagpt.schema import CodePlanAndChangeContext
CODE_PLAN_AND_CHANGE = ActionNode(
key="Code Plan And Change",
expected_type=str,
instruction="Developing comprehensive and step-by-step incremental development plan, and write Incremental "
"Change by making a code draft that how to implement incremental development including detailed steps based on the "
"context. Note: Track incremental changes using mark of '+' or '-' for add/modify/delete code, and conforms to the "
"output format of git diff",
example="""
1. Plan for calculator.py: Enhance the functionality of `calculator.py` by extending it to incorporate methods for subtraction, multiplication, and division. Additionally, implement robust error handling for the division operation to mitigate potential issues related to division by zero.
```python
class Calculator:
self.result = number1 + number2
return self.result
- def sub(self, number1, number2) -> float:
+ def subtract(self, number1: float, number2: float) -> float:
+ '''
+ Subtracts the second number from the first and returns the result.
+
+ Args:
+ number1 (float): The number to be subtracted from.
+ number2 (float): The number to subtract.
+
+ Returns:
+ float: The difference of number1 and number2.
+ '''
+ self.result = number1 - number2
+ return self.result
+
def multiply(self, number1: float, number2: float) -> float:
- pass
+ '''
+ Multiplies two numbers and returns the result.
+
+ Args:
+ number1 (float): The first number to multiply.
+ number2 (float): The second number to multiply.
+
+ Returns:
+ float: The product of number1 and number2.
+ '''
+ self.result = number1 * number2
+ return self.result
+
def divide(self, number1: float, number2: float) -> float:
- pass
+ '''
+ ValueError: If the second number is zero.
+ '''
+ if number2 == 0:
+ raise ValueError('Cannot divide by zero')
+ self.result = number1 / number2
+ return self.result
+
- def reset_result(self):
+ def clear(self):
+ if self.result != 0.0:
+ print("Result is not zero, clearing...")
+ else:
+ print("Result is already zero, no need to clear.")
+
self.result = 0.0
```
2. Plan for main.py: Integrate new API endpoints for subtraction, multiplication, and division into the existing codebase of `main.py`. Then, ensure seamless integration with the overall application architecture and maintain consistency with coding standards.
```python
def add_numbers():
result = calculator.add_numbers(num1, num2)
return jsonify({'result': result}), 200
-# TODO: Implement subtraction, multiplication, and division operations
+@app.route('/subtract_numbers', methods=['POST'])
+def subtract_numbers():
+ data = request.get_json()
+ num1 = data.get('num1', 0)
+ num2 = data.get('num2', 0)
+ result = calculator.subtract_numbers(num1, num2)
+ return jsonify({'result': result}), 200
+
+@app.route('/multiply_numbers', methods=['POST'])
+def multiply_numbers():
+ data = request.get_json()
+ num1 = data.get('num1', 0)
+ num2 = data.get('num2', 0)
+ try:
+ result = calculator.divide_numbers(num1, num2)
+ except ValueError as e:
+ return jsonify({'error': str(e)}), 400
+ return jsonify({'result': result}), 200
+
if __name__ == '__main__':
app.run()
```""",
)
CODE_PLAN_AND_CHANGE_CONTEXT = """
## User New Requirements
{requirement}
## PRD
{prd}
## Design
{design}
## Task
{task}
## Legacy Code
{code}
"""
# Prompt template for incremental code (re)writing; WriteCode.run fills it via
# str.format, including the `filename` placeholder restored below (the source
# rendering had lost the {filename} fields, showing "(unknown)" instead).
REFINED_TEMPLATE = """
NOTICE
Role: You are a professional engineer; The main goal is to complete incremental development by combining legacy code and plan and Incremental Change, ensuring the integration of new features.

# Context
## User New Requirements
{user_requirement}

## Code Plan And Change
{code_plan_and_change}

## Design
{design}

## Task
{task}

## Legacy Code
```Code
{code}
```

## Debug logs
```text
{logs}

{summary_log}
```

## Bug Feedback logs
```text
{feedback}
```

# Format example
## Code: {filename}
```python
## {filename}
...
```

# Instruction: Based on the context, follow "Format example", write or rewrite code.
## Write/Rewrite Code: Only write one file {filename}, write or rewrite complete code using triple quotes based on the following attentions and context.
1. Only One file: do your best to implement THIS ONLY ONE FILE.
2. COMPLETE CODE: Your code will be part of the entire project, so please implement complete, reliable, reusable code snippets.
3. Set default value: If there is any setting, ALWAYS SET A DEFAULT VALUE, ALWAYS USE STRONG TYPE AND EXPLICIT VARIABLE. AVOID circular import.
4. Follow design: YOU MUST FOLLOW "Data structures and interfaces". DONT CHANGE ANY DESIGN. Do not use public member functions that do not exist in your design.
5. Follow Code Plan And Change: If there is any Incremental Change that is marked by the git diff format using '+' and '-' for add/modify/delete code, or Legacy Code files contain "{filename} to be rewritten", you must merge it into the code file according to the plan.
6. CAREFULLY CHECK THAT YOU DONT MISS ANY NECESSARY CLASS/FUNCTION IN THIS FILE.
7. Before using a external variable/module, make sure you import it first.
8. Write out EVERY CODE DETAIL, DON'T LEAVE TODO.
9. Attention: Retain details that are not related to incremental development but are important for maintaining the consistency and clarity of the old code.
"""
WRITE_CODE_PLAN_AND_CHANGE_NODE = ActionNode.from_children("WriteCodePlanAndChange", [CODE_PLAN_AND_CHANGE])
class WriteCodePlanAndChange(Action):
    """Produce an incremental development plan plus git-diff-style code changes."""

    name: str = "WriteCodePlanAndChange"
    i_context: CodePlanAndChangeContext = Field(default_factory=CodePlanAndChangeContext)

    async def run(self, *args, **kwargs):
        # Bug fix: the original assigned only the first literal and left the
        # second line as a dangling no-op string expression; parenthesizing
        # makes the two literals concatenate into one system prompt.
        self.llm.system_prompt = (
            "You are a professional software engineer, your primary responsibility is to "
            "meticulously craft comprehensive incremental development plan and deliver detailed incremental change"
        )
        prd_doc = await self.repo.docs.prd.get(filename=self.i_context.prd_filename)
        design_doc = await self.repo.docs.system_design.get(filename=self.i_context.design_filename)
        task_doc = await self.repo.docs.task.get(filename=self.i_context.task_filename)
        code_text = await self.get_old_codes()
        context = CODE_PLAN_AND_CHANGE_CONTEXT.format(
            requirement=self.i_context.requirement,
            prd=prd_doc.content,
            design=design_doc.content,
            task=task_doc.content,
            code=code_text,
        )
        return await WRITE_CODE_PLAN_AND_CHANGE_NODE.fill(context=context, llm=self.llm, schema="json")

    async def get_old_codes(self) -> str:
        """Concatenate every file of the pre-increment workspace as fenced snippets."""
        # The old workspace mirrors the original project directory name inside the git repo.
        self.repo.old_workspace = self.repo.git_repo.workdir / os.path.basename(self.config.project_path)
        old_file_repo = self.repo.git_repo.new_file_repository(relative_path=self.repo.old_workspace)
        old_codes = await old_file_repo.get_all()
        codes = [f"----- {code.filename}\n```{code.content}```" for code in old_codes]
        return "\n".join(codes)

View file

@ -13,6 +13,7 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions import WriteCode
from metagpt.actions.action import Action
from metagpt.const import CODE_PLAN_AND_CHANGE_FILENAME, REQUIREMENT_FILENAME
from metagpt.logs import logger
from metagpt.schema import CodingContext
from metagpt.utils.common import CodeParser
@ -137,6 +138,7 @@ class WriteCodeReview(Action):
async def run(self, *args, **kwargs) -> CodingContext:
iterative_code = self.i_context.code_doc.content
k = self.context.config.code_review_k_times or 1
for i in range(k):
format_example = FORMAT_EXAMPLE.format(filename=self.i_context.code_doc.filename)
task_content = self.i_context.task_doc.content if self.i_context.task_doc else ""
@ -144,14 +146,30 @@ class WriteCodeReview(Action):
self.i_context.task_doc,
exclude=self.i_context.filename,
project_repo=self.repo.with_src_path(self.context.src_workspace),
use_inc=self.config.inc,
)
context = "\n".join(
[
"## System Design\n" + str(self.i_context.design_doc) + "\n",
"## Tasks\n" + task_content + "\n",
"## Code Files\n" + code_context + "\n",
]
)
if not self.config.inc:
context = "\n".join(
[
"## System Design\n" + str(self.i_context.design_doc) + "\n",
"## Task\n" + task_content + "\n",
"## Code Files\n" + code_context + "\n",
]
)
else:
requirement_doc = await self.repo.docs.get(filename=REQUIREMENT_FILENAME)
code_plan_and_change_doc = await self.repo.get(filename=CODE_PLAN_AND_CHANGE_FILENAME)
context = "\n".join(
[
"## User New Requirements\n" + str(requirement_doc) + "\n",
"## Code Plan And Change\n" + str(code_plan_and_change_doc) + "\n",
"## System Design\n" + str(self.i_context.design_doc) + "\n",
"## Task\n" + task_content + "\n",
"## Code Files\n" + code_context + "\n",
]
)
context_prompt = PROMPT_TEMPLATE.format(
context=context,
code=iterative_code,
@ -161,7 +179,7 @@ class WriteCodeReview(Action):
format_example=format_example,
)
len1 = len(iterative_code) if iterative_code else 0
len2 = len(self.context.code_doc.content) if self.context.code_doc.content else 0
len2 = len(self.i_context.code_doc.content) if self.i_context.code_doc.content else 0
logger.info(
f"Code review and rewrite {self.i_context.code_doc.filename}: {i + 1}/{k} | len(iterative_code)={len1}, "
f"len(self.i_context.code_doc.content)={len2}"

View file

@ -20,7 +20,9 @@ from metagpt.actions import Action, ActionOutput
from metagpt.actions.action_node import ActionNode
from metagpt.actions.fix_bug import FixBug
from metagpt.actions.write_prd_an import (
COMPETITIVE_QUADRANT_CHART,
PROJECT_NAME,
REFINED_PRD_NODE,
WP_IS_RELATIVE_NODE,
WP_ISSUE_TYPE_NODE,
WRITE_PRD_NODE,
@ -138,21 +140,21 @@ class WritePRD(Action):
if not self.project_name:
self.project_name = Path(self.project_path).name
prompt = NEW_REQ_TEMPLATE.format(requirements=req.content, old_prd=related_doc.content)
node = await WRITE_PRD_NODE.fill(context=prompt, llm=self.llm, schema=self.prompt_schema)
node = await REFINED_PRD_NODE.fill(context=prompt, llm=self.llm, schema=self.prompt_schema)
related_doc.content = node.instruct_content.model_dump_json()
await self._rename_workspace(node)
return related_doc
async def _update_prd(self, req: Document, prd_doc: Document) -> Document:
    """Merge a new requirement into an existing PRD and persist every artifact.

    :param req: the incoming requirement document.
    :param prd_doc: the existing PRD document to refresh.
    :return: the merged, saved PRD document.
    """
    new_prd_doc: Document = await self._merge(req, prd_doc)
    # save_doc is a coroutine; without `await` the write silently never happens.
    await self.repo.docs.prd.save_doc(doc=new_prd_doc)
    await self._save_competitive_analysis(new_prd_doc)
    await self.repo.resources.prd.save_pdf(doc=new_prd_doc)
    return new_prd_doc
async def _save_competitive_analysis(self, prd_doc: Document):
m = json.loads(prd_doc.content)
quadrant_chart = m.get("Competitive Quadrant Chart")
quadrant_chart = m.get(COMPETITIVE_QUADRANT_CHART.key)
if not quadrant_chart:
return
pathname = self.repo.workdir / COMPETITIVE_ANALYSIS_FILE_REPO / Path(prd_doc.filename).stem

View file

@ -30,6 +30,13 @@ ORIGINAL_REQUIREMENTS = ActionNode(
example="Create a 2048 game",
)
# Captures the user's new requirement text verbatim for an incremental (existing-project) run.
REFINED_REQUIREMENTS = ActionNode(
    key="Refined Requirements",
    expected_type=str,
    instruction="Place the New user's original requirements here.",
    example="Create a 2048 game with a new feature that ...",
)
PROJECT_NAME = ActionNode(
key="Project Name",
expected_type=str,
@ -45,6 +52,18 @@ PRODUCT_GOALS = ActionNode(
example=["Create an engaging user experience", "Improve accessibility, be responsive", "More beautiful UI"],
)
# Product goals re-stated for incremental development on an existing project.
REFINED_PRODUCT_GOALS = ActionNode(
    key="Refined Product Goals",
    expected_type=List[str],
    # NOTE: the two string fragments are implicitly concatenated; the trailing/leading
    # spaces at the seam must be present or the rendered prompt runs words together.
    instruction="Update and expand the original product goals to reflect the evolving needs due to incremental "
    "development. Ensure that the refined goals align with the current project direction and contribute to its success.",
    example=[
        "Enhance user engagement through new features",
        "Optimize performance for scalability",
        "Integrate innovative UI enhancements",
    ],
)
USER_STORIES = ActionNode(
key="User Stories",
expected_type=List[str],
@ -58,6 +77,20 @@ USER_STORIES = ActionNode(
],
)
# Scenario-based user stories extended to cover incremental features on an existing project.
REFINED_USER_STORIES = ActionNode(
    key="Refined User Stories",
    expected_type=List[str],
    instruction="Update and expand the original scenario-based user stories to reflect the evolving needs due to "
    "incremental development. Ensure that the refined user stories capture incremental features and improvements. ",
    example=[
        "As a player, I want to choose difficulty levels to challenge my skills",
        "As a player, I want a visually appealing score display after each game for a better gaming experience",
        "As a player, I want a convenient restart button displayed when I lose to quickly start a new game",
        "As a player, I want an enhanced and aesthetically pleasing UI to elevate the overall gaming experience",
        "As a player, I want the ability to play the game seamlessly on my mobile phone for on-the-go entertainment",
    ],
)
COMPETITIVE_ANALYSIS = ActionNode(
key="Competitive Analysis",
expected_type=List[str],
@ -97,6 +130,15 @@ REQUIREMENT_ANALYSIS = ActionNode(
example="",
)
# Requirement analysis revisited for the incremental scope; answers are a list of points.
REFINED_REQUIREMENT_ANALYSIS = ActionNode(
    key="Refined Requirement Analysis",
    expected_type=List[str],
    instruction="Review and refine the existing requirement analysis to align with the evolving needs of the project "
    "due to incremental development. Ensure the analysis comprehensively covers the new features and enhancements "
    "required for the refined project scope.",
    example=["Require add/update/modify ..."],
)
REQUIREMENT_POOL = ActionNode(
key="Requirement Pool",
expected_type=List[List[str]],
@ -104,6 +146,14 @@ REQUIREMENT_POOL = ActionNode(
example=[["P0", "The main code ..."], ["P0", "The game algorithm ..."]],
)
# Prioritized requirement pool for incremental runs; each entry is a [priority, text] pair.
REFINED_REQUIREMENT_POOL = ActionNode(
    key="Refined Requirement Pool",
    expected_type=List[List[str]],
    instruction="List down the top 5 to 7 requirements with their priority (P0, P1, P2). "
    "Cover both legacy content and incremental content. Retain content unrelated to incremental development",
    example=[["P0", "The main code ..."], ["P0", "The game algorithm ..."]],
)
UI_DESIGN_DRAFT = ActionNode(
key="UI Design draft",
expected_type=str,
@ -152,6 +202,22 @@ NODES = [
ANYTHING_UNCLEAR,
]
# Node set used when refreshing an existing PRD during incremental development;
# mirrors NODES but swaps in the "Refined *" variants where they exist.
REFINED_NODES = [
    LANGUAGE,
    PROGRAMMING_LANGUAGE,
    REFINED_REQUIREMENTS,
    PROJECT_NAME,
    REFINED_PRODUCT_GOALS,
    REFINED_USER_STORIES,
    COMPETITIVE_ANALYSIS,
    COMPETITIVE_QUADRANT_CHART,
    REFINED_REQUIREMENT_ANALYSIS,
    REFINED_REQUIREMENT_POOL,
    UI_DESIGN_DRAFT,
    ANYTHING_UNCLEAR,
]
# Composite nodes: full PRD for a fresh project vs. refined PRD for incremental runs.
WRITE_PRD_NODE = ActionNode.from_children("WritePRD", NODES)
REFINED_PRD_NODE = ActionNode.from_children("RefinedPRD", REFINED_NODES)
# Small helper nodes used by WritePRD's pre-checks (issue typing / relatedness).
WP_ISSUE_TYPE_NODE = ActionNode.from_children("WP_ISSUE_TYPE", [ISSUE_TYPE, REASON])
WP_IS_RELATIVE_NODE = ActionNode.from_children("WP_IS_RELATIVE", [IS_RELATIVE, REASON])

View file

@ -38,6 +38,7 @@ class CLIParams(BaseModel):
if self.project_path:
self.inc = True
self.project_name = self.project_name or Path(self.project_path).name
return self
class Config(CLIParams, YamlModel):

View file

@ -82,17 +82,20 @@ MESSAGE_ROUTE_TO_NONE = "<none>"
# Well-known document filenames written into the project workspace.
REQUIREMENT_FILENAME = "requirement.txt"
BUGFIX_FILENAME = "bugfix.txt"
PACKAGE_REQUIREMENTS_FILENAME = "requirements.txt"
# Machine-readable plan produced during incremental development.
CODE_PLAN_AND_CHANGE_FILENAME = "code_plan_and_change.json"
# Repo-relative directories holding generated design documents.
DOCS_FILE_REPO = "docs"
PRDS_FILE_REPO = "docs/prd"
SYSTEM_DESIGN_FILE_REPO = "docs/system_design"
TASK_FILE_REPO = "docs/task"
CODE_PLAN_AND_CHANGE_FILE_REPO = "docs/code_plan_and_change"
# Repo-relative directories holding rendered resource outputs.
COMPETITIVE_ANALYSIS_FILE_REPO = "resources/competitive_analysis"
DATA_API_DESIGN_FILE_REPO = "resources/data_api_design"
SEQ_FLOW_FILE_REPO = "resources/seq_flow"
SYSTEM_DESIGN_PDF_FILE_REPO = "resources/system_design"
PRD_PDF_FILE_REPO = "resources/prd"
TASK_PDF_FILE_REPO = "resources/api_spec_and_task"
CODE_PLAN_AND_CHANGE_PDF_FILE_REPO = "resources/code_plan_and_change"
# Test code and captured test output directories.
TEST_CODES_FILE_REPO = "tests"
TEST_OUTPUTS_FILE_REPO = "test_outputs"
CODE_SUMMARIES_FILE_REPO = "docs/code_summary"

View file

@ -95,7 +95,3 @@ class Context(BaseModel):
if llm.cost_manager is None:
llm.cost_manager = self.cost_manager
return llm
# Global context, not in Env
CONTEXT = Context()

View file

@ -10,7 +10,7 @@ from typing import Optional
from pydantic import BaseModel, ConfigDict, Field
from metagpt.config2 import Config
from metagpt.context import CONTEXT, Context
from metagpt.context import Context
from metagpt.provider.base_llm import BaseLLM
@ -34,7 +34,7 @@ class ContextMixin(BaseModel):
def __init__(
self,
context: Optional[Context] = CONTEXT,
context: Optional[Context] = None,
config: Optional[Config] = None,
llm: Optional[BaseLLM] = None,
**kwargs,
@ -81,7 +81,7 @@ class ContextMixin(BaseModel):
"""Role context: role context > context"""
if self.private_context:
return self.private_context
return CONTEXT
return Context()
@context.setter
def context(self, context: Context) -> None:

View file

@ -13,7 +13,7 @@ import aiofiles
import yaml
from pydantic import BaseModel, Field
from metagpt.context import CONTEXT, Context
from metagpt.context import Context
class Example(BaseModel):
@ -73,14 +73,15 @@ class SkillsDeclaration(BaseModel):
skill_data = yaml.safe_load(data)
return SkillsDeclaration(**skill_data)
def get_skill_list(self, entity_name: str = "Assistant", context: Context = CONTEXT) -> Dict:
def get_skill_list(self, entity_name: str = "Assistant", context: Context = None) -> Dict:
"""Return the skill name based on the skill description."""
entity = self.entities.get(entity_name)
if not entity:
return {}
# List of skills that the agent chooses to activate.
agent_skills = context.kwargs.agent_skills
ctx = context or Context()
agent_skills = ctx.kwargs.agent_skills
if not agent_skills:
return {}

View file

@ -8,12 +8,13 @@
from typing import Optional
from metagpt.configs.llm_config import LLMConfig
from metagpt.context import CONTEXT
from metagpt.context import Context
from metagpt.provider.base_llm import BaseLLM
def LLM(llm_config: Optional[LLMConfig] = None, context: Context = None) -> BaseLLM:
    """get the default llm provider if name is None

    :param llm_config: optional explicit LLM configuration; when given, a
        cost-managed LLM is registered on the context before it is returned.
    :param context: optional Context to draw the LLM from; a fresh Context()
        is created when omitted.
    """
    # Merged-diff residue removed: the stale module-global CONTEXT variant
    # duplicated this body; only the context-parameter version is kept.
    ctx = context or Context()
    if llm_config is not None:
        ctx.llm_with_cost_manager_from_llm_config(llm_config)
    return ctx.llm()

View file

@ -22,7 +22,6 @@ from pydantic import Field
from metagpt.actions.skill_action import ArgumentsParingAction, SkillAction
from metagpt.actions.talk_action import TalkAction
from metagpt.context import CONTEXT
from metagpt.learn.skill_loader import SkillsDeclaration
from metagpt.logs import logger
from metagpt.memory.brain_memory import BrainMemory
@ -48,7 +47,7 @@ class Assistant(Role):
def __init__(self, **kwargs):
super().__init__(**kwargs)
language = kwargs.get("language") or self.context.kwargs.language or CONTEXT.kwargs.language
language = kwargs.get("language") or self.context.kwargs.language
self.constraints = self.constraints.format(language=language)
async def think(self) -> bool:

View file

@ -20,17 +20,27 @@
from __future__ import annotations
import json
import os
from collections import defaultdict
from pathlib import Path
from typing import Set
from metagpt.actions import Action, WriteCode, WriteCodeReview, WriteTasks
from metagpt.actions.fix_bug import FixBug
from metagpt.actions.project_management_an import REFINED_TASK_LIST, TASK_LIST
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.const import SYSTEM_DESIGN_FILE_REPO, TASK_FILE_REPO
from metagpt.actions.write_code_plan_and_change_an import WriteCodePlanAndChange
from metagpt.const import (
CODE_PLAN_AND_CHANGE_FILE_REPO,
CODE_PLAN_AND_CHANGE_FILENAME,
REQUIREMENT_FILENAME,
SYSTEM_DESIGN_FILE_REPO,
TASK_FILE_REPO,
)
from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import (
CodePlanAndChangeContext,
CodeSummarizeContext,
CodingContext,
Document,
@ -80,7 +90,7 @@ class Engineer(Role):
super().__init__(**kwargs)
self.set_actions([WriteCode])
self._watch([WriteTasks, SummarizeCode, WriteCode, WriteCodeReview, FixBug])
self._watch([WriteTasks, SummarizeCode, WriteCode, WriteCodeReview, FixBug, WriteCodePlanAndChange])
self.code_todos = []
self.summarize_todos = []
self.next_todo_action = any_to_name(WriteCode)
@ -88,7 +98,7 @@ class Engineer(Role):
@staticmethod
def _parse_tasks(task_msg: Document) -> list[str]:
    """Extract the task list from a task document's JSON content.

    Looks up the standard task-list key first and falls back to the
    refined-task-list key used by incremental-development runs.
    """
    m = json.loads(task_msg.content)
    # The stale hard-coded `m.get("Task list")` return from the merged diff is
    # dropped; the constant-keyed lookup with the refined fallback is kept.
    return m.get(TASK_LIST.key) or m.get(REFINED_TASK_LIST.key)
async def _act_sp_with_cr(self, review=False) -> Set[str]:
changed_files = set()
@ -106,9 +116,13 @@ class Engineer(Role):
action = WriteCodeReview(i_context=coding_context, context=self.context, llm=self.llm)
self._init_action(action)
coding_context = await action.run()
dependencies = {coding_context.design_doc.root_relative_path, coding_context.task_doc.root_relative_path}
if self.config.inc:
dependencies.add(os.path.join(CODE_PLAN_AND_CHANGE_FILE_REPO, CODE_PLAN_AND_CHANGE_FILENAME))
await self.project_repo.srcs.save(
filename=coding_context.filename,
dependencies={coding_context.design_doc.root_relative_path, coding_context.task_doc.root_relative_path},
dependencies=dependencies,
content=coding_context.code_doc.content,
)
msg = Message(
@ -128,6 +142,9 @@ class Engineer(Role):
"""Determines the mode of action based on whether code review is used."""
if self.rc.todo is None:
return None
if isinstance(self.rc.todo, WriteCodePlanAndChange):
self.next_todo_action = any_to_name(WriteCode)
return await self._act_code_plan_and_change()
if isinstance(self.rc.todo, WriteCode):
self.next_todo_action = any_to_name(SummarizeCode)
return await self._act_write_code()
@ -161,7 +178,7 @@ class Engineer(Role):
is_pass, reason = await self._is_pass(summary)
if not is_pass:
todo.i_context.reason = reason
tasks.append(todo.i_context.dict())
tasks.append(todo.i_context.model_dump())
await self.project_repo.docs.code_summary.save(
filename=Path(todo.i_context.design_filename).name,
@ -187,6 +204,34 @@ class Engineer(Role):
content=json.dumps(tasks), role=self.profile, cause_by=SummarizeCode, send_to=self, sent_from=self
)
async def _act_code_plan_and_change(self):
    """Produce the code plan & change document that steers later WriteCode / WriteCodeReview runs."""
    logger.info("Writing code plan and change..")
    todo = self.rc.todo
    plan_node = await todo.run()
    plan_json = plan_node.instruct_content.model_dump_json()
    # The plan is derived from these inputs; record them as file dependencies.
    dep_files = {
        REQUIREMENT_FILENAME,
        todo.i_context.prd_filename,
        todo.i_context.design_filename,
        todo.i_context.task_filename,
    }
    await self.project_repo.docs.code_plan_and_change.save(
        filename=todo.i_context.filename, content=plan_json, dependencies=dep_files
    )
    # Also persist a human-readable markdown rendering alongside the JSON doc.
    markdown_name = Path(todo.i_context.filename).with_suffix(".md").name
    await self.project_repo.resources.code_plan_and_change.save(
        filename=markdown_name,
        content=plan_node.content,
        dependencies=dep_files,
    )
    return Message(
        content=plan_json,
        role=self.profile,
        cause_by=WriteCodePlanAndChange,
        send_to=self,
        sent_from=self,
    )
async def _is_pass(self, summary) -> (str, str):
rsp = await self.llm.aask(msg=IS_PASS_PROMPT.format(context=summary), stream=False)
logger.info(rsp)
@ -197,11 +242,16 @@ class Engineer(Role):
async def _think(self) -> Action | None:
if not self.src_workspace:
self.src_workspace = self.git_repo.workdir / self.git_repo.workdir.name
write_code_filters = any_to_str_set([WriteTasks, SummarizeCode, FixBug])
write_plan_and_change_filters = any_to_str_set([WriteTasks])
write_code_filters = any_to_str_set([WriteTasks, WriteCodePlanAndChange, SummarizeCode, FixBug])
summarize_code_filters = any_to_str_set([WriteCode, WriteCodeReview])
if not self.rc.news:
return None
msg = self.rc.news[0]
if self.config.inc and msg.cause_by in write_plan_and_change_filters:
logger.debug(f"TODO WriteCodePlanAndChange:{msg.model_dump_json()}")
await self._new_code_plan_and_change_action()
return self.rc.todo
if msg.cause_by in write_code_filters:
logger.debug(f"TODO WriteCode:{msg.model_dump_json()}")
await self._new_code_actions(bug_fix=msg.cause_by == any_to_str(FixBug))
@ -292,10 +342,18 @@ class Engineer(Role):
summarizations[ctx].append(filename)
for ctx, filenames in summarizations.items():
ctx.codes_filenames = filenames
self.summarize_todos.append(SummarizeCode(i_context=ctx, llm=self.llm))
self.summarize_todos.append(SummarizeCode(i_context=ctx, context=self.context, llm=self.llm))
if self.summarize_todos:
self.set_todo(self.summarize_todos[0])
async def _new_code_plan_and_change_action(self):
    """Create a WriteCodePlanAndChange action for subsequent to-do actions."""
    doc = await self.project_repo.docs.get(REQUIREMENT_FILENAME)
    # Fall back to an empty requirement when the document is absent.
    plan_ctx = CodePlanAndChangeContext.loads(
        self.project_repo.all_files, requirement=doc.content if doc else ""
    )
    self.rc.todo = WriteCodePlanAndChange(i_context=plan_ctx, context=self.context, llm=self.llm)
@property
def action_description(self) -> str:
"""AgentStore uses this attribute to display to the user what actions the current role should take."""

View file

@ -57,6 +57,8 @@ class QaEngineer(Role):
code_doc = await src_file_repo.get(filename)
if not code_doc:
continue
if not code_doc.filename.endswith(".py"):
continue
test_doc = await self.project_repo.tests.get("test_" + code_doc.filename)
if not test_doc:
test_doc = Document(

View file

@ -37,10 +37,12 @@ from pydantic import (
)
from metagpt.const import (
CODE_PLAN_AND_CHANGE_FILENAME,
MESSAGE_ROUTE_CAUSE_BY,
MESSAGE_ROUTE_FROM,
MESSAGE_ROUTE_TO,
MESSAGE_ROUTE_TO_ALL,
PRDS_FILE_REPO,
SYSTEM_DESIGN_FILE_REPO,
TASK_FILE_REPO,
)
@ -471,6 +473,30 @@ class BugFixContext(BaseContext):
filename: str = ""
class CodePlanAndChangeContext(BaseModel):
    """Context for the code-plan-and-change step of incremental development.

    Holds the requirement text plus the filenames of the PRD, system-design,
    and task documents the plan is derived from.
    """

    filename: str = CODE_PLAN_AND_CHANGE_FILENAME
    requirement: str = ""
    prd_filename: str = ""
    design_filename: str = ""
    task_filename: str = ""

    @staticmethod
    def loads(filenames: List, **kwargs) -> CodePlanAndChangeContext:
        """Build a context from repo-relative paths, bucketing each path by its parent repo."""
        ctx = CodePlanAndChangeContext(requirement=kwargs.get("requirement", ""))
        buckets = (
            (PRDS_FILE_REPO, "prd_filename"),
            (SYSTEM_DESIGN_FILE_REPO, "design_filename"),
            (TASK_FILE_REPO, "task_filename"),
        )
        for raw in filenames:
            path = Path(raw)
            for repo, attr in buckets:
                if path.is_relative_to(repo):
                    setattr(ctx, attr, path.name)
                    break  # a path belongs to at most one bucket
        return ctx
# mermaid class view
class UMLClassMeta(BaseModel):
name: str = ""

View file

@ -8,6 +8,7 @@ import typer
from metagpt.config2 import config
from metagpt.const import CONFIG_ROOT, METAGPT_ROOT
from metagpt.context import Context
app = typer.Typer(add_completion=False, pretty_exceptions_show_locals=False)
@ -37,9 +38,10 @@ def generate_repo(
from metagpt.team import Team
config.update_via_cli(project_path, project_name, inc, reqa_file, max_auto_summarize_code)
ctx = Context(config=config)
if not recover_path:
company = Team()
company = Team(context=ctx)
company.hire(
[
ProductManager(),
@ -58,7 +60,7 @@ def generate_repo(
if not stg_path.exists() or not str(stg_path).endswith("team"):
raise FileNotFoundError(f"{recover_path} not exists or not endswith `team`")
company = Team.deserialize(stg_path=stg_path)
company = Team.deserialize(stg_path=stg_path, context=ctx)
idea = company.idea
company.invest(investment)

View file

@ -10,12 +10,13 @@
import warnings
from pathlib import Path
from typing import Any
from typing import Any, Optional
from pydantic import BaseModel, ConfigDict, Field
from metagpt.actions import UserRequirement
from metagpt.const import MESSAGE_ROUTE_TO_ALL, SERDESER_PATH
from metagpt.context import Context
from metagpt.environment import Environment
from metagpt.logs import logger
from metagpt.roles import Role
@ -36,12 +37,17 @@ class Team(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
env: Environment = Field(default_factory=Environment)
env: Optional[Environment] = None
investment: float = Field(default=10.0)
idea: str = Field(default="")
def __init__(self, **data: Any):
def __init__(self, context: Context = None, **data: Any):
super(Team, self).__init__(**data)
ctx = context or Context()
if not self.env:
self.env = Environment(context=ctx)
else:
self.env.context = ctx # The `env` object is allocated by deserialization
if "roles" in data:
self.hire(data["roles"])
if "env_desc" in data:
@ -54,7 +60,7 @@ class Team(BaseModel):
write_json_file(team_info_path, self.model_dump())
@classmethod
def deserialize(cls, stg_path: Path) -> "Team":
def deserialize(cls, stg_path: Path, context: Context = None) -> "Team":
"""stg_path = ./storage/team"""
# recover team_info
team_info_path = stg_path.joinpath("team.json")
@ -64,7 +70,8 @@ class Team(BaseModel):
)
team_info: dict = read_json_file(team_info_path)
team = Team(**team_info)
ctx = context or Context()
team = Team(**team_info, context=ctx)
return team
def hire(self, roles: list[Role]):

View file

@ -9,6 +9,7 @@
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Set
@ -36,7 +37,9 @@ class DependencyFile:
"""Load dependencies from the file asynchronously."""
if not self._filename.exists():
return
self._dependencies = json.loads(await aread(self._filename))
json_data = await aread(self._filename)
json_data = re.sub(r"\\+", "/", json_data) # Compatible with windows path
self._dependencies = json.loads(json_data)
@handle_exception
async def save(self):
@ -60,17 +63,20 @@ class DependencyFile:
key = Path(filename).relative_to(root)
except ValueError:
key = filename
skey = re.sub(r"\\+", "/", str(key)) # Compatible with windows path
if dependencies:
relative_paths = []
for i in dependencies:
try:
relative_paths.append(str(Path(i).relative_to(root)))
s = str(Path(i).relative_to(root))
except ValueError:
relative_paths.append(str(i))
self._dependencies[str(key)] = relative_paths
elif str(key) in self._dependencies:
del self._dependencies[str(key)]
s = str(i)
s = re.sub(r"\\+", "/", s) # Compatible with windows path
relative_paths.append(s)
self._dependencies[skey] = relative_paths
elif skey in self._dependencies:
del self._dependencies[skey]
if persist:
await self.save()

View file

@ -101,21 +101,28 @@ class FileRepository:
path_name = self.workdir / filename
if not path_name.exists():
return None
if not path_name.is_file():
return None
doc.content = await aread(path_name)
return doc
async def get_all(self, filter_ignored=True) -> List[Document]:
    """Get the content of all files in the repository.

    :param filter_ignored: when True, only files listed by ``all_files`` are
        returned; when False, the working directory is walked directly so
        every on-disk file is included.
    :return: List of Document instances representing files.
    """
    # Merged-diff residue removed: the stale unconditional os.walk lines from
    # the pre-`filter_ignored` version no longer precede the branch below.
    docs = []
    if filter_ignored:
        for f in self.all_files:
            doc = await self.get(f)
            docs.append(doc)
    else:
        for root, dirs, files in os.walk(str(self.workdir)):
            for file in files:
                file_path = Path(root) / file
                relative_path = file_path.relative_to(self.workdir)
                doc = await self.get(relative_path)
                docs.append(doc)
    return docs
@property

View file

@ -13,6 +13,8 @@ from pathlib import Path
from metagpt.const import (
CLASS_VIEW_FILE_REPO,
CODE_PLAN_AND_CHANGE_FILE_REPO,
CODE_PLAN_AND_CHANGE_PDF_FILE_REPO,
CODE_SUMMARIES_FILE_REPO,
CODE_SUMMARIES_PDF_FILE_REPO,
COMPETITIVE_ANALYSIS_FILE_REPO,
@ -43,6 +45,7 @@ class DocFileRepositories(FileRepository):
code_summary: FileRepository
graph_repo: FileRepository
class_view: FileRepository
code_plan_and_change: FileRepository
def __init__(self, git_repo):
super().__init__(git_repo=git_repo, relative_path=DOCS_FILE_REPO)
@ -53,6 +56,7 @@ class DocFileRepositories(FileRepository):
self.code_summary = git_repo.new_file_repository(relative_path=CODE_SUMMARIES_FILE_REPO)
self.graph_repo = git_repo.new_file_repository(relative_path=GRAPH_REPO_FILE_REPO)
self.class_view = git_repo.new_file_repository(relative_path=CLASS_VIEW_FILE_REPO)
self.code_plan_and_change = git_repo.new_file_repository(relative_path=CODE_PLAN_AND_CHANGE_FILE_REPO)
class ResourceFileRepositories(FileRepository):
@ -64,6 +68,7 @@ class ResourceFileRepositories(FileRepository):
api_spec_and_task: FileRepository
code_summary: FileRepository
sd_output: FileRepository
code_plan_and_change: FileRepository
def __init__(self, git_repo):
super().__init__(git_repo=git_repo, relative_path=RESOURCES_FILE_REPO)
@ -76,6 +81,7 @@ class ResourceFileRepositories(FileRepository):
self.api_spec_and_task = git_repo.new_file_repository(relative_path=TASK_PDF_FILE_REPO)
self.code_summary = git_repo.new_file_repository(relative_path=CODE_SUMMARIES_PDF_FILE_REPO)
self.sd_output = git_repo.new_file_repository(relative_path=SD_OUTPUT_FILE_REPO)
self.code_plan_and_change = git_repo.new_file_repository(relative_path=CODE_PLAN_AND_CHANGE_PDF_FILE_REPO)
class ProjectRepo(FileRepository):

View file

@ -4,10 +4,11 @@
@Time : 2023/5/18 00:40
@Author : alexanderwu
@File : token_counter.py
ref1: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
ref2: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
ref3: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
ref4: https://ai.google.dev/models/gemini
ref1: https://openai.com/pricing
ref2: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
ref3: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
ref4: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
ref5: https://ai.google.dev/models/gemini
"""
import tiktoken
@ -25,7 +26,10 @@ TOKEN_COSTS = {
"gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
"gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
"gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
"gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
"glm-3-turbo": {"prompt": 0.0, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
"glm-4": {"prompt": 0.0, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
@ -47,7 +51,10 @@ TOKEN_MAX = {
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-0613": 8192,
"gpt-4-turbo-preview": 128000,
"gpt-4-0125-preview": 128000,
"gpt-4-1106-preview": 128000,
"gpt-4-1106-vision-preview": 128000,
"text-embedding-ada-002": 8192,
"chatglm_turbo": 32768,
"gemini-pro": 32768,
@ -72,7 +79,10 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0613"):
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
"gpt-4-turbo-preview",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
"gpt-4-1106-vision-preview",
}:
tokens_per_message = 3 # # every reply is primed with <|start|>assistant<|message|>
tokens_per_name = 1