Mirror of https://github.com/FoundationAgents/MetaGPT.git (synced 2026-04-29 02:46:24 +02:00)

Commit 24e617b362: Merge main branch
325 changed files with 11290 additions and 3760 deletions
metagpt/actions/__init__.py

@@ -13,7 +13,7 @@ from metagpt.actions.add_requirement import UserRequirement
 from metagpt.actions.debug_error import DebugError
 from metagpt.actions.design_api import WriteDesign
 from metagpt.actions.design_api_review import DesignReview
-from metagpt.actions.project_management import AssignTasks, WriteTasks
+from metagpt.actions.project_management import WriteTasks
 from metagpt.actions.research import CollectLinks, WebBrowseAndSummarize, ConductResearch
 from metagpt.actions.run_code import RunCode
 from metagpt.actions.search_and_summarize import SearchAndSummarize

@@ -38,7 +38,6 @@ class ActionType(Enum):
     RUN_CODE = RunCode
     DEBUG_ERROR = DebugError
     WRITE_TASKS = WriteTasks
-    ASSIGN_TASKS = AssignTasks
     SEARCH_AND_SUMMARIZE = SearchAndSummarize
     COLLECT_LINKS = CollectLinks
     WEB_BROWSE_AND_SUMMARIZE = WebBrowseAndSummarize
metagpt/actions/action.py

@@ -8,61 +8,45 @@
 
 from __future__ import annotations
 
-from typing import Any, Optional, Union
+from typing import Optional, Union
 
-from pydantic import BaseModel, Field
+from pydantic import ConfigDict, Field, model_validator
 
 from metagpt.actions.action_node import ActionNode
 from metagpt.llm import LLM
-from metagpt.provider.base_gpt_api import BaseGPTAPI
+from metagpt.provider.base_llm import BaseLLM
 from metagpt.schema import (
     CodeSummarizeContext,
     CodingContext,
     RunCodeContext,
+    SerializationMixin,
     TestingContext,
 )
 
-action_subclass_registry = {}
 
-
-class Action(BaseModel):
+class Action(SerializationMixin, is_polymorphic_base=True):
+    model_config = ConfigDict(arbitrary_types_allowed=True, exclude=["llm"])
+
     name: str = ""
-    llm: BaseGPTAPI = Field(default_factory=LLM, exclude=True)
+    llm: BaseLLM = Field(default_factory=LLM, exclude=True)
     context: Union[dict, CodingContext, CodeSummarizeContext, TestingContext, RunCodeContext, str, None] = ""
-    prefix = ""  # prefix is prepended as the system_message when calling aask*
-    desc = ""  # for skill manager
+    prefix: str = ""  # prefix is prepended as the system_message when calling aask*
+    desc: str = ""  # for skill manager
     node: ActionNode = Field(default=None, exclude=True)
 
     # builtin variables
     builtin_class_name: str = ""
 
+    @model_validator(mode="before")
+    def set_name_if_empty(cls, values):
+        if "name" not in values or not values["name"]:
+            values["name"] = cls.__name__
+        return values
 
-    class Config:
-        arbitrary_types_allowed = True
-
-    def __init_with_instruction(self, instruction: str):
-        """Initialize action with instruction"""
-        self.node = ActionNode(key=self.name, expected_type=str, instruction=instruction, example="", schema="raw")
-        return self
-
-    def __init__(self, **kwargs: Any):
-        super().__init__(**kwargs)
-
-        # deserialize child classes dynamically for inherited `action`
-        object.__setattr__(self, "builtin_class_name", self.__class__.__name__)
-        self.__fields__["builtin_class_name"].default = self.__class__.__name__
-
-        if "instruction" in kwargs:
-            self.__init_with_instruction(kwargs["instruction"])
-
-    def __init_subclass__(cls, **kwargs: Any) -> None:
-        super().__init_subclass__(**kwargs)
-        action_subclass_registry[cls.__name__] = cls
-
-    def dict(self, *args, **kwargs) -> "DictStrAny":
-        obj_dict = super().dict(*args, **kwargs)
-        if "llm" in obj_dict:
-            obj_dict.pop("llm")
-        return obj_dict
+    @model_validator(mode="before")
+    def _init_with_instruction(cls, values):
+        if "instruction" in values:
+            name = values["name"]
+            i = values["instruction"]
+            values["node"] = ActionNode(key=name, expected_type=str, instruction=i, example="", schema="raw")
+        return values
 
     def set_prefix(self, prefix):
         """Set prefix for later usage"""
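The hunk above replaces the custom `__init__`/`__init_subclass__` machinery with pydantic v2 `model_validator(mode="before")` hooks. A minimal standalone sketch of that pattern (assuming pydantic v2; this is not MetaGPT's actual class, just the idea):

```python
# Sketch: a before-validator fills in defaults before field parsing (pydantic v2 assumed).
from pydantic import BaseModel, model_validator


class Action(BaseModel):
    name: str = ""

    @model_validator(mode="before")
    @classmethod
    def set_name_if_empty(cls, values: dict) -> dict:
        # cls is the concrete subclass being instantiated, so inherited
        # actions automatically default to their own class name.
        if not values.get("name"):
            values["name"] = cls.__name__
        return values


class WriteSomething(Action):
    pass


assert WriteSomething().name == "WriteSomething"
```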
metagpt/actions/action_node.py

@@ -11,12 +11,13 @@ NOTE: You should use typing.List instead of list to do type annotation. Because
 import json
 from typing import Any, Dict, List, Optional, Tuple, Type
 
-from pydantic import BaseModel, create_model, root_validator, validator
+from pydantic import BaseModel, create_model, model_validator
 from tenacity import retry, stop_after_attempt, wait_random_exponential
 
-from metagpt.llm import BaseGPTAPI
+from metagpt.config import CONFIG
+from metagpt.llm import BaseLLM
 from metagpt.logs import logger
-from metagpt.provider.postprecess.llm_output_postprecess import llm_output_postprecess
+from metagpt.provider.postprocess.llm_output_postprocess import llm_output_postprocess
 from metagpt.utils.common import OutputParser, general_after_log
 
 TAG = "CONTENT"

@@ -59,7 +60,7 @@ class ActionNode:
 
     # Action Context
     context: str  # all the context, including all necessary info
-    llm: BaseGPTAPI  # LLM with aask interface
+    llm: BaseLLM  # LLM with aask interface
     children: dict[str, "ActionNode"]
 
     # Action Input

@@ -116,50 +117,48 @@ class ActionNode:
         obj.add_children(nodes)
         return obj
 
-    def get_children_mapping(self) -> Dict[str, Tuple[Type, Any]]:
+    def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
         """Get the dict of child ActionNodes, indexed by key"""
-        return {k: (v.expected_type, ...) for k, v in self.children.items()}
+        exclude = exclude or []
+        return {k: (v.expected_type, ...) for k, v in self.children.items() if k not in exclude}
 
     def get_self_mapping(self) -> Dict[str, Tuple[Type, Any]]:
         """get self key: type mapping"""
        return {self.key: (self.expected_type, ...)}
 
-    def get_mapping(self, mode="children") -> Dict[str, Tuple[Type, Any]]:
+    def get_mapping(self, mode="children", exclude=None) -> Dict[str, Tuple[Type, Any]]:
         """get key: type mapping under mode"""
         if mode == "children" or (mode == "auto" and self.children):
-            return self.get_children_mapping()
-        return self.get_self_mapping()
+            return self.get_children_mapping(exclude=exclude)
+        return {} if exclude and self.key in exclude else self.get_self_mapping()
 
     @classmethod
     def create_model_class(cls, class_name: str, mapping: Dict[str, Tuple[Type, Any]]):
         """Dynamically generate a pydantic model class, used to validate that result types are correct"""
-        new_class = create_model(class_name, **mapping)
-
-        @validator("*", allow_reuse=True)
-        def check_name(v, field):
-            if field.name not in mapping.keys():
-                raise ValueError(f"Unrecognized block: {field.name}")
-            return v
-
-        @root_validator(pre=True, allow_reuse=True)
-        def check_missing_fields(values):
+        def check_fields(cls, values):
             required_fields = set(mapping.keys())
             missing_fields = required_fields - set(values.keys())
             if missing_fields:
                 raise ValueError(f"Missing fields: {missing_fields}")
 
             unrecognized_fields = set(values.keys()) - required_fields
             if unrecognized_fields:
                 logger.warning(f"Unrecognized fields: {unrecognized_fields}")
             return values
 
-        new_class.__validator_check_name = classmethod(check_name)
-        new_class.__root_validator_check_missing_fields = classmethod(check_missing_fields)
+        validators = {"check_missing_fields_validator": model_validator(mode="before")(check_fields)}
+
+        new_class = create_model(class_name, __validators__=validators, **mapping)
         return new_class
 
-    def create_children_class(self):
+    def create_children_class(self, exclude=None):
         """Generate a model_class directly from the fields on this object"""
         class_name = f"{self.key}_AN"
-        mapping = self.get_children_mapping()
+        mapping = self.get_children_mapping(exclude=exclude)
         return self.create_model_class(class_name, mapping)
 
-    def to_dict(self, format_func=None, mode="auto") -> Dict:
+    def to_dict(self, format_func=None, mode="auto", exclude=None) -> Dict:
         """Organize the current node and its children into a dict, in node: format form"""
 
         # If no format function is provided, use the default formatting
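The rewritten `create_model_class` passes its validator through `create_model`'s `__validators__` hook instead of bolting classmethods onto the class afterwards. A small sketch of that idea, assuming pydantic v2 (model name and fields are invented for illustration):

```python
# Sketch: dynamic model with an attached before-validator (pydantic v2 assumed).
from pydantic import create_model, model_validator


def make_strict_model(name: str, mapping: dict):
    def check_fields(cls, values):
        # Reject payloads that are missing any declared key.
        missing = set(mapping) - set(values)
        if missing:
            raise ValueError(f"Missing fields: {missing}")
        return values

    validators = {"check_fields_validator": model_validator(mode="before")(check_fields)}
    return create_model(name, __validators__=validators, **mapping)


Design = make_strict_model("Design", {"project_name": (str, ...), "file_list": (list, ...)})
Design(project_name="web_2048", file_list=["main.py"])  # ok
# Design(project_name="web_2048") would raise: Missing fields: {'file_list'}
```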
@@ -179,7 +178,10 @@ class ActionNode:
             return node_dict
 
         # Iterate over the children and call to_dict recursively
+        exclude = exclude or []
         for _, child_node in self.children.items():
+            if child_node.key in exclude:
+                continue
             node_dict.update(child_node.to_dict(format_func))
 
         return node_dict

@@ -200,25 +202,25 @@ class ActionNode:
         else:  # markdown
             return f"[{tag}]\n" + text + f"\n[/{tag}]"
 
-    def _compile_f(self, schema, mode, tag, format_func, kv_sep) -> str:
-        nodes = self.to_dict(format_func=format_func, mode=mode)
+    def _compile_f(self, schema, mode, tag, format_func, kv_sep, exclude=None) -> str:
+        nodes = self.to_dict(format_func=format_func, mode=mode, exclude=exclude)
         text = self.compile_to(nodes, schema, kv_sep)
         return self.tagging(text, schema, tag)
 
-    def compile_instruction(self, schema="markdown", mode="children", tag="") -> str:
+    def compile_instruction(self, schema="markdown", mode="children", tag="", exclude=None) -> str:
         """compile to raw/json/markdown template with all/root/children nodes"""
         format_func = lambda i: f"{i.expected_type} # {i.instruction}"
-        return self._compile_f(schema, mode, tag, format_func, kv_sep=": ")
+        return self._compile_f(schema, mode, tag, format_func, kv_sep=": ", exclude=exclude)
 
-    def compile_example(self, schema="json", mode="children", tag="") -> str:
+    def compile_example(self, schema="json", mode="children", tag="", exclude=None) -> str:
         """compile to raw/json/markdown examples with all/root/children nodes"""
 
         # An f-string cannot be used here: once converted to str, json.dumps adds extra quotes, making it an invalid example
         # Bad example: "File list": "['main.py', 'const.py', 'game.py']" -- note the value here is a str, not a list
         format_func = lambda i: i.example
-        return self._compile_f(schema, mode, tag, format_func, kv_sep="\n")
+        return self._compile_f(schema, mode, tag, format_func, kv_sep="\n", exclude=exclude)
 
-    def compile(self, context, schema="json", mode="children", template=SIMPLE_TEMPLATE) -> str:
+    def compile(self, context, schema="json", mode="children", template=SIMPLE_TEMPLATE, exclude=[]) -> str:
         """
         mode: all/root/children
             mode="children": compile all child nodes into one unified template, including instruction and example

@@ -234,8 +236,8 @@ class ActionNode:
 
         # FIXME: json instruction causes format problems, e.g.: "Project name": "web_2048  # use an underscore in the project name",
         # compile example does not support markdown for now
-        instruction = self.compile_instruction(schema="markdown", mode=mode)
-        example = self.compile_example(schema=schema, tag=TAG, mode=mode)
+        instruction = self.compile_instruction(schema="markdown", mode=mode, exclude=exclude)
+        example = self.compile_example(schema=schema, tag=TAG, mode=mode, exclude=exclude)
         # nodes = ", ".join(self.to_dict(mode=mode).keys())
         constraints = [LANGUAGE_CONSTRAINT, FORMAT_CONSTRAINT]
         constraint = "\n".join(constraints)

@@ -260,14 +262,17 @@ class ActionNode:
         output_data_mapping: dict,
         system_msgs: Optional[list[str]] = None,
         schema="markdown",  # compatible to original format
+        timeout=CONFIG.timeout,
     ) -> (str, BaseModel):
         """Use ActionOutput to wrap the output of aask"""
-        content = await self.llm.aask(prompt, system_msgs)
+        content = await self.llm.aask(prompt, system_msgs, timeout=timeout)
         logger.debug(f"llm raw output:\n{content}")
         output_class = self.create_model_class(output_class_name, output_data_mapping)
 
         if schema == "json":
-            parsed_data = llm_output_postprecess(output=content, schema=output_class.schema(), req_key=f"[/{TAG}]")
+            parsed_data = llm_output_postprocess(
+                output=content, schema=output_class.model_json_schema(), req_key=f"[/{TAG}]"
+            )
         else:  # using markdown parser
             parsed_data = OutputParser.parse_data_with_mapping(content, output_data_mapping)
 

@@ -276,7 +281,7 @@ class ActionNode:
         return content, instruct_content
 
     def get(self, key):
-        return self.instruct_content.dict()[key]
+        return self.instruct_content.model_dump()[key]
 
     def set_recursive(self, name, value):
         setattr(self, name, value)

@@ -289,13 +294,13 @@ class ActionNode:
     def set_context(self, context):
         self.set_recursive("context", context)
 
-    async def simple_fill(self, schema, mode):
-        prompt = self.compile(context=self.context, schema=schema, mode=mode)
+    async def simple_fill(self, schema, mode, timeout=CONFIG.timeout, exclude=None):
+        prompt = self.compile(context=self.context, schema=schema, mode=mode, exclude=exclude)
 
         if schema != "raw":
-            mapping = self.get_mapping(mode)
+            mapping = self.get_mapping(mode, exclude=exclude)
             class_name = f"{self.key}_AN"
-            content, scontent = await self._aask_v1(prompt, class_name, mapping, schema=schema)
+            content, scontent = await self._aask_v1(prompt, class_name, mapping, schema=schema, timeout=timeout)
             self.content = content
             self.instruct_content = scontent
         else:

@@ -304,7 +309,7 @@ class ActionNode:
 
         return self
 
-    async def fill(self, context, llm, schema="json", mode="auto", strgy="simple"):
+    async def fill(self, context, llm, schema="json", mode="auto", strgy="simple", timeout=CONFIG.timeout, exclude=[]):
         """Fill the node(s) with mode.
 
         :param context: Everything we should know when filling node.

@@ -320,6 +325,8 @@ class ActionNode:
         :param strgy: simple/complex
             - simple: run only once
             - complex: run each node
+        :param timeout: Timeout for llm invocation.
+        :param exclude: The keys of ActionNode to exclude.
         :return: self
         """
         self.set_llm(llm)

@@ -328,27 +335,15 @@ class ActionNode:
             schema = self.schema
 
         if strgy == "simple":
-            return await self.simple_fill(schema=schema, mode=mode)
+            return await self.simple_fill(schema=schema, mode=mode, timeout=timeout, exclude=exclude)
         elif strgy == "complex":
             # This implicitly assumes that children exist
             tmp = {}
             for _, i in self.children.items():
-                child = await i.simple_fill(schema=schema, mode=mode)
+                if exclude and i.key in exclude:
+                    continue
+                child = await i.simple_fill(schema=schema, mode=mode, timeout=timeout, exclude=exclude)
                 tmp.update(child.instruct_content.dict())
             cls = self.create_children_class()
             self.instruct_content = cls(**tmp)
             return self
-
-
-def action_node_example():
-    node = ActionNode(key="key-0", expected_type=str, instruction="instruction-a", example="example-b")
-
-    logger.info(node.compile(context="123", schema="raw", mode="auto"))
-    logger.info(node.compile(context="123", schema="json", mode="auto"))
-    logger.info(node.compile(context="123", schema="markdown", mode="auto"))
-    logger.info(node.to_dict())
-    logger.info(node)
-
-
-if __name__ == "__main__":
-    action_node_example()
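Taken together, these hunks thread `timeout` and `exclude` from `fill()` down through `simple_fill`, `compile`, and the mapping helpers, so a caller can skip selected child nodes end to end: the excluded key disappears from the prompt, from the validation mapping, and from the generated result class. A hedged usage sketch (node keys are invented, and a configured LLM is assumed):

```python
# Sketch: filling an ActionNode tree while excluding one child (keys are made up).
import asyncio

from metagpt.actions.action_node import ActionNode
from metagpt.llm import LLM

TITLE = ActionNode(key="title", expected_type=str, instruction="a short title", example="demo")
BODY = ActionNode(key="body", expected_type=str, instruction="the body text", example="...")
ROOT = ActionNode.from_children("Doc", [TITLE, BODY])


async def main():
    # "body" is dropped from the prompt and from the validated output.
    node = await ROOT.fill(context="write about X", llm=LLM(), exclude=["body"], timeout=60)
    print(node.instruct_content.dict())  # only {"title": ...}


asyncio.run(main())
```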
metagpt/actions/add_requirement.py

@@ -10,6 +10,3 @@ from metagpt.actions import Action
 
 class UserRequirement(Action):
     """User Requirement without any implementation details"""
-
-    async def run(self, *args, **kwargs):
-        raise NotImplementedError
metagpt/actions/clone_function.py (deleted)

@@ -1,70 +0,0 @@
-import traceback
-from pathlib import Path
-
-from pydantic import Field
-
-from metagpt.actions.write_code import WriteCode
-from metagpt.llm import LLM
-from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
-from metagpt.schema import Message
-from metagpt.utils.highlight import highlight
-
-CLONE_PROMPT = """
-*context*
-Please convert the function code ```{source_code}``` into the function format: ```{template_func}```.
-*Please Write code based on the following list and context*
-1. Write code start with ```, and end with ```.
-2. Please implement it in one function if possible, except for import statements. For example:
-```python
-import pandas as pd
-def run(*args) -> pd.DataFrame:
-    ...
-```
-3. Do not use public member functions that do not exist in your design.
-4. The output function name, input parameters and return value must be the same as ```{template_func}```.
-5. Make sure the results before and after the code conversion are exactly the same.
-6. Don't repeat my context in your replies.
-7. Return full results, for example, if the return value has df.head(), please return df.
-8. If you must use a third-party package, use the most popular ones, for example: pandas, numpy, ta, ...
-"""
-
-
-class CloneFunction(WriteCode):
-    name: str = "CloneFunction"
-    context: list[Message] = []
-    llm: BaseGPTAPI = Field(default_factory=LLM)
-
-    def _save(self, code_path, code):
-        if isinstance(code_path, str):
-            code_path = Path(code_path)
-        code_path.parent.mkdir(parents=True, exist_ok=True)
-        code_path.write_text(code)
-        logger.info(f"Saving Code to {code_path}")
-
-    async def run(self, template_func: str, source_code: str) -> str:
-        """Convert source_code so that it has the same parameters and return type as template_func"""
-        prompt = CLONE_PROMPT.format(source_code=source_code, template_func=template_func)
-        logger.info(f"query for CloneFunction: \n {prompt}")
-        code = await self.write_code(prompt)
-        logger.info(f"CloneFunction code is \n {highlight(code)}")
-        return code
-
-
-def run_function_code(func_code: str, func_name: str, *args, **kwargs):
-    """Run function code from string code."""
-    try:
-        locals_ = {}
-        exec(func_code, locals_)
-        func = locals_[func_name]
-        return func(*args, **kwargs), ""
-    except Exception:
-        return "", traceback.format_exc()
-
-
-def run_function_script(code_script_path: str, func_name: str, *args, **kwargs):
-    """Run function code from script."""
-    if isinstance(code_script_path, str):
-        code_path = Path(code_script_path)
-    code = code_path.read_text(encoding="utf-8")
-    return run_function_code(code, func_name, *args, **kwargs)
metagpt/actions/debug_error.py

@@ -15,7 +15,6 @@ from pydantic import Field
 from metagpt.actions.action import Action
 from metagpt.config import CONFIG
 from metagpt.const import TEST_CODES_FILE_REPO, TEST_OUTPUTS_FILE_REPO
-from metagpt.llm import LLM, BaseGPTAPI
 from metagpt.logs import logger
 from metagpt.schema import RunCodeContext, RunCodeResult
 from metagpt.utils.common import CodeParser

@@ -52,7 +51,6 @@ Now you should start rewriting the code:
 class DebugError(Action):
     name: str = "DebugError"
     context: RunCodeContext = Field(default_factory=RunCodeContext)
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     async def run(self, *args, **kwargs) -> str:
         output_doc = await FileRepository.get_file(
metagpt/actions/design_api.py

@@ -13,8 +13,6 @@
 import json
 from pathlib import Path
 from typing import Optional
 
-from pydantic import Field
-
 from metagpt.actions import Action, ActionOutput
+from metagpt.actions.design_api_an import DESIGN_API_NODE, REFINED_DESIGN_NODES
 from metagpt.config import CONFIG

@@ -25,9 +23,7 @@ from metagpt.const import (
     SYSTEM_DESIGN_FILE_REPO,
     SYSTEM_DESIGN_PDF_FILE_REPO,
 )
-from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import Document, Documents, Message
 from metagpt.utils.file_repository import FileRepository
 from metagpt.utils.mermaid import mermaid_to_file

@@ -44,7 +40,6 @@ NEW_REQ_TEMPLATE = """
 class WriteDesign(Action):
     name: str = ""
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
     desc: str = (
         "Based on the PRD, think about the system design, and design the corresponding APIs, "
         "data structures, library tables, processes, and paths. Please provide your design, feedback "

@@ -52,10 +47,10 @@ class WriteDesign(Action):
     )
 
     async def run(self, with_messages: Message, schema: str = CONFIG.prompt_schema):
-        # Use `git diff` to identify which PRD documents have been modified in the `docs/prds` directory.
+        # Use `git status` to identify which PRD documents have been modified in the `docs/prds` directory.
         prds_file_repo = CONFIG.git_repo.new_file_repository(PRDS_FILE_REPO)
         changed_prds = prds_file_repo.changed_files
-        # Use `git diff` to identify which design documents in the `docs/system_designs` directory have undergone
+        # Use `git status` to identify which design documents in the `docs/system_designs` directory have undergone
         # changes.
         system_design_file_repo = CONFIG.git_repo.new_file_repository(SYSTEM_DESIGN_FILE_REPO)
         changed_system_designs = system_design_file_repo.changed_files

@@ -79,7 +74,7 @@ class WriteDesign(Action):
             logger.info("Nothing has changed.")
         # Wait until all files under `docs/system_designs/` are processed before sending the publish message,
         # leaving room for global optimization in subsequent steps.
-        return ActionOutput(content=changed_files.json(), instruct_content=changed_files)
+        return ActionOutput(content=changed_files.model_dump_json(), instruct_content=changed_files)
 
     async def _new_system_design(self, context, schema=CONFIG.prompt_schema):
         node = await DESIGN_API_NODE.fill(context=context, llm=self.llm, schema=schema)

@@ -88,7 +83,7 @@ class WriteDesign(Action):
     async def _merge(self, prd_doc, system_design_doc, schema=CONFIG.prompt_schema):
         context = NEW_REQ_TEMPLATE.format(old_design=system_design_doc.content, context=prd_doc.content)
         node = await REFINED_DESIGN_NODES.fill(context=context, llm=self.llm, schema=schema)
-        system_design_doc.content = node.instruct_content.json(ensure_ascii=False)
+        system_design_doc.content = node.instruct_content.model_dump_json()
         return system_design_doc
 
     async def _update_system_design(self, filename, prds_file_repo, system_design_file_repo) -> Document:

@@ -99,7 +94,7 @@ class WriteDesign(Action):
             doc = Document(
                 root_path=SYSTEM_DESIGN_FILE_REPO,
                 filename=filename,
-                content=system_design.instruct_content.json(ensure_ascii=False),
+                content=system_design.instruct_content.model_dump_json(),
             )
         else:
             doc = await self._merge(prd_doc=prd, system_design_doc=old_system_design_doc)
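This file follows the pydantic v2 serialization renames that recur throughout the commit: `.json(ensure_ascii=False)` becomes `.model_dump_json()`, `.dict()` becomes `.model_dump()`, and `.schema()` becomes `.model_json_schema()`. A small sketch of the correspondence (model and fields invented for illustration):

```python
# Sketch of the pydantic v1 -> v2 serialization renames used in this commit.
from pydantic import BaseModel


class Doc(BaseModel):
    filename: str = "design.json"
    content: str = "设计"  # v2 emits non-ASCII unescaped, hence ensure_ascii=False is dropped


doc = Doc()
print(doc.model_dump())         # v1: doc.dict()
print(doc.model_dump_json())    # v1: doc.json(ensure_ascii=False)
print(Doc.model_json_schema())  # v1: Doc.schema()
```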
metagpt/actions/design_api_an.py

@@ -8,7 +8,6 @@
 from typing import List
 
 from metagpt.actions.action_node import ActionNode
-from metagpt.logs import logger
 from metagpt.utils.mermaid import MMC1, MMC1_REFINE, MMC2, MMC2_REFINE
 
 IMPLEMENTATION_APPROACH = ActionNode(

@@ -157,14 +156,3 @@ REFINE_NODES = [
 DESIGN_API_NODE = ActionNode.from_children("DesignAPI", NODES)
 INCREMENTAL_DESIGN_NODES = ActionNode.from_children("Incremental_Design_API", INC_NODES)
 REFINED_DESIGN_NODES = ActionNode.from_children("Refined_Design_API", REFINE_NODES)
-
-
-def main():
-    prompt = DESIGN_API_NODE.compile(context="")
-    logger.info(prompt)
-    prompt = REFINED_DESIGN_NODES.compile(context="")
-    logger.info(prompt)
-
-
-if __name__ == "__main__":
-    main()
metagpt/actions/design_api_review.py

@@ -8,17 +8,12 @@
 
 from typing import Optional
 
-from pydantic import Field
-
 from metagpt.actions.action import Action
-from metagpt.llm import LLM
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 
 
 class DesignReview(Action):
     name: str = "DesignReview"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     async def run(self, prd, api_design):
         prompt = (
metagpt/actions/execute_task.py

@@ -6,18 +6,14 @@
 @File    : execute_task.py
 """
 
-from pydantic import Field
-
 from metagpt.actions import Action
-from metagpt.llm import LLM
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import Message
 
 
 class ExecuteTask(Action):
     name: str = "ExecuteTask"
     context: list[Message] = []
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
-    def run(self, *args, **kwargs):
+    async def run(self, *args, **kwargs):
         pass
metagpt/actions/fix_bug.py

@@ -11,6 +11,3 @@ class FixBug(Action):
     """Fix bug action without any implementation details"""
 
     name: str = "FixBug"
-
-    async def run(self, *args, **kwargs):
-        raise NotImplementedError
metagpt/actions/invoice_ocr.py

@@ -26,7 +26,7 @@ from metagpt.prompts.invoice_ocr import (
     EXTRACT_OCR_MAIN_INFO_PROMPT,
     REPLY_OCR_QUESTION_PROMPT,
 )
-from metagpt.provider.base_gpt_api import BaseGPTAPI
+from metagpt.provider.base_llm import BaseLLM
 from metagpt.utils.common import OutputParser
 from metagpt.utils.file import File
 

@@ -42,7 +42,6 @@ class InvoiceOCR(Action):
 
     name: str = "InvoiceOCR"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     @staticmethod
     async def _check_file_type(file_path: Path) -> str:

@@ -132,7 +131,7 @@ class GenerateTable(Action):
 
     name: str = "GenerateTable"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
+    llm: BaseLLM = Field(default_factory=LLM)
     language: str = "ch"
 
     async def run(self, ocr_results: list, filename: str, *args, **kwargs) -> dict[str, str]:

@@ -177,7 +176,7 @@ class ReplyQuestion(Action):
 
     name: str = "ReplyQuestion"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
+    llm: BaseLLM = Field(default_factory=LLM)
     language: str = "ch"
 
     async def run(self, query: str, ocr_result: list, *args, **kwargs) -> str:
metagpt/actions/prepare_documents.py

@@ -11,13 +11,9 @@ import shutil
 from pathlib import Path
-from typing import Optional
 
-from pydantic import Field
 
 from metagpt.actions import Action, ActionOutput
 from metagpt.config import CONFIG
 from metagpt.const import DOCS_FILE_REPO, REQUIREMENT_FILENAME
-from metagpt.llm import LLM
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import Document
 from metagpt.utils.file_repository import FileRepository
 from metagpt.utils.git_repository import GitRepository

@@ -28,17 +24,18 @@ class PrepareDocuments(Action):
 
     name: str = "PrepareDocuments"
-    context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     def _init_repo(self):
         """Initialize the Git environment."""
-        path = CONFIG.project_path
-        if not path:
+        if not CONFIG.project_path:
             name = CONFIG.project_name or FileRepository.new_filename()
             path = Path(CONFIG.workspace_path) / name
-
-        if Path(path).exists() and not CONFIG.inc:
+        else:
+            path = Path(CONFIG.project_path)
+        if path.exists() and not CONFIG.inc:
             shutil.rmtree(path)
+        CONFIG.project_path = path
+        CONFIG.project_name = path.name
         CONFIG.git_repo = GitRepository(local_path=path, auto_init=True)
 
     async def run(self, with_messages, **kwargs):
metagpt/actions/project_management.py

@@ -13,8 +13,6 @@
 import json
-from typing import Optional
 
-from pydantic import Field
 
 from metagpt.actions import ActionOutput
 from metagpt.actions.action import Action
 from metagpt.actions.project_management_an import PM_NODE, REFINED_PM_NODES

@@ -25,9 +23,7 @@ from metagpt.const import (
     TASK_FILE_REPO,
     TASK_PDF_FILE_REPO,
 )
-from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import Document, Documents
 from metagpt.utils.file_repository import FileRepository
 

@@ -43,7 +39,6 @@ NEW_REQ_TEMPLATE = """
 class WriteTasks(Action):
     name: str = "CreateTasks"
-    context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     async def run(self, with_messages, schema=CONFIG.prompt_schema):
         system_design_file_repo = CONFIG.git_repo.new_file_repository(SYSTEM_DESIGN_FILE_REPO)

@@ -73,7 +68,7 @@ class WriteTasks(Action):
             logger.info("Nothing has changed.")
         # Wait until all files under `docs/tasks/` are processed before sending the publish_message, leaving room for
         # global optimization in subsequent steps.
-        return ActionOutput(content=change_files.json(), instruct_content=change_files)
+        return ActionOutput(content=change_files.model_dump_json(), instruct_content=change_files)
 
     async def _update_tasks(self, filename, system_design_file_repo, tasks_file_repo):
         system_design_doc = await system_design_file_repo.get(filename)

@@ -83,7 +78,7 @@ class WriteTasks(Action):
         else:
             rsp = await self._run_new_tasks(context=system_design_doc.content)
             task_doc = Document(
-                root_path=TASK_FILE_REPO, filename=filename, content=rsp.instruct_content.json(ensure_ascii=False)
+                root_path=TASK_FILE_REPO, filename=filename, content=rsp.instruct_content.model_dump_json()
             )
         await tasks_file_repo.save(
             filename=filename, content=task_doc.content, dependencies={system_design_doc.root_relative_path}

@@ -94,15 +89,12 @@ class WriteTasks(Action):
 
     async def _run_new_tasks(self, context, schema=CONFIG.prompt_schema):
         node = await PM_NODE.fill(context, self.llm, schema)
-        # prompt_template, format_example = get_template(templates, format)
-        # prompt = prompt_template.format(context=context, format_example=format_example)
-        # rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING, format=format)
         return node
 
     async def _merge(self, system_design_doc, task_doc, schema=CONFIG.prompt_schema) -> Document:
         context = NEW_REQ_TEMPLATE.format(context=system_design_doc.content, old_tasks=task_doc.content)
         node = await REFINED_PM_NODES.fill(context, self.llm, schema)
-        task_doc.content = node.instruct_content.json(ensure_ascii=False)
+        task_doc.content = node.instruct_content.model_dump_json()
         return task_doc
 
     @staticmethod

@@ -123,9 +115,3 @@ class WriteTasks(Action):
     @staticmethod
     async def _save_pdf(task_doc):
         await FileRepository.save_as(doc=task_doc, with_suffix=".md", relative_path=TASK_PDF_FILE_REPO)
-
-
-class AssignTasks(Action):
-    async def run(self, *args, **kwargs):
-        # Here you should implement the actual action
-        pass
metagpt/actions/rebuild_class_view.py (new file, 68 lines)

@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@Time    : 2023/12/19
+@Author  : mashenquan
+@File    : rebuild_class_view.py
+@Desc    : Rebuild class view info
+"""
+import re
+from pathlib import Path
+
+from metagpt.actions import Action
+from metagpt.config import CONFIG
+from metagpt.const import CLASS_VIEW_FILE_REPO, GRAPH_REPO_FILE_REPO
+from metagpt.repo_parser import RepoParser
+from metagpt.utils.di_graph_repository import DiGraphRepository
+from metagpt.utils.graph_repository import GraphKeyword, GraphRepository
+
+
+class RebuildClassView(Action):
+    def __init__(self, name="", context=None, llm=None):
+        super().__init__(name=name, context=context, llm=llm)
+
+    async def run(self, with_messages=None, format=CONFIG.prompt_schema):
+        graph_repo_pathname = CONFIG.git_repo.workdir / GRAPH_REPO_FILE_REPO / CONFIG.git_repo.workdir.name
+        graph_db = await DiGraphRepository.load_from(str(graph_repo_pathname.with_suffix(".json")))
+        repo_parser = RepoParser(base_directory=self.context)
+        class_views = await repo_parser.rebuild_class_views(path=Path(self.context))  # use pylint
+        await GraphRepository.update_graph_db_with_class_views(graph_db, class_views)
+        symbols = repo_parser.generate_symbols()  # use ast
+        for file_info in symbols:
+            await GraphRepository.update_graph_db_with_file_info(graph_db, file_info)
+        await self._create_mermaid_class_view(graph_db=graph_db)
+        await self._save(graph_db=graph_db)
+
+    async def _create_mermaid_class_view(self, graph_db):
+        pass
+        # dataset = await graph_db.select(subject=concat_namespace(filename, class_name), predicate=GraphKeyword.HAS_PAGE_INFO)
+        # if not dataset:
+        #     logger.warning(f"No page info for {concat_namespace(filename, class_name)}")
+        #     return
+        # code_block_info = CodeBlockInfo.parse_raw(dataset[0].object_)
+        # src_code = await read_file_block(filename=Path(self.context) / filename, lineno=code_block_info.lineno, end_lineno=code_block_info.end_lineno)
+        # code_type = ""
+        # dataset = await graph_db.select(subject=filename, predicate=GraphKeyword.IS)
+        # for spo in dataset:
+        #     if spo.object_ in ["javascript", "python"]:
+        #         code_type = spo.object_
+        #         break
+
+        # try:
+        #     node = await REBUILD_CLASS_VIEW_NODE.fill(context=f"```{code_type}\n{src_code}\n```", llm=self.llm, to=format)
+        #     class_view = node.instruct_content.model_dump()["Class View"]
+        # except Exception as e:
+        #     class_view = RepoParser.rebuild_class_view(src_code, code_type)
+        # await graph_db.insert(subject=concat_namespace(filename, class_name), predicate=GraphKeyword.HAS_CLASS_VIEW, object_=class_view)
+        # logger.info(f"{concat_namespace(filename, class_name)} {GraphKeyword.HAS_CLASS_VIEW} {class_view}")
+
+    async def _save(self, graph_db):
+        class_view_file_repo = CONFIG.git_repo.new_file_repository(relative_path=CLASS_VIEW_FILE_REPO)
+        dataset = await graph_db.select(predicate=GraphKeyword.HAS_CLASS_VIEW)
+        all_class_view = []
+        for spo in dataset:
+            title = f"---\ntitle: {spo.subject}\n---\n"
+            filename = re.sub(r"[/:]", "_", spo.subject) + ".mmd"
+            await class_view_file_repo.save(filename=filename, content=title + spo.object_)
+            all_class_view.append(spo.object_)
+        await class_view_file_repo.save(filename="all.mmd", content="\n".join(all_class_view))
metagpt/actions/rebuild_class_view_an.py (new file, 33 lines)

@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@Time    : 2023/12/19
+@Author  : mashenquan
+@File    : rebuild_class_view_an.py
+@Desc    : Defines `ActionNode` objects used by rebuild_class_view.py
+"""
+from metagpt.actions.action_node import ActionNode
+
+CLASS_SOURCE_CODE_BLOCK = ActionNode(
+    key="Class View",
+    expected_type=str,
+    instruction='Generate the mermaid class diagram corresponding to source code in "context."',
+    example="""
+classDiagram
+    class A {
+        -int x
+        +int y
+        -int speed
+        -int direction
+        +__init__(x: int, y: int, speed: int, direction: int)
+        +change_direction(new_direction: int) None
+        +move() None
+    }
+""",
+)
+
+REBUILD_CLASS_VIEW_NODES = [
+    CLASS_SOURCE_CODE_BLOCK,
+]
+
+REBUILD_CLASS_VIEW_NODE = ActionNode.from_children("RebuildClassView", REBUILD_CLASS_VIEW_NODES)
metagpt/actions/research.py

@@ -11,7 +11,7 @@ from metagpt.actions import Action
 from metagpt.config import CONFIG
 from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
+from metagpt.provider.base_llm import BaseLLM
 from metagpt.tools.search_engine import SearchEngine
 from metagpt.tools.web_browser_engine import WebBrowserEngine, WebBrowserEngineType
 from metagpt.utils.common import OutputParser

@@ -82,8 +82,8 @@ class CollectLinks(Action):
 
     name: str = "CollectLinks"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
+    llm: BaseLLM = Field(default_factory=LLM)
     desc: str = "Collect links from a search engine."
 
     search_engine: SearchEngine = Field(default_factory=SearchEngine)
     rank_func: Optional[Callable[[list[str]], None]] = None

@@ -129,7 +129,8 @@ class CollectLinks(Action):
             if len(remove) == 0:
                 break
 
-        prompt = reduce_message_length(gen_msg(), self.llm.model, system_text, CONFIG.max_tokens_rsp)
+        model_name = CONFIG.get_model_name(CONFIG.get_default_llm_provider_enum())
+        prompt = reduce_message_length(gen_msg(), model_name, system_text, CONFIG.max_tokens_rsp)
         logger.debug(prompt)
         queries = await self._aask(prompt, [system_text])
         try:

@@ -177,7 +178,7 @@ class WebBrowseAndSummarize(Action):
 
     name: str = "WebBrowseAndSummarize"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
+    llm: BaseLLM = Field(default_factory=LLM)
     desc: str = "Explore the web and provide summaries of articles and webpages."
     browse_func: Union[Callable[[list[str]], None], None] = None
     web_browser_engine: Optional[WebBrowserEngine] = None

@@ -248,7 +249,7 @@ class ConductResearch(Action):
 
     name: str = "ConductResearch"
     context: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
+    llm: BaseLLM = Field(default_factory=LLM)
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
metagpt/actions/run_code.py

@@ -22,7 +22,6 @@ from pydantic import Field
 
 from metagpt.actions.action import Action
 from metagpt.config import CONFIG
-from metagpt.llm import LLM, BaseGPTAPI
 from metagpt.logs import logger
 from metagpt.schema import RunCodeContext, RunCodeResult
 from metagpt.utils.exceptions import handle_exception

@@ -79,14 +78,15 @@ standard errors:
 class RunCode(Action):
     name: str = "RunCode"
     context: RunCodeContext = Field(default_factory=RunCodeContext)
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     @classmethod
-    @handle_exception
     async def run_text(cls, code) -> Tuple[str, str]:
-        # We will document_store the result in this dictionary
-        namespace = {}
-        exec(code, namespace)
+        try:
+            # We will document_store the result in this dictionary
+            namespace = {}
+            exec(code, namespace)
+        except Exception as e:
+            return "", str(e)
         return namespace.get("result", ""), ""
 
     @classmethod
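The rewrite trades the `@handle_exception` decorator for an explicit try/except, so the caller gets the error text back instead of a swallowed exception. A hedged usage sketch of the resulting contract (one of the two returned strings is always empty):

```python
# Sketch: run_text returns (result, error_text).
import asyncio

from metagpt.actions.run_code import RunCode


async def main():
    ok, err = await RunCode.run_text("result = 1 + 1")
    assert (ok, err) == (2, "")

    ok, err = await RunCode.run_text("result = 1 / 0")
    assert ok == "" and "division by zero" in err


asyncio.run(main())
```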
metagpt/actions/search_and_summarize.py

@@ -8,13 +8,11 @@
 from typing import Any, Optional
 
-import pydantic
-from pydantic import Field, root_validator
+from pydantic import Field, model_validator
 
 from metagpt.actions import Action
 from metagpt.config import CONFIG, Config
 from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import Message
 from metagpt.tools import SearchEngineType
 from metagpt.tools.search_engine import SearchEngine

@@ -105,18 +103,18 @@ You are a member of a professional butler team and will provide helpful suggesti
 """
 
 
+# TOTEST
 class SearchAndSummarize(Action):
     name: str = ""
     content: Optional[str] = None
-    llm: BaseGPTAPI = Field(default_factory=LLM)
     config: None = Field(default_factory=Config)
     engine: Optional[SearchEngineType] = CONFIG.search_engine
     search_func: Optional[Any] = None
     search_engine: SearchEngine = None
-    result = ""
+    result: str = ""
 
-    @root_validator
+    @model_validator(mode="before")
+    @classmethod
     def validate_engine_and_run_func(cls, values):
         engine = values.get("engine")
         search_func = values.get("search_func")
metagpt/actions/skill_action.py (new file, 111 lines)

@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@Time    : 2023/8/28
+@Author  : mashenquan
+@File    : skill_action.py
+@Desc    : Call learned skill
+"""
+from __future__ import annotations
+
+import ast
+import importlib
+import traceback
+from copy import deepcopy
+from typing import Dict, Optional
+
+from metagpt.actions import Action
+from metagpt.learn.skill_loader import Skill
+from metagpt.logs import logger
+from metagpt.schema import Message
+
+
+# TOTEST
+class ArgumentsParingAction(Action):
+    skill: Skill
+    ask: str
+    rsp: Optional[Message] = None
+    args: Optional[Dict] = None
+
+    @property
+    def prompt(self):
+        prompt = "You are a function parser. You can convert spoken words into function parameters.\n"
+        prompt += "\n---\n"
+        prompt += f"{self.skill.name} function parameters description:\n"
+        for k, v in self.skill.arguments.items():
+            prompt += f"parameter `{k}`: {v}\n"
+        prompt += "\n---\n"
+        prompt += "Examples:\n"
+        for e in self.skill.examples:
+            prompt += f"If want you to do `{e.ask}`, return `{e.answer}` brief and clear.\n"
+        prompt += "\n---\n"
+        prompt += (
+            f"\nRefer to the `{self.skill.name}` function description, and fill in the function parameters according "
+            'to the example "I want you to do xx" in the Examples section.'
+            f"\nNow I want you to do `{self.ask}`, return function parameters in Examples format above, brief and "
+            "clear."
+        )
+        return prompt
+
+    async def run(self, with_message=None, **kwargs) -> Message:
+        prompt = self.prompt
+        rsp = await self.llm.aask(msg=prompt, system_msgs=[])
+        logger.debug(f"SKILL:{prompt}\n, RESULT:{rsp}")
+        self.args = ArgumentsParingAction.parse_arguments(skill_name=self.skill.name, txt=rsp)
+        self.rsp = Message(content=rsp, role="assistant", instruct_content=self.args, cause_by=self)
+        return self.rsp
+
+    @staticmethod
+    def parse_arguments(skill_name, txt) -> dict:
+        prefix = skill_name + "("
+        if prefix not in txt:
+            logger.error(f"{skill_name} not in {txt}")
+            return None
+        if ")" not in txt:
+            logger.error(f"')' not in {txt}")
+            return None
+        begin_ix = txt.find(prefix)
+        end_ix = txt.rfind(")")
+        args_txt = txt[begin_ix + len(prefix) : end_ix]
+        logger.info(args_txt)
+        fake_expression = f"dict({args_txt})"
+        parsed_expression = ast.parse(fake_expression, mode="eval")
+        args = {}
+        for keyword in parsed_expression.body.keywords:
+            key = keyword.arg
+            value = ast.literal_eval(keyword.value)
+            args[key] = value
+        return args
+
+
+class SkillAction(Action):
+    skill: Skill
+    args: Dict
+    rsp: Optional[Message] = None
+
+    async def run(self, with_message=None, **kwargs) -> Message:
+        """Run action"""
+        options = deepcopy(kwargs)
+        if self.args:
+            for k in self.args.keys():
+                if k in options:
+                    options.pop(k)
+        try:
+            rsp = await self.find_and_call_function(self.skill.name, args=self.args, **options)
+            self.rsp = Message(content=rsp, role="assistant", cause_by=self)
+        except Exception as e:
+            logger.exception(f"{e}, traceback:{traceback.format_exc()}")
+            self.rsp = Message(content=f"Error: {e}", role="assistant", cause_by=self)
+        return self.rsp
+
+    @staticmethod
+    async def find_and_call_function(function_name, args, **kwargs) -> str:
+        try:
+            module = importlib.import_module("metagpt.learn")
+            function = getattr(module, function_name)
+            # Invoke function and return result
+            result = await function(**args, **kwargs)
+            return result
+        except (ModuleNotFoundError, AttributeError):
+            logger.error(f"{function_name} not found")
+            raise ValueError(f"{function_name} not found")
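`parse_arguments` turns an LLM reply such as a function-call string into a kwargs dict by wrapping the argument list in a fake `dict(...)` expression and walking it with `ast`, so values go through `ast.literal_eval` rather than `eval`. A standalone check of that trick (the skill name and arguments here are hypothetical):

```python
# Sketch: the ast-based argument parsing used by ArgumentsParingAction.parse_arguments.
import ast

txt = "text_to_image(text='a dog', size_type='512x512')"
args_txt = txt[txt.find("(") + 1 : txt.rfind(")")]
parsed = ast.parse(f"dict({args_txt})", mode="eval")
args = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}
assert args == {"text": "a dog", "size_type": "512x512"}
```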
metagpt/actions/summarize_code.py

@@ -13,7 +13,6 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential
 from metagpt.actions.action import Action
 from metagpt.config import CONFIG
 from metagpt.const import SYSTEM_DESIGN_FILE_REPO, TASK_FILE_REPO
-from metagpt.llm import LLM, BaseGPTAPI
 from metagpt.logs import logger
 from metagpt.schema import CodeSummarizeContext
 from metagpt.utils.file_repository import FileRepository

@@ -91,10 +90,10 @@ flowchart TB
 """
 
 
+# TOTEST
 class SummarizeCode(Action):
     name: str = "SummarizeCode"
     context: CodeSummarizeContext = Field(default_factory=CodeSummarizeContext)
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     @retry(stop=stop_after_attempt(2), wait=wait_random_exponential(min=1, max=60))
     async def summarize_code(self, prompt):
metagpt/actions/talk_action.py (new file, 163 lines)

@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@Time    : 2023/8/28
+@Author  : mashenquan
+@File    : talk_action.py
+@Desc    : Act as if it's a talk
+"""
+from typing import Optional
+
+from metagpt.actions import Action
+from metagpt.config import CONFIG
+from metagpt.const import DEFAULT_LANGUAGE
+from metagpt.logs import logger
+from metagpt.schema import Message
+
+
+# TOTEST
+class TalkAction(Action):
+    context: str
+    history_summary: str = ""
+    knowledge: str = ""
+    rsp: Optional[Message] = None
+
+    @property
+    def prompt(self):
+        prompt = ""
+        if CONFIG.agent_description:
+            prompt = (
+                f"You are {CONFIG.agent_description}. Your responses should align with the role-play agreement, "
+                f"maintaining the character's persona and habits. When faced with unrelated questions, playfully "
+                f"decline to answer without revealing your AI nature to preserve the character's image.\n\n"
+            )
+        prompt += f"Knowledge:\n{self.knowledge}\n\n" if self.knowledge else ""
+        prompt += f"{self.history_summary}\n\n"
+        prompt += (
+            "If the information is insufficient, you can search in the historical conversation or knowledge above.\n"
+        )
+        language = CONFIG.language or DEFAULT_LANGUAGE
+        prompt += (
+            f"Answer the following questions strictly in {language}, and the answers must follow the Markdown format.\n "
+            f"{self.context}"
+        )
+        logger.debug(f"PROMPT: {prompt}")
+        return prompt
+
+    @property
+    def prompt_gpt4(self):
+        kvs = {
+            "{role}": CONFIG.agent_description or "",
+            "{history}": self.history_summary or "",
+            "{knowledge}": self.knowledge or "",
+            "{language}": CONFIG.language or DEFAULT_LANGUAGE,
+            "{ask}": self.context,
+        }
+        prompt = TalkActionPrompt.FORMATION_LOOSE
+        for k, v in kvs.items():
+            prompt = prompt.replace(k, v)
+        logger.info(f"PROMPT: {prompt}")
+        return prompt
+
+    # async def run_old(self, *args, **kwargs) -> ActionOutput:
+    #     prompt = self.prompt
+    #     rsp = await self.llm.aask(msg=prompt, system_msgs=[])
+    #     logger.debug(f"PROMPT:{prompt}\nRESULT:{rsp}\n")
+    #     self._rsp = ActionOutput(content=rsp)
+    #     return self._rsp
+
+    @property
+    def aask_args(self):
+        language = CONFIG.language or DEFAULT_LANGUAGE
+        system_msgs = [
+            f"You are {CONFIG.agent_description}.",
+            "Your responses should align with the role-play agreement, "
+            "maintaining the character's persona and habits. When faced with unrelated questions, playfully "
+            "decline to answer without revealing your AI nature to preserve the character's image.",
+            "If the information is insufficient, you can search in the context or knowledge.",
+            f"Answer the following questions strictly in {language}, and the answers must follow the Markdown format.",
+        ]
+        format_msgs = []
+        if self.knowledge:
+            format_msgs.append({"role": "assistant", "content": self.knowledge})
+        if self.history_summary:
+            format_msgs.append({"role": "assistant", "content": self.history_summary})
+        return self.context, format_msgs, system_msgs
+
+    async def run(self, with_message=None, **kwargs) -> Message:
+        msg, format_msgs, system_msgs = self.aask_args
+        rsp = await self.llm.aask(msg=msg, format_msgs=format_msgs, system_msgs=system_msgs)
+        self.rsp = Message(content=rsp, role="assistant", cause_by=self)
+        return self.rsp
+
+
+class TalkActionPrompt:
+    FORMATION = """Formation: "Capacity and role" defines the role you are currently playing;
+"[HISTORY_BEGIN]" and "[HISTORY_END]" tags enclose the historical conversation;
+"[KNOWLEDGE_BEGIN]" and "[KNOWLEDGE_END]" tags enclose the knowledge that may help with your responses;
+"Statement" defines the work detail you need to complete at this stage;
+"[ASK_BEGIN]" and "[ASK_END]" tags enclose the questions;
+"Constraint" defines the conditions that your responses must comply with.
+"Personality" defines your language style.
+"Insight" provides a deeper understanding of the characters' inner traits.
+"Initial" defines the initial setup of a character.
+
+Capacity and role: {role}
+Statement: Your responses should align with the role-play agreement, maintaining the
+character's persona and habits. When faced with unrelated questions, playfully decline to answer without revealing
+your AI nature to preserve the character's image.
+
+[HISTORY_BEGIN]
+
+{history}
+
+[HISTORY_END]
+
+[KNOWLEDGE_BEGIN]
+
+{knowledge}
+
+[KNOWLEDGE_END]
+
+Statement: If the information is insufficient, you can search in the historical conversation or knowledge.
+Statement: Unless you are a language professional, answer the following questions strictly in {language}
+, and the answers must follow the Markdown format. Strictly exclude any tag like "[HISTORY_BEGIN]"
+, "[HISTORY_END]", "[KNOWLEDGE_BEGIN]", "[KNOWLEDGE_END]" in responses.
+
+
+{ask}
+"""
+
+    FORMATION_LOOSE = """Formation: "Capacity and role" defines the role you are currently playing;
+"[HISTORY_BEGIN]" and "[HISTORY_END]" tags enclose the historical conversation;
+"[KNOWLEDGE_BEGIN]" and "[KNOWLEDGE_END]" tags enclose the knowledge that may help with your responses;
+"Statement" defines the work detail you need to complete at this stage;
+"Constraint" defines the conditions that your responses must comply with.
+"Personality" defines your language style.
+"Insight" provides a deeper understanding of the characters' inner traits.
+"Initial" defines the initial setup of a character.
+
+Capacity and role: {role}
+Statement: Your responses should maintain the character's persona and habits. When faced with unrelated questions
+, playfully decline to answer without revealing your AI nature to preserve the character's image.
+
+[HISTORY_BEGIN]
+
+{history}
+
+[HISTORY_END]
+
+[KNOWLEDGE_BEGIN]
+
+{knowledge}
+
+[KNOWLEDGE_END]
+
+Statement: If the information is insufficient, you can search in the historical conversation or knowledge.
+Statement: Unless you are a language professional, answer the following questions strictly in {language}
+, and the answers must follow the Markdown format. Strictly exclude any tag like "[HISTORY_BEGIN]"
+, "[HISTORY_END]", "[KNOWLEDGE_BEGIN]", "[KNOWLEDGE_END]" in responses.
+
+
+{ask}
+"""
metagpt/actions/write_code.py

@@ -31,9 +31,7 @@ from metagpt.const import (
     TASK_FILE_REPO,
     TEST_OUTPUTS_FILE_REPO,
 )
-from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import CodingContext, Document, RunCodeResult
 from metagpt.utils.common import CodeParser
 from metagpt.utils.file_repository import FileRepository

@@ -92,7 +90,6 @@ ATTENTION: Use '##' to SPLIT SECTIONS, not '#'. Output format carefully referenc
 class WriteCode(Action):
     name: str = "WriteCode"
     context: Document = Field(default_factory=Document)
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
     async def write_code(self, prompt) -> str:
metagpt/actions/write_code_review.py

@@ -14,9 +14,7 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential
 from metagpt.actions import WriteCode
 from metagpt.actions.action import Action
 from metagpt.config import CONFIG
-from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.schema import CodingContext
 from metagpt.utils.common import CodeParser
 

@@ -123,7 +121,6 @@ REWRITE_CODE_TEMPLATE = """
 class WriteCodeReview(Action):
     name: str = "WriteCodeReview"
     context: CodingContext = Field(default_factory=CodingContext)
-    llm: BaseGPTAPI = Field(default_factory=LLM)
 
     @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
     async def write_code_review_and_rewrite(self, context_prompt, cr_prompt, filename):
@ -21,15 +21,14 @@ Example:
|
|||
This script uses the 'fire' library to create a command-line interface. It generates docstrings for the given Python code using
|
||||
the specified docstring style and adds them to the code.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
from pathlib import Path
|
||||
from typing import Literal, Optional
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from metagpt.actions.action import Action
|
||||
from metagpt.llm import LLM
|
||||
from metagpt.provider.base_gpt_api import BaseGPTAPI
|
||||
from metagpt.utils.common import OutputParser
|
||||
from metagpt.utils.common import OutputParser, aread, awrite
|
||||
from metagpt.utils.pycst import merge_docstring
|
||||
|
||||
PYTHON_DOCSTRING_SYSTEM = """### Requirements
|
||||
|
|
@ -163,7 +162,6 @@ class WriteDocstring(Action):
|
|||
|
||||
desc: str = "Write docstring for code."
|
||||
context: Optional[str] = None
|
||||
llm: BaseGPTAPI = Field(default_factory=LLM)
|
||||
|
||||
async def run(
|
||||
self,
|
||||
|
|
@@ -187,6 +185,16 @@ class WriteDocstring(Action):
        documented_code = OutputParser.parse_python_code(documented_code)
        return merge_docstring(code, documented_code)

    @staticmethod
    async def write_docstring(
        filename: str | Path, overwrite: bool = False, style: Literal["google", "numpy", "sphinx"] = "google"
    ) -> str:
        data = await aread(str(filename))
        code = await WriteDocstring().run(data, style=style)
        if overwrite:
            await awrite(filename, code)
        return code


def _simplify_python_code(code: str) -> None:
    """Simplifies the given Python code by removing expressions and the last if statement.

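The new `write_docstring` helper leans on `aread`/`awrite` from `metagpt.utils.common`, whose bodies are not shown in this diff. Assuming they are thin async file wrappers, they could look roughly like this (a guess, e.g. via aiofiles, not the project's actual implementation):

```python
import aiofiles

async def aread(filename: str, encoding: str = "utf-8") -> str:
    """Read a file without blocking the event loop."""
    async with aiofiles.open(filename, mode="r", encoding=encoding) as f:
        return await f.read()

async def awrite(filename, data: str, encoding: str = "utf-8") -> None:
    """Write a file without blocking the event loop."""
    async with aiofiles.open(str(filename), mode="w", encoding=encoding) as f:
        await f.write(data)
```
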
@@ -207,13 +215,4 @@ def _simplify_python_code(code: str) -> None:
if __name__ == "__main__":
    import fire

    async def run(filename: str, overwrite: bool = False, style: Literal["google", "numpy", "sphinx"] = "google"):
        with open(filename) as f:
            code = f.read()
        code = await WriteDocstring().run(code, style=style)
        if overwrite:
            with open(filename, "w") as f:
                f.write(code)
        return code

    fire.Fire(run)
    fire.Fire(WriteDocstring.write_docstring)

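The hand-rolled `async def run(...)` wrapper is dropped because `fire.Fire` can expose the coroutine static method directly, as the new last line shows. A minimal sketch of the pattern (the demo names below are illustrative):

```python
import fire

async def write_docstring_demo(filename: str, overwrite: bool = False) -> str:
    """Illustrative stand-in for WriteDocstring.write_docstring."""
    return f"would document {filename} (overwrite={overwrite})"

if __name__ == "__main__":
    # fire runs coroutine functions to completion itself, so no asyncio
    # boilerplate is needed here. Usage:
    #   python demo.py --filename=foo.py --overwrite=True
    fire.Fire(write_docstring_demo)
```
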
@@ -17,12 +17,11 @@ import json
from pathlib import Path
from typing import Optional

from pydantic import Field

from metagpt.actions import Action, ActionOutput
from metagpt.actions.action_node import ActionNode
from metagpt.actions.fix_bug import FixBug
from metagpt.actions.write_prd_an import (
    PROJECT_NAME,
    REFINE_PRD_NODE,
    REFINE_PRD_TEMPLATE,
    WP_IS_RELATIVE_NODE,

@@ -38,9 +37,7 @@ from metagpt.const import (
    PRDS_FILE_REPO,
    REQUIREMENT_FILENAME,
)
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.schema import BugFixContext, Document, Documents, Message
from metagpt.utils.common import CodeParser
from metagpt.utils.file_repository import FileRepository

@@ -67,9 +64,8 @@ NEW_REQ_TEMPLATE = """


class WritePRD(Action):
    name: str = ""
    name: str = "WritePRD"
    content: Optional[str] = None
    llm: BaseGPTAPI = Field(default_factory=LLM)

    async def run(self, with_messages, schema=CONFIG.prompt_schema, *args, **kwargs) -> ActionOutput | Message:
        # Determine which requirement documents need to be rewritten: Use LLM to assess whether new requirements are

@@ -81,7 +77,7 @@ class WritePRD(Action):
            await docs_file_repo.save(filename=REQUIREMENT_FILENAME, content="")
            bug_fix = BugFixContext(filename=BUGFIX_FILENAME)
            return Message(
                content=bug_fix.json(),
                content=bug_fix.model_dump_json(),
                instruct_content=bug_fix,
                role="",
                cause_by=FixBug,

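The `json()`-to-`model_dump_json()` changes in this file track the pydantic v1-to-v2 serialization API; v2's JSON output also keeps non-ASCII characters as-is, which is why the explicit `ensure_ascii=False` disappears in later hunks. A minimal sketch with a stand-in model (BugFixContext's fields are not shown in this diff, so the field below is assumed):

```python
from pydantic import BaseModel

class BugFixExample(BaseModel):  # hypothetical stand-in for BugFixContext
    filename: str

ctx = BugFixExample(filename="bugfix.md")
# pydantic v1 spelling (deprecated in v2): ctx.json(), ctx.dict()
# pydantic v2 spelling used by this commit:
print(ctx.model_dump_json())  # {"filename":"bugfix.md"}
print(ctx.model_dump())       # {'filename': 'bugfix.md'}
```
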
@@ -113,7 +109,7 @@ class WritePRD(Action):
        # Once all files under 'docs/prds/' have been compared with the newly added requirements, trigger the
        # 'publish' message to transition the workflow to the next stage. This design allows room for global
        # optimization in subsequent steps.
        return ActionOutput(content=change_files.json(), instruct_content=change_files)
        return ActionOutput(content=change_files.model_dump_json(), instruct_content=change_files)

    async def _run_new_requirement(self, requirements, schema=CONFIG.prompt_schema) -> ActionOutput:
        # sas = SearchAndSummarize()

@@ -125,7 +121,8 @@ class WritePRD(Action):
        # logger.info(rsp)
        project_name = CONFIG.project_name if CONFIG.project_name else ""
        context = CONTEXT_TEMPLATE.format(requirements=requirements, project_name=project_name)
        node = await WRITE_PRD_NODE.fill(context=context, llm=self.llm, schema=schema)
        exclude = [PROJECT_NAME.key] if project_name else []
        node = await WRITE_PRD_NODE.fill(context=context, llm=self.llm, exclude=exclude)  # schema=schema
        await self._rename_workspace(node)
        return node

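The new `exclude` logic keeps the LLM from regenerating a value that configuration has already pinned. Stripped of MetaGPT specifics, the idea reduces to filtering the keys the model is asked to fill; an illustrative sketch (the names below are not from the codebase):

```python
def keys_to_fill(all_keys: list[str], pinned: dict[str, str]) -> list[str]:
    """Return only the schema keys the LLM still needs to generate."""
    return [k for k in all_keys if k not in pinned]

pinned = {"Project Name": "game_2048"}  # e.g. already fixed via CONFIG.project_name
print(keys_to_fill(["Project Name", "Original Requirements", "User Stories"], pinned))
# -> ['Original Requirements', 'User Stories']
```
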
@@ -137,15 +134,13 @@ class WritePRD(Action):
    async def _merge(self, new_requirement_doc, prd_doc, schema=CONFIG.prompt_schema) -> Document:
        if not CONFIG.project_name:
            CONFIG.project_name = Path(CONFIG.project_path).name

        project_name = CONFIG.project_name if CONFIG.project_name else ""
        prompt = REFINE_PRD_TEMPLATE.format(
            requirements=new_requirement_doc.content,
            old_prd=prd_doc.content,
            project_name=project_name,
            project_name=CONFIG.project_name,
        )
        node = await REFINE_PRD_NODE.fill(context=prompt, llm=self.llm, schema=schema)
        prd_doc.content = node.instruct_content.json(ensure_ascii=False)
        prd_doc.content = node.instruct_content.model_dump_json()
        await self._rename_workspace(node)
        return prd_doc

@@ -157,7 +152,7 @@ class WritePRD(Action):
            new_prd_doc = Document(
                root_path=PRDS_FILE_REPO,
                filename=FileRepository.new_filename() + ".json",
                content=prd.instruct_content.json(ensure_ascii=False),
                content=prd.instruct_content.model_dump_json(),
            )
        elif await self._is_relative(requirement_doc, prd_doc):
            new_prd_doc = await self._merge(requirement_doc, prd_doc)

@@ -187,18 +182,13 @@ class WritePRD(Action):

    @staticmethod
    async def _rename_workspace(prd):
        if CONFIG.project_path:  # Updating on the old version has already been specified if it's valid. According to
            # Section 2.2.3.10 of RFC 135
            if not CONFIG.project_name:
                CONFIG.project_name = Path(CONFIG.project_path).name
            return

        if not CONFIG.project_name:
            if isinstance(prd, (ActionOutput, ActionNode)):
                ws_name = prd.instruct_content.dict()["Project Name"]
                ws_name = prd.instruct_content.model_dump()["Project Name"]
            else:
                ws_name = CodeParser.parse_str(block="Project Name", text=prd)
            CONFIG.project_name = ws_name
            if ws_name:
                CONFIG.project_name = ws_name
        CONFIG.git_repo.rename_root(CONFIG.project_name)

    async def _is_bugfix(self, context) -> bool:

@@ -42,7 +42,7 @@ REFINED_REQUIREMENTS = ActionNode(
PROJECT_NAME = ActionNode(
    key="Project Name",
    expected_type=str,
    instruction="Name the project using snake case style, like 'game_2048' or 'simple_crm'.",
    instruction="According to the content of \"Original Requirements,\" name the project using snake case style, like 'game_2048' or 'simple_crm'.",
    example="game_2048",
)

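Even with the more explicit instruction, the project name still comes back as free text from the LLM. A hedged sketch of the kind of snake_case normalization one might add defensively (not present in this commit):

```python
import re

def to_snake_case(name: str) -> str:
    """Coerce an LLM-proposed project name into snake_case."""
    name = re.sub(r"[^0-9a-zA-Z]+", "_", name)           # spaces/punctuation -> _
    name = re.sub(r"(?<=[a-z0-9])(?=[A-Z])", "_", name)  # split camelCase boundaries
    return name.strip("_").lower()

assert to_snake_case("Game 2048!") == "game_2048"
assert to_snake_case("SimpleCRM") == "simple_crm"
```
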
@@ -8,17 +8,13 @@

from typing import Optional

from pydantic import Field

from metagpt.actions.action import Action
from metagpt.llm import LLM
from metagpt.provider.base_gpt_api import BaseGPTAPI


class WritePRDReview(Action):
    name: str = ""
    context: Optional[str] = None
    llm: BaseGPTAPI = Field(default_factory=LLM)

    prd: Optional[str] = None
    desc: str = "Based on the PRD, conduct a PRD Review, providing clear and detailed feedback"
    prd_review_prompt_template: str = """

@@ -6,12 +6,8 @@
"""
from typing import List

from pydantic import Field

from metagpt.actions import Action
from metagpt.actions.action_node import ActionNode
from metagpt.llm import LLM
from metagpt.provider.base_gpt_api import BaseGPTAPI

REVIEW = ActionNode(
    key="Review",

@@ -38,7 +34,6 @@ class WriteReview(Action):
    """Write a review for the given context."""

    name: str = "WriteReview"
    llm: BaseGPTAPI = Field(default_factory=LLM)

    async def run(self, context):
        return await WRITE_REVIEW_NODE.fill(context=context, llm=self.llm, schema="json")

metagpt/actions/write_teaching_plan.py  (new file, 188 lines)

@@ -0,0 +1,188 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time    : 2023/7/27
@Author  : mashenquan
@File    : write_teaching_plan.py
"""
from typing import Optional

from metagpt.actions import Action
from metagpt.config import CONFIG
from metagpt.logs import logger


class WriteTeachingPlanPart(Action):
    """Write Teaching Plan Part"""

    context: Optional[str] = None
    topic: str = ""
    language: str = "Chinese"
    rsp: Optional[str] = None

    async def run(self, with_message=None, **kwargs):
        statement_patterns = TeachingPlanBlock.TOPIC_STATEMENTS.get(self.topic, [])
        statements = []
        for p in statement_patterns:
            s = self.format_value(p)
            statements.append(s)
        formatter = (
            TeachingPlanBlock.PROMPT_TITLE_TEMPLATE
            if self.topic == TeachingPlanBlock.COURSE_TITLE
            else TeachingPlanBlock.PROMPT_TEMPLATE
        )
        prompt = formatter.format(
            formation=TeachingPlanBlock.FORMATION,
            role=self.prefix,
            statements="\n".join(statements),
            lesson=self.context,
            topic=self.topic,
            language=self.language,
        )

        logger.debug(prompt)
        rsp = await self._aask(prompt=prompt)
        logger.debug(rsp)
        self._set_result(rsp)
        return self.rsp

    def _set_result(self, rsp):
        if TeachingPlanBlock.DATA_BEGIN_TAG in rsp:
            ix = rsp.index(TeachingPlanBlock.DATA_BEGIN_TAG)
            rsp = rsp[ix + len(TeachingPlanBlock.DATA_BEGIN_TAG) :]
        if TeachingPlanBlock.DATA_END_TAG in rsp:
            ix = rsp.index(TeachingPlanBlock.DATA_END_TAG)
            rsp = rsp[0:ix]
        self.rsp = rsp.strip()
        if self.topic != TeachingPlanBlock.COURSE_TITLE:
            return
        if "#" not in self.rsp or self.rsp.index("#") != 0:
            self.rsp = "# " + self.rsp

    def __str__(self):
        """Return `topic` value when str()"""
        return self.topic

    def __repr__(self):
        """Show `topic` value when debug"""
        return self.topic

    @staticmethod
    def format_value(value):
        """Fill parameters inside `value` with `options`."""
        if not isinstance(value, str):
            return value
        if "{" not in value:
            return value

        merged_opts = CONFIG.options or {}
        try:
            return value.format(**merged_opts)
        except KeyError as e:
            logger.warning(f"Parameter is missing:{e}")

        for k, v in merged_opts.items():
            value = value.replace("{" + f"{k}" + "}", str(v))
        return value


class TeachingPlanBlock:
    FORMATION = (
        '"Capacity and role" defines the role you are currently playing;\n'
        '\t"[LESSON_BEGIN]" and "[LESSON_END]" tags enclose the content of textbook;\n'
        '\t"Statement" defines the work detail you need to complete at this stage;\n'
        '\t"Answer options" defines the format requirements for your responses;\n'
        '\t"Constraint" defines the conditions that your responses must comply with.'
    )

    COURSE_TITLE = "Title"
    TOPICS = [
        COURSE_TITLE,
        "Teaching Hours",
        "Teaching Objectives",
        "Teaching Content",
        "Teaching Methods and Strategies",
        "Learning Activities",
        "Teaching Time Allocation",
        "Assessment and Feedback",
        "Teaching Summary and Improvement",
        "Vocabulary Cloze",
        "Choice Questions",
        "Grammar Questions",
        "Translation Questions",
    ]

    TOPIC_STATEMENTS = {
        COURSE_TITLE: [
            "Statement: Find and return the title of the lesson only in markdown first-level header format, "
            "without anything else."
        ],
        "Teaching Content": [
            'Statement: "Teaching Content" must include vocabulary, analysis, and examples of various grammar '
            "structures that appear in the textbook, as well as the listening materials and key points.",
            'Statement: "Teaching Content" must include more examples.',
        ],
        "Teaching Time Allocation": [
            'Statement: "Teaching Time Allocation" must include how much time is allocated to each '
            "part of the textbook content."
        ],
        "Teaching Methods and Strategies": [
            'Statement: "Teaching Methods and Strategies" must include teaching focus, difficulties, materials, '
            "procedures, in detail."
        ],
        "Vocabulary Cloze": [
            'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", '
            "create vocabulary cloze. The cloze should include 10 {language} questions with {teaching_language} "
            "answers, and it should also include 10 {teaching_language} questions with {language} answers. "
            "The key-related vocabulary and phrases in the textbook content must all be included in the exercises.",
        ],
        "Grammar Questions": [
            'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", '
            "create grammar questions. 10 questions."
        ],
        "Choice Questions": [
            'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", '
            "create choice questions. 10 questions."
        ],
        "Translation Questions": [
            'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", '
            "create translation questions. The translation should include 10 {language} questions with "
            "{teaching_language} answers, and it should also include 10 {teaching_language} questions with "
            "{language} answers."
        ],
    }

    # Teaching plan title
    PROMPT_TITLE_TEMPLATE = (
        "Do not refer to the context of the previous conversation records, "
        "start the conversation anew.\n\n"
        "Formation: {formation}\n\n"
        "{statements}\n"
        "Constraint: Writing in {language}.\n"
        'Answer options: Encloses the lesson title with "[TEACHING_PLAN_BEGIN]" '
        'and "[TEACHING_PLAN_END]" tags.\n'
        "[LESSON_BEGIN]\n"
        "{lesson}\n"
        "[LESSON_END]"
    )

    # Teaching plan parts:
    PROMPT_TEMPLATE = (
        "Do not refer to the context of the previous conversation records, "
        "start the conversation anew.\n\n"
        "Formation: {formation}\n\n"
        "Capacity and role: {role}\n"
        'Statement: Write the "{topic}" part of teaching plan, '
        'WITHOUT ANY content unrelated to "{topic}"!!\n'
        "{statements}\n"
        'Answer options: Enclose the teaching plan content with "[TEACHING_PLAN_BEGIN]" '
        'and "[TEACHING_PLAN_END]" tags.\n'
        "Answer options: Using proper markdown format from second-level header format.\n"
        "Constraint: Writing in {language}.\n"
        "[LESSON_BEGIN]\n"
        "{lesson}\n"
        "[LESSON_END]"
    )

    DATA_BEGIN_TAG = "[TEACHING_PLAN_BEGIN]"
    DATA_END_TAG = "[TEACHING_PLAN_END]"

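The `format_value` method above uses a two-stage substitution: a strict `str.format` first, then a manual replace of only the known options, so unknown `{placeholders}` survive instead of raising `KeyError`. A standalone sketch of the same behavior:

```python
def fill(template: str, options: dict) -> str:
    """Strict format first; on missing keys, substitute only what we have."""
    try:
        return template.format(**options)
    except KeyError:
        for k, v in options.items():
            template = template.replace("{" + k + "}", str(v))
        return template

print(fill("{teaching_language} lesson in {language}", {"language": "Chinese"}))
# -> '{teaching_language} lesson in Chinese'
```
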
@@ -10,14 +10,10 @@

from typing import Optional

from pydantic import Field

from metagpt.actions.action import Action
from metagpt.config import CONFIG
from metagpt.const import TEST_CODES_FILE_REPO
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.schema import Document, TestingContext
from metagpt.utils.common import CodeParser

@@ -44,8 +40,7 @@ you should correctly import the necessary classes based on these file locations!

class WriteTest(Action):
    name: str = "WriteTest"
    context: Optional[str] = None
    llm: BaseGPTAPI = Field(default_factory=LLM)
    context: Optional[TestingContext] = None

    async def write_code(self, prompt):
        code_rsp = await self._aask(prompt)

@@ -9,12 +9,8 @@

from typing import Dict

from pydantic import Field

from metagpt.actions import Action
from metagpt.llm import LLM
from metagpt.prompts.tutorial_assistant import CONTENT_PROMPT, DIRECTORY_PROMPT
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.utils.common import OutputParser

@@ -27,7 +23,6 @@ class WriteDirectory(Action):
    """

    name: str = "WriteDirectory"
    llm: BaseGPTAPI = Field(default_factory=LLM)
    language: str = "Chinese"

    async def run(self, topic: str, *args, **kwargs) -> Dict:

@@ -54,7 +49,6 @@ class WriteContent(Action):
    """

    name: str = "WriteContent"
    llm: BaseGPTAPI = Field(default_factory=LLM)
    directory: dict = dict()
    language: str = "Chinese"
