Merge branch 'mgx_ops' into basic_ability

This commit is contained in:
garylin2099 2024-07-19 10:13:48 +08:00
commit 4cc47366f8
94 changed files with 3333 additions and 1200 deletions

View file

@ -90,7 +90,7 @@ class Action(SerializationMixin, ContextMixin, BaseModel):
msgs = args[0]
context = "## History Messages\n"
context += "\n".join([f"{idx}: {i}" for idx, i in enumerate(reversed(msgs))])
return await self.node.fill(context=context, llm=self.llm)
return await self.node.fill(req=context, llm=self.llm)
async def run(self, *args, **kwargs):
"""Run action"""

View file

@ -18,6 +18,8 @@ from tenacity import retry, stop_after_attempt, wait_random_exponential
from metagpt.actions.action_outcls_registry import register_action_outcls
from metagpt.const import MARKDOWN_TITLE_PREFIX, USE_CONFIG_TIMEOUT
from metagpt.exp_pool import exp_cache
from metagpt.exp_pool.serializers import ActionNodeSerializer
from metagpt.llm import BaseLLM
from metagpt.logs import logger
from metagpt.provider.postprocess.llm_output_postprocess import llm_output_postprocess
@ -465,9 +467,11 @@ class ActionNode:
return self
@exp_cache(serializer=ActionNodeSerializer())
async def fill(
self,
context,
*,
req,
llm,
schema="json",
mode="auto",
@ -478,7 +482,7 @@ class ActionNode:
):
"""Fill the node(s) with mode.
:param context: Everything we should know when filling node.
:param req: Everything we should know when filling node.
:param llm: Large Language Model with pre-defined system message.
:param schema: json/markdown, determine example and output format.
- raw: free form text
@ -497,7 +501,7 @@ class ActionNode:
:return: self
"""
self.set_llm(llm)
self.set_context(context)
self.set_context(req)
if self.schema:
schema = self.schema

View file

@ -178,12 +178,12 @@ class WriteDesign(Action):
)
async def _new_system_design(self, context):
node = await DESIGN_API_NODE.fill(context=context, llm=self.llm, schema=self.prompt_schema)
node = await DESIGN_API_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
return node
async def _merge(self, prd_doc, system_design_doc):
context = NEW_REQ_TEMPLATE.format(old_design=system_design_doc.content, context=prd_doc.content)
node = await REFINED_DESIGN_NODE.fill(context=context, llm=self.llm, schema=self.prompt_schema)
node = await REFINED_DESIGN_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
system_design_doc.content = node.instruct_content.model_dump_json()
return system_design_doc
@ -254,25 +254,29 @@ class WriteDesign(Action):
extra_info=to_markdown_code_block(extra_info),
prd=to_markdown_code_block(prd_content),
)
if not legacy_design_filename:
node = await self._new_system_design(context=context)
design = Document(content=node.instruct_content.model_dump_json())
else:
old_design_content = await aread(filename=legacy_design_filename)
design = await self._merge(
prd_doc=Document(content=context), system_design_doc=Document(content=old_design_content)
)
async with DocsReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "design"}, "meta")
if not legacy_design_filename:
node = await self._new_system_design(context=context)
design = Document(content=node.instruct_content.model_dump_json())
else:
old_design_content = await aread(filename=legacy_design_filename)
design = await self._merge(
prd_doc=Document(content=context), system_design_doc=Document(content=old_design_content)
)
if not output_pathname:
output_pathname = Path(output_pathname) / "docs" / "sytem_design.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=design.content)
output_filename = output_pathname.parent / f"{output_pathname.stem}-class-diagram"
await self._save_data_api_design(design_doc=design, output_filename=output_filename)
output_filename = output_pathname.parent / f"{output_pathname.stem}-sequence-diagram"
await self._save_seq_flow(design_doc=design, output_filename=output_filename)
await save_json_to_markdown(content=design.content, output_filename=output_pathname.with_suffix(".md"))
if not output_pathname:
output_pathname = Path(output_pathname) / "docs" / "sytem_design.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=design.content)
output_filename = output_pathname.parent / f"{output_pathname.stem}-class-diagram"
await self._save_data_api_design(design_doc=design, output_filename=output_filename)
output_filename = output_pathname.parent / f"{output_pathname.stem}-sequence-diagram"
await self._save_seq_flow(design_doc=design, output_filename=output_filename)
md_output_filename = output_pathname.with_suffix(".md")
await save_json_to_markdown(content=design.content, output_filename=md_output_filename)
await reporter.async_report(md_output_filename, "path")
return f'System Design filename: "{str(output_pathname)}"'

View file

@ -30,6 +30,12 @@ from metagpt.logs import logger
from metagpt.utils.report import NotebookReporter
INSTALL_KEEPLEN = 500
INI_CODE = """import warnings
import logging
root_logger = logging.getLogger()
root_logger.setLevel(logging.ERROR)
warnings.filterwarnings('ignore')"""
class RealtimeOutputNotebookClient(NotebookClient):
@ -79,6 +85,12 @@ class ExecuteNbCode(Action):
)
self.reporter = NotebookReporter()
self.set_nb_client()
self.init_called = False
async def init_code(self):
    """Run the INI_CODE bootstrap snippet once to silence warnings/log noise in the notebook."""
    if not self.init_called:
        await self.run(INI_CODE)
        # Remember that initialization ran so repeated calls are no-ops.
        self.init_called = True
def set_nb_client(self):
self.nb_client = RealtimeOutputNotebookClient(
@ -175,9 +187,12 @@ class ExecuteNbCode(Action):
is_success = False
output_text = remove_escape_and_color_codes(output_text)
if is_success:
output_text = remove_log_and_warning_lines(output_text)
# The useful information of the exception is at the end,
# the useful information of normal output is at the beginning.
output_text = output_text[:keep_len] if is_success else output_text[-keep_len:]
if '<!DOCTYPE html>' not in output_text:
output_text = output_text[:keep_len] if is_success else output_text[-keep_len:]
parsed_output.append(output_text)
return is_success, ",".join(parsed_output)
@ -268,6 +283,18 @@ class ExecuteNbCode(Action):
return outputs, success
def remove_log_and_warning_lines(input_str: str) -> str:
    """Strip log/warning noise lines from notebook output.

    Drops every line whose lowercase form contains one of the known log
    markers, then rejoins the remaining lines and trims surrounding
    whitespace.
    """
    noise_markers = ("[warning]", "warning:", "[cv]", "[info]")
    kept_lines = []
    for line in input_str.split("\n"):
        lowered = line.lower()
        if any(marker in lowered for marker in noise_markers):
            continue
        kept_lines.append(line)
    return "\n".join(kept_lines).strip()
def remove_escape_and_color_codes(input_str: str):
# 使用正则表达式去除jupyter notebook输出结果中的转义字符和颜色代码
# Use regular expressions to get rid of escape characters and color codes in jupyter notebook output.

View file

@ -57,7 +57,7 @@ class WriteAnalysisCode(Action):
code = await self._debug_with_reflection(context=context, working_memory=working_memory)
else:
rsp = await self.llm.aask(context, system_msgs=[INTERPRETER_SYSTEM_MSG], **kwargs)
code = CodeParser.parse_code(text=rsp)
code = CodeParser.parse_code(text=rsp, lang="python")
return code

View file

@ -22,4 +22,4 @@ class GenerateQuestions(Action):
name: str = "GenerateQuestions"
async def run(self, context) -> ActionNode:
return await QUESTIONS.fill(context=context, llm=self.llm)
return await QUESTIONS.fill(req=context, llm=self.llm)

View file

@ -22,4 +22,4 @@ class PrepareInterview(Action):
name: str = "PrepareInterview"
async def run(self, context):
return await QUESTIONS.fill(context=context, llm=self.llm)
return await QUESTIONS.fill(req=context, llm=self.llm)

View file

@ -151,12 +151,12 @@ class WriteTasks(Action):
return task_doc
async def _run_new_tasks(self, context: str):
node = await PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
node = await PM_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
return node
async def _merge(self, system_design_doc, task_doc) -> Document:
context = NEW_REQ_TEMPLATE.format(context=system_design_doc.content, old_task=task_doc.content)
node = await REFINED_PM_NODE.fill(context, self.llm, schema=self.prompt_schema)
node = await REFINED_PM_NODE.fill(req=context, llm=self.llm, schema=self.prompt_schema)
task_doc.content = node.instruct_content.model_dump_json()
return task_doc
@ -180,16 +180,20 @@ class WriteTasks(Action):
if design_filename:
content = await aread(filename=design_filename)
context += to_markdown_code_block(content)
node = await self._run_new_tasks(context)
file_content = node.instruct_content.model_dump_json()
if not output_pathname:
output_pathname = Path(output_pathname) / "docs" / "project_schedule.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=file_content)
await save_json_to_markdown(content=file_content, output_filename=output_pathname.with_suffix(".md"))
async with DocsReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "task"}, "meta")
node = await self._run_new_tasks(context)
file_content = node.instruct_content.model_dump_json()
if not output_pathname:
output_pathname = Path(output_pathname) / "docs" / "project_schedule.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=file_content)
md_output_filename = output_pathname.with_suffix(".md")
await save_json_to_markdown(content=file_content, output_filename=md_output_filename)
await reporter.async_report(md_output_filename, "path")
return f'Project Schedule filename: "{str(output_pathname)}"'

View file

@ -578,7 +578,7 @@ class WriteCodeAN(Action):
async def run(self, context):
self.llm.system_prompt = "You are an outstanding engineer and can implement any code"
return await WRITE_MOVE_NODE.fill(context=context, llm=self.llm, schema="json")
return await WRITE_MOVE_NODE.fill(req=context, llm=self.llm, schema="json")
async def main():

View file

@ -229,7 +229,7 @@ class WriteCodePlanAndChange(Action):
code=await self.get_old_codes(),
)
logger.info("Writing code plan and change..")
return await WRITE_CODE_PLAN_AND_CHANGE_NODE.fill(context=context, llm=self.llm, schema="json")
return await WRITE_CODE_PLAN_AND_CHANGE_NODE.fill(req=context, llm=self.llm, schema="json")
async def get_old_codes(self) -> str:
old_codes = await self.repo.srcs.get_all()

View file

@ -211,7 +211,7 @@ class WritePRD(Action):
context = CONTEXT_TEMPLATE.format(requirements=requirement, project_name=project_name)
exclude = [PROJECT_NAME.key] if project_name else []
node = await WRITE_PRD_NODE.fill(
context=context, llm=self.llm, exclude=exclude, schema=self.prompt_schema
req=context, llm=self.llm, exclude=exclude, schema=self.prompt_schema
) # schema=schema
return node
@ -238,7 +238,7 @@ class WritePRD(Action):
async def _is_bugfix(self, context: str) -> bool:
if not self.repo.code_files_exists():
return False
node = await WP_ISSUE_TYPE_NODE.fill(context, self.llm)
node = await WP_ISSUE_TYPE_NODE.fill(req=context, llm=self.llm)
return node.get("issue_type") == "BUG"
async def get_related_docs(self, req: Document, docs: list[Document]) -> list[Document]:
@ -248,14 +248,14 @@ class WritePRD(Action):
async def _is_related(self, req: Document, old_prd: Document) -> bool:
context = NEW_REQ_TEMPLATE.format(old_prd=old_prd.content, requirements=req.content)
node = await WP_IS_RELATIVE_NODE.fill(context, self.llm)
node = await WP_IS_RELATIVE_NODE.fill(req=context, llm=self.llm)
return node.get("is_relative") == "YES"
async def _merge(self, req: Document, related_doc: Document) -> Document:
if not self.project_name:
self.project_name = Path(self.project_path).name
prompt = NEW_REQ_TEMPLATE.format(requirements=req.content, old_prd=related_doc.content)
node = await REFINED_PRD_NODE.fill(context=prompt, llm=self.llm, schema=self.prompt_schema)
node = await REFINED_PRD_NODE.fill(req=prompt, llm=self.llm, schema=self.prompt_schema)
related_doc.content = node.instruct_content.model_dump_json()
await self._rename_workspace(node)
return related_doc
@ -300,23 +300,27 @@ class WritePRD(Action):
user_requirement=to_markdown_code_block(val=user_requirement),
extra_info=to_markdown_code_block(val=extra_info),
)
req = Document(content=content)
if not legacy_prd_filename:
node = await self._new_prd(requirement=req.content)
new_prd = Document(content=node.instruct_content.model_dump_json())
else:
content = await aread(filename=legacy_prd_filename)
old_prd = Document(content=content)
new_prd = await self._merge(req=req, related_doc=old_prd)
async with DocsReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "prd"}, "meta")
req = Document(content=content)
if not legacy_prd_filename:
node = await self._new_prd(requirement=req.content)
new_prd = Document(content=node.instruct_content.model_dump_json())
else:
content = await aread(filename=legacy_prd_filename)
old_prd = Document(content=content)
new_prd = await self._merge(req=req, related_doc=old_prd)
if not output_pathname:
output_pathname = DEFAULT_WORKSPACE_ROOT / "docs" / "prd.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=new_prd.content)
competitive_analysis_filename = output_pathname.parent / f"{output_pathname.stem}-competitive-analysis"
await self._save_competitive_analysis(prd_doc=new_prd, output_filename=Path(competitive_analysis_filename))
await save_json_to_markdown(content=new_prd.content, output_filename=output_pathname.with_suffix(".md"))
if not output_pathname:
output_pathname = DEFAULT_WORKSPACE_ROOT / "docs" / "prd.json"
output_pathname.mkdir(parents=True, exist_ok=True)
elif not Path(output_pathname).is_absolute():
output_pathname = DEFAULT_WORKSPACE_ROOT / output_pathname
output_pathname = Path(output_pathname)
await awrite(filename=output_pathname, data=new_prd.content)
competitive_analysis_filename = output_pathname.parent / f"{output_pathname.stem}-competitive-analysis"
await self._save_competitive_analysis(prd_doc=new_prd, output_filename=Path(competitive_analysis_filename))
md_output_filename = output_pathname.with_suffix(".md")
await save_json_to_markdown(content=new_prd.content, output_filename=md_output_filename)
await reporter.async_report(md_output_filename, "path")
return f'PRD filename: "{str(output_pathname)}"'

View file

@ -36,4 +36,4 @@ class WriteReview(Action):
name: str = "WriteReview"
async def run(self, context):
return await WRITE_REVIEW_NODE.fill(context=context, llm=self.llm, schema="json")
return await WRITE_REVIEW_NODE.fill(req=context, llm=self.llm, schema="json")

View file

@ -13,6 +13,7 @@ from pydantic import BaseModel, model_validator
from metagpt.configs.browser_config import BrowserConfig
from metagpt.configs.embedding_config import EmbeddingConfig
from metagpt.configs.exp_pool_config import ExperiencePoolConfig
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.configs.mermaid_config import MermaidConfig
from metagpt.configs.redis_config import RedisConfig
@ -71,6 +72,9 @@ class Config(CLIParams, YamlModel):
enable_longterm_memory: bool = False
code_review_k_times: int = 2
# Experience Pool Parameters
exp_pool: ExperiencePoolConfig = ExperiencePoolConfig()
# Will be removed in the future
metagpt_tti_url: str = ""
language: str = "English"

View file

@ -0,0 +1,9 @@
from pydantic import Field
from metagpt.utils.yaml_model import YamlModel
class ExperiencePoolConfig(YamlModel):
    """Feature switches and storage location for the experience pool."""

    # Read side: allow fetching stored experiences.
    enable_read: bool = Field(default=False, description="Enable to read from experience pool.")
    # Write side: allow persisting newly created experiences.
    enable_write: bool = Field(default=False, description="Enable to write to experience pool.")
    # On-disk location of the Chroma vector data backing the pool.
    persist_path: str = Field(default=".chroma_exp_data", description="The persist path for experience pool.")

View file

@ -20,12 +20,6 @@ import metagpt
def get_metagpt_package_root():
"""Get the root directory of the installed package."""
package_root = Path(metagpt.__file__).parent.parent
for i in (".git", ".project_root", ".gitignore"):
if (package_root / i).exists():
break
else:
package_root = Path.cwd()
logger.info(f"Package root set to {str(package_root)}")
return package_root
@ -40,6 +34,12 @@ def get_metagpt_root():
else:
# Fallback to package root if no environment variable is set
project_root = get_metagpt_package_root()
for i in (".git", ".project_root", ".gitignore"):
if (project_root / i).exists():
break
else:
project_root = Path.cwd()
return project_root
@ -151,4 +151,4 @@ METAGPT_REPORTER_DEFAULT_URL = os.environ.get("METAGPT_REPORTER_URL", "")
AGENT = "agent"
# SWE agent
SWE_SETUP_PATH = METAGPT_ROOT / "metagpt/tools/swe_agent_commands/setup_default.sh"
SWE_SETUP_PATH = get_metagpt_package_root() / "metagpt/tools/swe_agent_commands/setup_default.sh"

View file

@ -27,7 +27,7 @@ class MGXEnv(Environment):
def publish_message(self, message: Message, user_defined_recipient: str = "", publicer: str = "") -> bool:
"""let the team leader take over message publishing"""
tl = self.get_role("Tim") # TeamLeader's name is Tim
tl = self.get_role("Mike") # TeamLeader's name is Mike
if user_defined_recipient:
# human user's direct chat message to a certain role
@ -91,7 +91,7 @@ class MGXEnv(Environment):
async def reply_to_human(self, content: str, sent_from: Role = None) -> str:
# NOTE: Can be overwritten in remote setting
return content
return "The monitor has verified the message, confirmation acknowledged. Refrain from resending duplicate messages."
def message_within_software_sop(self, message: Message) -> bool:
# Engineer, QaEngineer can be end of the SOP. Their msg requires routing outside.

View file

@ -0,0 +1,6 @@
"""Experience pool init."""
from metagpt.exp_pool.manager import exp_manager
from metagpt.exp_pool.decorator import exp_cache
__all__ = ["exp_manager", "exp_cache"]

View file

@ -0,0 +1,7 @@
"""Context builders init."""
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
from metagpt.exp_pool.context_builders.simple import SimpleContextBuilder
from metagpt.exp_pool.context_builders.role_zero import RoleZeroContextBuilder
__all__ = ["BaseContextBuilder", "SimpleContextBuilder", "RoleZeroContextBuilder"]

View file

@ -0,0 +1,30 @@
"""Action Node context builder."""
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
ACTION_NODE_CONTEXT_TEMPLATE = """
{req}
### Experiences
-----
{exps}
-----
## Instruction
Consider **Experiences** to generate a better answer.
"""
class ActionNodeContextBuilder(BaseContextBuilder):
    """Context builder used for ActionNode requests."""

    async def build(self, req: Any) -> str:
        """Return `req` augmented with formatted experiences.

        When no experiences are available, the original `req` is returned
        untouched; otherwise the template combining `req` with the
        experiences is rendered.
        """
        formatted = self.format_exps()
        if not formatted:
            return req
        return ACTION_NODE_CONTEXT_TEMPLATE.format(req=req, exps=formatted)

View file

@ -0,0 +1,41 @@
"""Base context builder."""
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Experience
EXP_TEMPLATE = """Given the request: {req}, We can get the response: {resp}, which scored: {score}."""
class BaseContextBuilder(BaseModel, ABC):
    """Abstract base for building an LLM context out of retrieved experiences."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    exps: list[Experience] = []

    @abstractmethod
    async def build(self, req: Any) -> Any:
        """Build context from req.

        Do not modify `req`. If modification is necessary, use copy.deepcopy to create a copy first.
        """

    def format_exps(self) -> str:
        """Format experiences into a numbered list of strings.

        Example:
            1. Given the request: req1, We can get the response: resp1, which scored: 8.
            2. Given the request: req2, We can get the response: resp2, which scored: 9.

        Returns:
            str: The formatted experiences as a string.
        """
        formatted = []
        for number, exp in enumerate(self.exps, start=1):
            has_score = exp.metric and exp.metric.score
            score_val = exp.metric.score.val if has_score else "N/A"
            formatted.append(f"{number}. {EXP_TEMPLATE.format(req=exp.req, resp=exp.resp, score=score_val)}")
        return "\n".join(formatted)

View file

@ -0,0 +1,56 @@
"""RoleZero context builder."""
import copy
import re
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
class RoleZeroContextBuilder(BaseContextBuilder):
    """Context builder for RoleZero-style chat message lists."""

    async def build(self, req: Any) -> list[dict]:
        """Builds the role zero context string.

        Note:
            1. The expected format for `req`, e.g., [{...}, {"role": "user", "content": "context"}].
            2. Returns the original `req` if it is empty or if there are no experiences.
            3. Creates a copy of req and replaces the example content in the copied req with actual experiences.
        """
        if not req:
            return req
        exps = self.format_exps()
        if not exps:
            return req
        req_copy = copy.deepcopy(req)
        req_copy[-1]["content"] = self.replace_example_content(req_copy[-1].get("content", ""), exps)
        return req_copy

    def replace_example_content(self, text: str, new_example_content: str) -> str:
        """Swap the '# Example' section of `text` for `new_example_content`."""
        return self.replace_content_between_markers(text, "# Example", "# Instruction", new_example_content)

    @staticmethod
    def replace_content_between_markers(text: str, start_marker: str, end_marker: str, new_content: str) -> str:
        """Replace the content between `start_marker` and `end_marker` in the text with `new_content`.

        Args:
            text (str): The original text.
            start_marker (str): The marker indicating the start of the content to be replaced, such as '# Example'.
            end_marker (str): The marker indicating the end of the content to be replaced, such as '# Instruction'.
            new_content (str): The new content to replace the old content.

        Returns:
            str: The text with the content replaced; `text` unchanged if the markers are not found.
        """
        # Escape the markers so any regex metacharacters in them are treated
        # literally (the previous version interpolated them unescaped).
        pattern = re.compile(f"({re.escape(start_marker)}\\n)(.*?)(\\n{re.escape(end_marker)})", re.DOTALL)

        def replacement(match):
            return f"{match.group(1)}{new_content}\n{match.group(3)}"

        return pattern.sub(replacement, text)

View file

@ -0,0 +1,26 @@
"""Simple context builder."""
from typing import Any
from metagpt.exp_pool.context_builders.base import BaseContextBuilder
SIMPLE_CONTEXT_TEMPLATE = """
## Context
### Experiences
-----
{exps}
-----
## User Requirement
{req}
## Instruction
Consider **Experiences** to generate a better answer.
"""
class SimpleContextBuilder(BaseContextBuilder):
    """Default context builder: wraps the request together with formatted experiences."""

    async def build(self, req: Any) -> str:
        """Render the simple context template from `req` and the stored experiences."""
        formatted_exps = self.format_exps()
        return SIMPLE_CONTEXT_TEMPLATE.format(req=req, exps=formatted_exps)

View file

@ -0,0 +1,211 @@
"""Experience Decorator."""
import asyncio
import functools
from typing import Any, Callable, Optional, TypeVar
from pydantic import BaseModel, ConfigDict, model_validator
from metagpt.config2 import config
from metagpt.exp_pool.context_builders import BaseContextBuilder, SimpleContextBuilder
from metagpt.exp_pool.manager import ExperienceManager, exp_manager
from metagpt.exp_pool.perfect_judges import BasePerfectJudge, SimplePerfectJudge
from metagpt.exp_pool.schema import Experience, Metric, QueryType, Score
from metagpt.exp_pool.scorers import BaseScorer, SimpleScorer
from metagpt.exp_pool.serializers import BaseSerializer, SimpleSerializer
from metagpt.logs import logger
from metagpt.utils.async_helper import NestAsyncio
from metagpt.utils.exceptions import handle_exception
ReturnType = TypeVar("ReturnType")
def exp_cache(
    _func: Optional[Callable[..., ReturnType]] = None,
    query_type: QueryType = QueryType.SEMANTIC,
    manager: Optional[ExperienceManager] = None,
    scorer: Optional[BaseScorer] = None,
    perfect_judge: Optional[BasePerfectJudge] = None,
    context_builder: Optional[BaseContextBuilder] = None,
    serializer: Optional[BaseSerializer] = None,
    tag: Optional[str] = None,
):
    """Decorator to get a perfect experience, otherwise, it executes the function, and create a new experience.

    Note:
        1. This can be applied to both synchronous and asynchronous functions.
        2. The function must have a `req` parameter, and it must be provided as a keyword argument.
        3. If `config.exp_pool.enable_read` is False, the decorator will just directly execute the function.
        4. If `config.exp_pool.enable_write` is False, the decorator will skip evaluating and saving the experience.

    Args:
        _func: Just to make the decorator more flexible, for example, it can be used directly with @exp_cache by default, without the need for @exp_cache().
        query_type: The type of query to be used when fetching experiences.
        manager: How to fetch, evaluate and save experience, etc. Default to `exp_manager`.
        scorer: Evaluate experience. Default to `SimpleScorer()`.
        perfect_judge: Determines if an experience is perfect. Defaults to `SimplePerfectJudge()`.
        context_builder: Build the context from exps and the function parameters. Default to `SimpleContextBuilder()`.
        serializer: Serializes the request and the function's return value for storage, deserializes the stored response back to the function's return value. Defaults to `SimpleSerializer()`.
        tag: An optional tag for the experience. Default to `ClassName.method_name` or `function_name`.
    """

    def decorator(func: Callable[..., ReturnType]) -> Callable[..., ReturnType]:
        # Reading disabled: return the function completely undecorated
        # (note: this also means nothing is written, per the class of Note 3/4).
        if not config.exp_pool.enable_read:
            return func

        @functools.wraps(func)
        async def get_or_create(args: Any, kwargs: Any) -> ReturnType:
            # One handler instance per call; it carries the whole
            # fetch -> short-circuit -> execute -> evaluate/save pipeline state.
            handler = ExpCacheHandler(
                func=func,
                args=args,
                kwargs=kwargs,
                query_type=query_type,
                exp_manager=manager,
                exp_scorer=scorer,
                exp_perfect_judge=perfect_judge,
                context_builder=context_builder,
                serializer=serializer,
                tag=tag,
            )

            await handler.fetch_experiences()

            # Short-circuit: return a stored "perfect" response without calling func.
            if exp := await handler.get_one_perfect_exp():
                return exp

            await handler.execute_function()

            if config.exp_pool.enable_write:
                await handler.process_experience()

            # The function's original (unserialized) return value.
            return handler._raw_resp

        # Wrap according to whether `func` is sync or async.
        return ExpCacheHandler.choose_wrapper(func, get_or_create)

    # Support both @exp_cache and @exp_cache(...) usage.
    return decorator(_func) if _func else decorator
class ExpCacheHandler(BaseModel):
    """Per-call state machine backing `exp_cache`: fetch, short-circuit, execute, evaluate, save."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # The wrapped function and the positional/keyword arguments of this call.
    func: Callable
    args: Any
    kwargs: Any
    query_type: QueryType = QueryType.SEMANTIC
    exp_manager: Optional[ExperienceManager] = None
    exp_scorer: Optional[BaseScorer] = None
    exp_perfect_judge: Optional[BasePerfectJudge] = None
    context_builder: Optional[BaseContextBuilder] = None
    serializer: Optional[BaseSerializer] = None
    tag: Optional[str] = None

    # Internal state populated as the pipeline runs.
    _exps: list[Experience] = None  # experiences fetched for this request
    _req: str = ""  # serialized request; keys both lookup and storage
    _resp: str = ""  # serialized response, stored in the new experience
    _raw_resp: Any = None  # the function's original (unserialized) return value
    _score: Score = None  # evaluation result for the new experience

    @model_validator(mode="after")
    def initialize(self):
        """Initialize default values for optional parameters if they are None.

        This is necessary because the decorator might pass None, which would override the default values set by Field.
        """
        self._validate_params()

        self.exp_manager = self.exp_manager or exp_manager
        self.exp_scorer = self.exp_scorer or SimpleScorer()
        self.exp_perfect_judge = self.exp_perfect_judge or SimplePerfectJudge()
        self.context_builder = self.context_builder or SimpleContextBuilder()
        self.serializer = self.serializer or SimpleSerializer()
        self.tag = self.tag or self._generate_tag()

        # Serialize the request once up front; every later step keys off it.
        self._req = self.serializer.serialize_req(**self.kwargs)

        return self

    async def fetch_experiences(self):
        """Fetch experiences by query_type."""
        self._exps = await self.exp_manager.query_exps(self._req, query_type=self.query_type, tag=self.tag)

    async def get_one_perfect_exp(self) -> Optional[Any]:
        """Get a potentially perfect experience, and resolve resp."""
        for exp in self._exps:
            if await self.exp_perfect_judge.is_perfect_exp(exp, self._req, *self.args, **self.kwargs):
                logger.info(f"Get one perfect experience: {exp.req[:20]}...")
                # Deserialize back to the wrapped function's return type.
                return self.serializer.deserialize_resp(exp.resp)

        return None

    async def execute_function(self):
        """Execute the function, and save resp."""
        self._raw_resp = await self._execute_function()
        self._resp = self.serializer.serialize_resp(self._raw_resp)

    @handle_exception
    async def process_experience(self):
        """Process experience.

        Evaluates and saves experience.
        Use `handle_exception` to ensure robustness, do not stop subsequent operations.
        """
        await self.evaluate_experience()
        self.save_experience()

    async def evaluate_experience(self):
        """Evaluate the experience, and save the score."""
        self._score = await self.exp_scorer.evaluate(self._req, self._resp)

    def save_experience(self):
        """Save the new experience."""
        exp = Experience(req=self._req, resp=self._resp, tag=self.tag, metric=Metric(score=self._score))
        self.exp_manager.create_exp(exp)

    @staticmethod
    def choose_wrapper(func, wrapped_func):
        """Choose how to run wrapped_func based on whether the function is asynchronous."""

        async def async_wrapper(*args, **kwargs):
            return await wrapped_func(args, kwargs)

        def sync_wrapper(*args, **kwargs):
            # Synchronous callers still need the async pipeline to run;
            # allow nesting into an already-running loop, then block on it.
            NestAsyncio.apply_once()
            return asyncio.get_event_loop().run_until_complete(wrapped_func(args, kwargs))

        return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper

    def _validate_params(self):
        # `req` must be keyword-only so it can be serialized and rewritten uniformly.
        if "req" not in self.kwargs:
            raise ValueError("`req` must be provided as a keyword argument.")

    def _generate_tag(self) -> str:
        """Generates a tag for the self.func.

        "ClassName.method_name" if the first argument is a class instance, otherwise just "function_name".
        """
        if self.args and hasattr(self.args[0], "__class__"):
            cls_name = type(self.args[0]).__name__
            return f"{cls_name}.{self.func.__name__}"

        return self.func.__name__

    async def _build_context(self) -> str:
        # Hand the fetched experiences to the builder so it can weave them into the context.
        self.context_builder.exps = self._exps

        return await self.context_builder.build(self.kwargs["req"])

    async def _execute_function(self):
        # NOTE: mutates kwargs["req"] in place — the function receives the
        # experience-augmented context, not the caller's original `req`.
        self.kwargs["req"] = await self._build_context()

        if asyncio.iscoroutinefunction(self.func):
            return await self.func(*self.args, **self.kwargs)

        return self.func(*self.args, **self.kwargs)

116
metagpt/exp_pool/manager.py Normal file
View file

@ -0,0 +1,116 @@
"""Experience Manager."""
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict
from metagpt.config2 import Config, config
from metagpt.exp_pool.schema import (
DEFAULT_COLLECTION_NAME,
DEFAULT_SIMILARITY_TOP_K,
Experience,
QueryType,
)
from metagpt.logs import logger
from metagpt.utils.exceptions import handle_exception
if TYPE_CHECKING:
from llama_index.vector_stores.chroma import ChromaVectorStore
class ExperienceManager(BaseModel):
    """ExperienceManager manages the lifecycle of experiences, including CRUD and optimization.

    Args:
        config (Config): Configuration for managing experiences.
        _storage (SimpleEngine): Engine to handle the storage and retrieval of experiences.
        _vector_store (ChromaVectorStore): The actual place where vectors are stored.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    config: Config = config

    # Both are lazily created on first property access.
    _storage: Any = None
    _vector_store: Any = None

    @property
    def storage(self):
        # Lazy init: the rag module is imported only when the pool is actually
        # used, so it stays an optional dependency.
        if self._storage is None:
            try:
                from metagpt.rag.engines import SimpleEngine
                from metagpt.rag.schema import ChromaRetrieverConfig, LLMRankerConfig
            except ImportError:
                raise ImportError("To use the experience pool, you need to install the rag module.")

            retriever_configs = [
                ChromaRetrieverConfig(
                    persist_path=self.config.exp_pool.persist_path,
                    collection_name=DEFAULT_COLLECTION_NAME,
                    similarity_top_k=DEFAULT_SIMILARITY_TOP_K,
                )
            ]
            ranker_configs = [LLMRankerConfig(top_n=DEFAULT_SIMILARITY_TOP_K)]

            self._storage: SimpleEngine = SimpleEngine.from_objs(
                retriever_configs=retriever_configs, ranker_configs=ranker_configs
            )
            logger.debug(f"exp_pool config: {self.config.exp_pool}")

        return self._storage

    @property
    def vector_store(self):
        # NOTE(review): reaches into the engine's private attributes
        # (`_retriever._vector_store`) — verify if the rag engine internals change.
        if not self._vector_store:
            self._vector_store: ChromaVectorStore = self.storage._retriever._vector_store

        return self._vector_store

    @handle_exception
    def create_exp(self, exp: Experience):
        """Adds an experience to the storage if writing is enabled.

        Args:
            exp (Experience): The experience to add.
        """
        if not self.config.exp_pool.enable_write:
            return

        self.storage.add_objs([exp])

    @handle_exception(default_return=[])
    async def query_exps(self, req: str, tag: str = "", query_type: QueryType = QueryType.SEMANTIC) -> list[Experience]:
        """Retrieves and filters experiences.

        Args:
            req (str): The query string to retrieve experiences.
            tag (str): Optional tag to filter the experiences by.
            query_type (QueryType): Default semantic to vector matching. exact to same matching.

        Returns:
            list[Experience]: A list of experiences that match the args.
        """
        if not self.config.exp_pool.enable_read:
            return []

        nodes = await self.storage.aretrieve(req)
        exps: list[Experience] = [node.metadata["obj"] for node in nodes]

        # TODO: filter by metadata
        if tag:
            exps = [exp for exp in exps if exp.tag == tag]

        # EXACT queries additionally require a byte-identical request match.
        if query_type == QueryType.EXACT:
            exps = [exp for exp in exps if exp.req == req]

        return exps

    def get_exps_count(self) -> int:
        """Get the total number of experiences."""
        return self.vector_store._collection.count()


# Module-level singleton used as the default manager by `exp_cache`.
exp_manager = ExperienceManager()

View file

@ -0,0 +1,6 @@
"""Perfect judges init."""
from metagpt.exp_pool.perfect_judges.base import BasePerfectJudge
from metagpt.exp_pool.perfect_judges.simple import SimplePerfectJudge
__all__ = ["BasePerfectJudge", "SimplePerfectJudge"]

View file

@ -0,0 +1,20 @@
"""Base perfect judge."""
from abc import ABC, abstractmethod
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Experience
class BasePerfectJudge(BaseModel, ABC):
    """Abstract interface deciding whether a stored experience can be reused as-is for a request."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @abstractmethod
    async def is_perfect_exp(self, exp: Experience, serialized_req: str, *args, **kwargs) -> bool:
        """Determine whether the experience is perfect.

        Args:
            exp (Experience): The experience to evaluate.
            serialized_req (str): The serialized request to compare against the experience's request.

        Returns:
            bool: True if the experience may be returned directly for this request.
        """

View file

@ -0,0 +1,27 @@
"""Simple perfect judge."""
from pydantic import ConfigDict
from metagpt.exp_pool.perfect_judges.base import BasePerfectJudge
from metagpt.exp_pool.schema import MAX_SCORE, Experience
class SimplePerfectJudge(BasePerfectJudge):
    """Judge an experience as perfect iff the request matches verbatim and the score is maximal."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    async def is_perfect_exp(self, exp: Experience, serialized_req: str, *args, **kwargs) -> bool:
        """Determine whether the experience is perfect.

        Args:
            exp (Experience): The experience to evaluate.
            serialized_req (str): The serialized request to compare against the experience's request.

        Returns:
            bool: True if the serialized request matches the experience's request and the experience's score is perfect, False otherwise.
        """
        metric = exp.metric
        score = metric.score if metric else None
        if not score:
            return False
        return exp.req == serialized_req and score.val == MAX_SCORE

View file

@ -0,0 +1,72 @@
"""Experience schema."""
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field
# Highest possible Score.val; a "perfect" experience scores exactly this value.
MAX_SCORE = 10

# Defaults for the vector store backing the experience pool.
DEFAULT_COLLECTION_NAME = "experience_pool"
DEFAULT_SIMILARITY_TOP_K = 2
class QueryType(str, Enum):
    """Type of query experiences."""

    EXACT = "exact"  # keep only experiences whose stored request equals the query verbatim
    SEMANTIC = "semantic"  # vector-similarity matching
class ExperienceType(str, Enum):
    """Experience Type."""

    SUCCESS = "success"
    FAILURE = "failure"
    INSIGHT = "insight"
class EntryType(Enum):
    """Experience Entry Type."""

    # NOTE(review): unlike QueryType/ExperienceType this is not a str-Enum and the
    # values are capitalized — confirm intent before relying on serialized values.
    AUTOMATIC = "Automatic"
    MANUAL = "Manual"
class Score(BaseModel):
    """Score in Metric."""

    # NOTE(review): the 1-10 range is stated in the description only; no validator enforces it here.
    val: int = Field(default=1, description="Value of the score, Between 1 and 10, higher is better.")
    reason: str = Field(default="", description="Reason for the value.")
class Metric(BaseModel):
    """Experience Metric."""

    time_cost: float = Field(default=0.000, description="Time cost, the unit is milliseconds.")
    money_cost: float = Field(default=0.000, description="Money cost, the unit is US dollars.")
    # Optional: defaults to None until a scorer fills it in (callers already null-check, e.g. SimplePerfectJudge).
    score: Optional[Score] = Field(default=None, description="Score, with value and reason.")
class Trajectory(BaseModel):
    """Experience Trajectory."""

    # A single plan/action/observation/reward step attached to an Experience.
    plan: str = Field(default="", description="The plan.")
    action: str = Field(default="", description="Action for the plan.")
    observation: str = Field(default="", description="Output of the action.")
    reward: int = Field(default=0, description="Measure the action.")
class Experience(BaseModel):
    """Experience."""

    # The serialized request; also used as the vector-index key (see rag_key).
    req: str = Field(..., description="")
    resp: str = Field(..., description="The type is string/json/code.")
    metric: Optional[Metric] = Field(default=None, description="Metric.")
    exp_type: ExperienceType = Field(default=ExperienceType.SUCCESS, description="The type of experience.")
    entry_type: EntryType = Field(default=EntryType.AUTOMATIC, description="Type of entry: Manual or Automatic.")
    tag: str = Field(default="", description="Tagging experience.")
    traj: Optional[Trajectory] = Field(default=None, description="Trajectory.")

    def rag_key(self):
        """Return the text used for vector indexing/retrieval (the request)."""
        return self.req

View file

@ -0,0 +1,6 @@
"""Scorers init."""
from metagpt.exp_pool.scorers.base import BaseScorer
from metagpt.exp_pool.scorers.simple import SimpleScorer
__all__ = ["BaseScorer", "SimpleScorer"]

View file

@ -0,0 +1,15 @@
"""Base scorer."""
from abc import ABC, abstractmethod
from pydantic import BaseModel, ConfigDict
from metagpt.exp_pool.schema import Score
class BaseScorer(BaseModel, ABC):
    """Abstract interface for grading a response against its originating request."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @abstractmethod
    async def evaluate(self, req: str, resp: str) -> Score:
        """Evaluates the quality of a response relative to a given request.

        Returns:
            Score: the grade and its justification.
        """

View file

@ -0,0 +1,65 @@
"""Simple scorer."""
import json
from pydantic import Field
from metagpt.exp_pool.schema import Score
from metagpt.exp_pool.scorers.base import BaseScorer
from metagpt.llm import LLM
from metagpt.provider.base_llm import BaseLLM
from metagpt.utils.common import CodeParser
# Prompt asking the LLM to grade {resp} against {req}; the reply must be JSON parsable into a Score.
SIMPLE_SCORER_TEMPLATE = """
Role: You are a highly efficient assistant, tasked with evaluating a response to a given request. The response is generated by a large language model (LLM).
I will provide you with a request and a corresponding response. Your task is to assess this response and provide a score from a human perspective.
## Context
### Request
{req}
### Response
{resp}
## Format Example
```json
{{
"val": "the value of the score, int from 1 to 10, higher is better.",
"reason": "an explanation supporting the score."
}}
```
## Instructions
- Understand the request and response given by the user.
- Evaluate the response based on its quality relative to the given request.
- Provide a score from 1 to 10, where 10 is the best.
- Provide a reason supporting your score.
## Constraint
Format: Just print the result in json format like **Format Example**.
## Action
Follow instructions, generate output and make sure it follows the **Constraint**.
"""
class SimpleScorer(BaseScorer):
    """Scorer that delegates grading to an LLM via SIMPLE_SCORER_TEMPLATE."""

    llm: BaseLLM = Field(default_factory=LLM)

    async def evaluate(self, req: str, resp: str) -> Score:
        """Evaluates the quality of a response relative to a given request, as scored by an LLM.

        Args:
            req (str): The request.
            resp (str): The response.

        Returns:
            Score: An object containing the score (1-10) and the reasoning.
        """
        # Ask the LLM for a verdict, then parse the JSON block out of its reply.
        verdict = await self.llm.aask(SIMPLE_SCORER_TEMPLATE.format(req=req, resp=resp))
        verdict_data = json.loads(CodeParser.parse_code(verdict, lang="json"))
        return Score(**verdict_data)

View file

@ -0,0 +1,9 @@
"""Serializers init."""
from metagpt.exp_pool.serializers.base import BaseSerializer
from metagpt.exp_pool.serializers.simple import SimpleSerializer
from metagpt.exp_pool.serializers.action_node import ActionNodeSerializer
from metagpt.exp_pool.serializers.role_zero import RoleZeroSerializer
__all__ = ["BaseSerializer", "SimpleSerializer", "ActionNodeSerializer", "RoleZeroSerializer"]

View file

@ -0,0 +1,36 @@
"""ActionNode Serializer."""
from __future__ import annotations
from typing import TYPE_CHECKING, Type
# Import ActionNode only for type checking to avoid circular imports
if TYPE_CHECKING:
from metagpt.actions.action_node import ActionNode
from metagpt.exp_pool.serializers.simple import SimpleSerializer
class ActionNodeSerializer(SimpleSerializer):
    """Serializer that stores only an ActionNode's instruct_content as JSON."""

    def serialize_resp(self, resp: ActionNode) -> str:
        """Persist just the structured output of the node as a JSON string."""
        content = resp.instruct_content
        return content.model_dump_json()

    def deserialize_resp(self, resp: str) -> ActionNode:
        """Customized deserialization, it will be triggered when a perfect experience is found.

        ActionNode cannot be serialized, it throws an error 'cannot pickle 'SSLContext' object'.
        """
        from metagpt.actions.action_node import ActionNode

        class _StoredInstructContent:
            """Minimal stand-in exposing the model_dump_json() interface callers rely on."""

            def __init__(self, json_data):
                self.json_data = json_data

            def model_dump_json(self):
                return self.json_data

        node = ActionNode(key="", expected_type=Type[str], instruction="", example="")
        node.instruct_content = _StoredInstructContent(resp)
        return node

View file

@ -0,0 +1,29 @@
"""Base serializer."""
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, ConfigDict
class BaseSerializer(BaseModel, ABC):
    """Abstract converter between in-memory req/resp objects and their stored string form."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @abstractmethod
    def serialize_req(self, **kwargs) -> str:
        """Serializes the request for storage.

        Do not modify kwargs. If modification is necessary, use copy.deepcopy to create a copy first.
        Note that copy.deepcopy may raise errors, such as TypeError: cannot pickle '_thread.RLock' object.
        """

    @abstractmethod
    def serialize_resp(self, resp: Any) -> str:
        """Serializes the function's return value for storage.

        Do not modify resp. The rest is the same as `serialize_req`.
        """

    @abstractmethod
    def deserialize_resp(self, resp: str) -> Any:
        """Deserializes the stored response back to the function's return value"""

View file

@ -0,0 +1,58 @@
"""RoleZero Serializer."""
import copy
import json
from metagpt.exp_pool.serializers.simple import SimpleSerializer
class RoleZeroSerializer(SimpleSerializer):
    """Serializer that keeps only the useful parts of a RoleZero request before storage."""

    def serialize_req(self, **kwargs) -> str:
        """Serialize the request for database storage, ensuring it is a string.

        Only extracts the necessary content from `req` because `req` may be very lengthy and could cause embedding errors.

        Args:
            req (list[dict]): The request to be serialized. Example:
                [
                    {"role": "user", "content": "..."},
                    {"role": "assistant", "content": "..."},
                    {"role": "user", "content": "context"},
                ]

        Returns:
            str: The serialized request as a JSON string.
        """
        req = kwargs.get("req", [])
        if not req:
            return ""

        kept = self._filter_req(req)

        state_data = kwargs.get("state_data")
        if state_data:
            kept.append({"role": "user", "content": state_data})

        return json.dumps(kept)

    def _filter_req(self, req: list[dict]) -> list[dict]:
        """Filter the `req` to include only necessary items.

        Args:
            req (list[dict]): The original request.

        Returns:
            list[dict]: The filtered request (deep copies, so callers' data stays untouched).
        """
        return [copy.deepcopy(entry) for entry in req if self._is_useful_content(entry["content"])]

    def _is_useful_content(self, content: str) -> bool:
        """Currently, only the content of the file is considered, and more judgments can be added later."""
        return "Command Editor.read executed: file_path" in content

View file

@ -0,0 +1,22 @@
"""Simple Serializer."""
from typing import Any
from metagpt.exp_pool.serializers.base import BaseSerializer
class SimpleSerializer(BaseSerializer):
    """Default serializer: plain str() conversion in both directions."""

    def serialize_req(self, **kwargs) -> str:
        """Just use `str` to convert the request object into a string."""
        req = kwargs.get("req", "")
        return str(req)

    def serialize_resp(self, resp: Any) -> str:
        """Just use `str` to convert the response object into a string."""
        return str(resp)

    def deserialize_resp(self, resp: str) -> Any:
        """Just return the string response as it is."""
        return resp

View file

@ -5,7 +5,7 @@
import json
import re
from unidiff import PatchedFile, PatchSet
from unidiff import PatchSet
from metagpt.actions.action import Action
from metagpt.ext.cr.utils.cleaner import (
@ -19,6 +19,7 @@ from metagpt.utils.common import parse_json_code_block
CODE_REVIEW_PROMPT_TEMPLATE = """
NOTICE
Let's think and work step by step.
With the given pull-request(PR) Patch, and referenced Points(Code Standards), you should compare each point with the code one-by-one.
The Patch code has added line number at the first character each line for reading, but the review should focus on new added code inside the `Patch` (lines starting with line number and '+').
@ -52,12 +53,13 @@ CodeReview guidelines:
- Don't suggest to add docstring unless it's necessary indeed.
- If the same code error occurs multiple times, it cannot be omitted, and all places need to be identified. But don't duplicate at the same place with the same comment!
- Every line of code in the patch needs to be carefully checked, and laziness cannot be omitted. It is necessary to find out all the places.
- The `comment` and `point_id` in the Output must correspond to and belong to the same one `Point`.
Just print the PR Patch comments in json format like **Output Format**.
"""
CODE_REVIEW_COMFIRM_SYSTEM_PROMPT = """
You are a professional engineer with Java stack, and good at code review comment result judgement.
You are a professional engineer with a {code_language} stack, good at judging code review comment results. Let's think and work step by step.
"""
CODE_REVIEW_COMFIRM_TEMPLATE = """
@ -76,7 +78,7 @@ CODE_REVIEW_COMFIRM_TEMPLATE = """
## Your Task:
1. First, check if the code meets the requirements and does not violate any defects. If it meets the requirements and does not violate any defects, print `False` and do not proceed with further judgment.
2. If the check in step 1 does not print `False`, proceed to further judgment. Based on the "Reference Example for Judgment" provided, determine if the "Code" and "Code Review Comments" match. If they match, print `True`; otherwise, print `False`.
2. Based on the `Reference Example for Judgment` provided, determine if the `Code` and `Code Review Comments` match. If they match, print `True`; otherwise, print `False`.
Note: Your output should only be `True` or `False` without any explanations.
"""
@ -89,25 +91,35 @@ class CodeReview(Action):
new_comments = []
logger.debug(f"original comments: {comments}")
for cmt in comments:
for p in points:
if int(cmt.get("point_id", -1)) == p.id:
code_start_line = cmt.get("code_start_line")
code_end_line = cmt.get("code_end_line")
code = get_code_block_from_patch(patch, code_start_line, code_end_line)
try:
if cmt.get("commented_file").endswith(".py"):
points = [p for p in points if p.language == "Python"]
elif cmt.get("commented_file").endswith(".java"):
points = [p for p in points if p.language == "Java"]
else:
continue
for p in points:
point_id = int(cmt.get("point_id", -1))
if point_id == p.id:
code_start_line = cmt.get("code_start_line")
code_end_line = cmt.get("code_end_line")
code = get_code_block_from_patch(patch, code_start_line, code_end_line)
new_comments.append(
{
"commented_file": cmt.get("commented_file"),
"code": code,
"code_start_line": code_start_line,
"code_end_line": code_end_line,
"comment": cmt.get("comment"),
"point_id": p.id,
"point": p.text,
"point_detail": p.detail,
}
)
break
new_comments.append(
{
"commented_file": cmt.get("commented_file"),
"code": code,
"code_start_line": code_start_line,
"code_end_line": code_end_line,
"comment": cmt.get("comment"),
"point_id": p.id,
"point": p.text,
"point_detail": p.detail,
}
)
break
except Exception:
pass
logger.debug(f"new_comments: {new_comments}")
return new_comments
@ -132,51 +144,49 @@ class CodeReview(Action):
code = get_code_block_from_patch(
patch, str(max(1, int(code_start_line) - 5)), str(int(code_end_line) + 5)
)
code_language = "Java"
code_file_ext = cmt.get("commented_file", ".java").split(".")[-1]
if code_file_ext == ".java":
code_language = "Java"
elif code_file_ext == ".py":
code_language = "Python"
prompt = CODE_REVIEW_COMFIRM_TEMPLATE.format(
code=code,
comment=cmt.get("comment"),
desc=point.text,
example=point.yes_example + "\n" + point.no_example,
)
resp = await self.llm.aask(prompt, system_msgs=[CODE_REVIEW_COMFIRM_SYSTEM_PROMPT])
system_prompt = [CODE_REVIEW_COMFIRM_SYSTEM_PROMPT.format(code_language=code_language)]
resp = await self.llm.aask(prompt, system_msgs=system_prompt)
if "True" in resp or "true" in resp:
new_comments.append(cmt)
logger.info(f"original comments num: {len(comments)}, confirmed comments num: {len(new_comments)}")
return new_comments
async def cr_by_full_points(self, patch: PatchSet, points: list[Point]):
async def cr_by_points(self, patch: PatchSet, points: list[Point]):
comments = []
points_str = "\n".join([f"{p.id} {p.text}" for p in points])
for patched_file in patch:
if not patched_file:
continue
if patched_file.path.endswith(".py"):
points_str = "\n".join([f"{p.id} {p.text}" for p in points if p.language == "Python"])
points = [p for p in points if p.language == "Python"]
elif patched_file.path.endswith(".java"):
points_str = "\n".join([f"{p.id} {p.text}" for p in points if p.language == "Java"])
points = [p for p in points if p.language == "Java"]
else:
continue
if len(str(patched_file).splitlines()) >= 50:
cr_by_segment_points_comments = await self.cr_by_segment_points(
patched_file=patched_file, points=points
)
comments += cr_by_segment_points_comments
continue
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments += json.loads(json_str)
return comments
async def cr_by_segment_points(self, patched_file: PatchedFile, points: list[Point]):
comments = []
group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
for group_point in group_points:
points_str = "\n".join([f"{p.id} {p.text}" for p in group_point])
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments_batch = json.loads(json_str)
comments += comments_batch
group_points = [points[i : i + 3] for i in range(0, len(points), 3)]
for group_point in group_points:
points_str = "id description\n"
points_str += "\n".join([f"{p.id} {p.text}" for p in group_point])
prompt = CODE_REVIEW_PROMPT_TEMPLATE.format(patch=str(patched_file), points=points_str)
resp = await self.llm.aask(prompt)
json_str = parse_json_code_block(resp)[0]
comments_batch = json.loads(json_str)
if comments_batch:
patched_file_path = patched_file.path
for c in comments_batch:
c["commented_file"] = patched_file_path
comments += comments_batch
return comments
@ -185,7 +195,7 @@ class CodeReview(Action):
patch: PatchSet = add_line_num_on_patch(patch)
result = []
comments = await self.cr_by_full_points(patch=patch, points=points)
comments = await self.cr_by_points(patch=patch, points=points)
if len(comments) != 0:
comments = self.format_comments(comments, points, patch)
comments = await self.confirm_comments(patch=patch, comments=comments, points=points)

View file

@ -81,17 +81,18 @@ class ModifyCode(Action):
}
resp = None
for patched_file in patch:
patch_target_file_name = str(patched_file.target_file).split("/", maxsplit=1)[-1]
if patch_target_file_name not in grouped_comments:
patch_target_file_name = str(patched_file.path).split("/")[-1]
if patched_file.path not in grouped_comments:
continue
comments_prompt = ""
index = 1
for grouped_comment in grouped_comments[patch_target_file_name]:
for grouped_comment in grouped_comments[patched_file.path]:
comments_prompt += f"""
<comment{index}>
{grouped_comment}
</comment{index}>\n
"""
index += 1
prompt = MODIFY_CODE_PROMPT.format(patch=patched_file, comments=comments_prompt)
output_dir = (
Path(output_dir)

File diff suppressed because it is too large Load diff

View file

@ -1,44 +1,26 @@
CMD_PROMPT = """
# Data Structure
class Task(BaseModel):
task_id: str = ""
dependent_task_ids: list[str] = []
instruction: str = ""
task_type: str = ""
assignee: str = "David"
from metagpt.strategy.task_type import TaskType
# Available Commands
{available_commands}
# Current Plan
{plan_status}
# Example
{example}
# Instructions
Based on the context, write a plan or modify an existing plan to achieve the goal. A plan consists of one to 3 tasks.
If plan is created, you should track the progress and update the plan accordingly, such as finish_current_task, append_task, reset_task, replace_task, etc.
Pay close attention to new user message, review the conversation history, use reply_to_human to respond to new user requirement.
Note:
1. If you keeping encountering errors, unexpected situation, or you are not sure of proceeding, use ask_human to ask for help.
2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task.
3. Each time you finish a task, use reply_to_human to report your progress.
Pay close attention to the Example provided, you can reuse the example for your current situation if it fits.
You may use any of the available commands to create a plan or update the plan. You may output multiple commands; they will be executed sequentially.
If you finish current task, you will automatically take the next task in the existing plan, use finish_task, DON'T append a new task.
# Your commands in a json array, in the following output format, always output a json array, if there is nothing to do, use the pass command:
Some text indicating your thoughts, such as how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands.
```json
[
{{
"command_name": str,
"args": {{"arg_name": arg_value, ...}}
}},
...
]
```
Notice: your output JSON data section must start with **```json [**
BROWSER_INSTRUCTION = """
4. Carefully choose to use or not use the browser tool to assist you in web tasks.
- When no click action is required, no need to use the browser tool to navigate to the webpage before scraping.
- If you need detail HTML content, write code to get it but not to use the browser tool.
- Make sure the command_name are certainly in Available Commands when you use the browser tool.
"""
TASK_TYPE_DESC = "\n".join([f"- **{tt.type_name}**: {tt.value.desc}" for tt in TaskType])
CODE_STATUS = """
**Code written**:
{code}
**Execution status**: {status}
**Execution result**: {result}
"""
BROWSER_INFO = """
Here are ordered web actions in the browser environment, note that you can not use the browser tool in the current environment.
{browser_actions}
The latest url is the one you should use to view the page. If view page has been done, directly use the variable and html content in executing result.
"""

View file

@ -11,6 +11,7 @@ EXTRA_INSTRUCTION = """
11. Write out EVERY CODE DETAIL, DON'T LEAVE TODO.
12. To modify code in a file, read the entire file, make changes, and update the file with the complete code, ensuring that no line numbers are included in the final write.
13. When a system design or project schedule is provided, at the end of the plan, add a CodeRview Task for each file; for example, if there are three files, add three CodeRview Tasks. For each CodeRview Task, just call ReviewAndRewriteCode.run.
14. When you are making a plan, it is highly recommended to plan all the coding and review tasks in your first response.
"""

View file

@ -5,10 +5,12 @@ When presented a current task, tackle the task using the available commands.
Pay close attention to new user message, review the conversation history, use RoleZero.reply_to_human to respond to new user requirement.
Note:
1. If you keeping encountering errors, unexpected situation, or you are not sure of proceeding, use RoleZero.ask_human to ask for help.
2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task.
2. Carefully review your progress at the current task, if your actions so far has not fulfilled the task instruction, you should continue with current task. Otherwise, finish current task by Plan.finish_current_task explicitly.
3. Each time you finish a task, use RoleZero.reply_to_human to report your progress.
4. Don't forget to append task first when all existing tasks are finished and new tasks are required.
5. Avoid repeating tasks you have already completed. And end loop when all requirements are met.
"""
# To ensure compatibility with hard-coded experience, do not add any other content between "# Example" and "# Available Commands".
CMD_PROMPT = """
# Data Structure
class Task(BaseModel):
@ -17,6 +19,9 @@ class Task(BaseModel):
instruction: str = ""
task_type: str = ""
assignee: str = ""
# Available Task Types
{task_type_desc}
# Available Commands
{available_commands}
@ -37,9 +42,10 @@ Special Command: Use {{"command_name": "end"}} to do nothing or indicate complet
Pay close attention to the Example provided, you can reuse the example for your current situation if it fits.
You may use any of the available commands to create a plan or update the plan. You may output multiple commands; they will be executed sequentially.
If you finish current task, you will automatically take the next task in the existing plan, use Plan.finish_task, DON'T append a new task.
Pay close attention to what you have done. Do not repeat your previous actions.
# Your commands in a json array, in the following output format. If there is nothing to do, use the pass or end command:
Some text indicating your thoughts, such as how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands. You must output ONE and ONLY ONE json array. DON'T output multiple json arrays with thoughts between them.
# Your commands in a json array, in the following output format with correct command_name and args. If there is nothing to do, use the pass or end command:
Some text indicating your thoughts before JSON is required, such as what tasks have been completed, what tasks are next, how you should update the plan status, respond to inquiry, or seek for help. Then a json array of commands. You must output ONE and ONLY ONE json array. DON'T output multiple json arrays with thoughts between them.
```json
[
{{
@ -49,6 +55,7 @@ Some text indicating your thoughts, such as how you should update the plan statu
...
]
```
Notice: your output JSON data must be a command list.
Notice: your output JSON data section must start with **```json [**
"""
@ -58,9 +65,9 @@ JSON_REPAIR_PROMPT = """
## Output Format
```json
Formatted JSON data
```
Help check if there are any formatting issues with the JSON data? If so, please help format it
Help check if there are any formatting issues with the JSON data? If so, please help format it.
"""
QUICK_THINK_PROMPT = """

View file

@ -7,7 +7,7 @@ https://github.com/princeton-nlp/SWE-agent/tree/main/config/configs
SWE_AGENT_SYSTEM_TEMPLATE = """
SETTING: You are an autonomous programmer, and you're working directly in the environment line with a special interface.
The special interface consists of a file editor that shows you {WINDOW} lines of a file at a time.
The special interface consists of a file editor that shows you 100 lines of a file at a time.
Please note that THE EDIT COMMAND REQUIRES PROPER INDENTATION. Pay attention to the original indentation when replacing the function.
If you'd like to add the line ' print(x)' you must fully write that out, with all those spaces before the code! Indentation is important and code that is not indented correctly will fail and require fixing before it can be run.
@ -50,7 +50,8 @@ MINIMAL_EXAMPLE = """
## Example of a actions trajectory
User Requirement and Issue: Fix the bug in the repo. Because the environment is not available, you DO NOT need to run and modify any existing test case files or add new test case files to ensure that the bug is fixed.
### Read and understand issue(Require):
### Read and understand issue:
Thought: Firstly, I need to review the detailed information of this issue in order to understand the problem that needs fixing.
{{
"command_name": "Browser.goto",
"args": {{
@ -60,43 +61,99 @@ User Requirement and Issue: Fix the bug in the repo. Because the environment is
->
### Locate issue(Require): Locate the issue in the code by searching for the relevant file, function, or class and open the file to view the code.
Thought: I need to come under the repo path
{{
"command_name": "Bash.run",
"args": {{
"cmd": "cd /workspace/django__django_3.0"
"cmd": "cd /workspace/MetaGPT"
}}
}}
->
Bash.run(cmd='search_dir_and_preview ASCIIUsernameValidator')
Thought: Let's start by locating the `openai_api.py` file.\nFirst, let's search for the `openai_api.py` file.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "open /workspace/django__django_3.0/django/contrib/auth/validators.py"
"cmd": "find_file 'openai_api.py'"
}}
}}
->
Thought: We have located both the `openai_api.py` file. Let's start by opening the `openai_api.py` file to apply the necessary changes.",
{{
"command_name": "Bash.run",
"args": {{
"cmd": "open '/workspace/MetaGPT/provider/openai_api.py'"
}}
}}
->
### Fix the Bug(Require): Fix the bug in the code by editing the relevant function, class or code snippet.
Thought: Now that I've found the bug, let's fix it by edit.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "edit 10:20 <<EOF\n regex = r'\A[\w.@+-]+\Z'\n message = _( \n 'Enter a valid username. This value may contain only English letters, ' \n 'numbers, and @/./+/-/_ characters.'\n )\n flags = re.ASCII\n\n@deconstructible\nclass UnicodeUsernameValidator(validators.RegexValidator):\n regex = r'\A[\w.@+-]+\Z'\nEOF"
"cmd": "edit 93:95 <<EOF\n usage = None\n collected_messages = []\n async for chunk in response:\n if chunk.usage is not None:\n usage = CompletionUsage(**chunk.usage)\n chunk_message = chunk.choices[0].delta.content or '' if chunk.choices else '' # extract the message\n finish_reason = (\n chunk.choices[0].finish_reason if chunk.choices and hasattr(chunk.choices[0], 'finish_reason') else None\n )\n log_llm_stream(chunk_message)\nEOF"
}}
}}
->
Thought: Due to a syntax error related to an undefined name 'Image', we need to address this issue even though it is not directly related to our work. Let's try importing the package to fix it.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "edit 14:14 <<EOF\nfrom PIL.Image import Image\nEOF"
}}
}}
->
### Submit the Changes(Require): Submit the changes to the repository.
### Save the Changes (Required): After all changes have been made, save them to the repository.
> You must choose one of the following two methods.
#### Just save the changes locally, it only need one action.
Thought: The bug has been fixed. Let's submit the changes.
{{
"command_name": "Bash.run",
"args": {{
"cmd": "submit"
}}
}}
Bash.run(cmd='submit')
->
#### Save the changes and commit them to the remote repository.
##### Push the changes from the local repository to the remote repository.
Thought: All changes have been saved, let's push the code to the remote repository.
{{
"command_name": "end",
"command_name": "git_push",
"args": {{
"local_path": "/workspace/MetaGPT",
"app_name": "github",
"comments": "Fix Issue #1275: produced TypeError: openai.types.completion_usage.CompletionUsage() argument after ** must be a mapping, not NoneType",
"new_branch": "test-fix"
}}
}}
->
##### Create a pull request (Optional): Merge the changes from the new branch into the master branch.
Thought: Now that the changes have been pushed to the remote repository, due to the user's requirement, let's create a pull request to merge the changes into the master branch.
[{{
"command_name": "git_create_pull",
"args": {{
"base": "master",
"head": "test-fix",
"base_repo_name": "garylin2099/MetaGPT",
"head_repo_name": "seeker-jie/MetaGPT",
"app_name": "github",
"title": "Fix Issue #1275: produced TypeError: openai.types.completion_usage.CompletionUsage() argument after ** must be a mapping, not NoneType",
"body": "This pull request addresses issue #1275 by ensuring that chunk.usage is not None before passing it to CompletionUsage."
}}
}}]
->
### Finally
Thought: All task has been done, let's end the conversation.
{{
"command_name": "end"
}}
"""
@ -162,7 +219,10 @@ IMPORTANT_TIPS = """
- If a search command fails, modify the search criteria and check for typos or incorrect paths, then try again.
- Based on feedback of observation or bash command in trajectory to guide adjustments in your search strategy.
13. If the task results in succeed, fail, or NO PROGRESS, output `submit`.
13. Save the code change:
- If you need to submit changes to the remote repository, first use the regular git commit command to save the changes locally, then select a command from the `Available Commands: [git_push, git_create_pull]` to submit the changes to the remote repository.
- If you don't need to submit code changes to the remote repository, use the command Bash.run('submit') to commit the changes locally.
14. If provided an issue link, you MUST go to the issue page using Browser tool to understand the issue before starting your fix.
@ -185,5 +245,4 @@ The current bash state is:
(Current directory: {{working_dir}})
Avoid repeating the same command. Instead, please think about the current situation and provide the next bash command to execute in JSON format:"
"""

View file

@ -28,7 +28,10 @@ your code
```
"""
REFLECTION_SYSTEM_MSG = """You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation."""
REFLECTION_SYSTEM_MSG = """
You are an AI Python assistant. You will be given your previous implementation code of a task, runtime error results, and a hint to change the implementation appropriately. Write your full implementation.
When encountering a ModuleNotFoundError, always install the required package, using the Terminal tool if available.
"""
DEBUG_REFLECTION_EXAMPLE = '''
[previous impl]:

View file

@ -53,3 +53,9 @@ The current task is about converting image into webpage code. please note the fo
- Single-Step Code Generation: Execute the entire code generation process in a single step, encompassing HTML, CSS, and JavaScript. Avoid fragmenting the code generation into multiple separate steps to maintain consistency and simplify the development workflow.
- Save webpages: Be sure to use the save method provided.
"""
# Prompt for taking on "web_scraping" tasks
WEB_SCRAPING_PROMPT = """
- Remember to view and print the necessary HTML content in a separate task to understand the structure first before scraping data. Such as `html_content = await view_page_element_to_scrape(...)\nprint(html_content)`.
- Since the data required by the user may not correspond directly to the actual HTML element names, you should thoroughly analyze the HTML structure and meanings of all elements in the execution result first. Ensure the `class_` in your code is derived directly from the actual HTML structure, not based on your prior knowledge. To ensure this, analyse the most suitable location of the `class_` in the actual HTML content before writing code.
"""

View file

@ -1,151 +1,111 @@
from __future__ import annotations
import json
from typing import Literal
import re
from typing import List
from pydantic import model_validator
from pydantic import Field, model_validator
from metagpt.actions import Action
from metagpt.actions.di.execute_nb_code import ExecuteNbCode
from metagpt.actions.di.write_analysis_code import WriteAnalysisCode
from metagpt.logs import logger
from metagpt.prompts.di.data_analyst import CMD_PROMPT
from metagpt.prompts.di.role_zero import JSON_REPAIR_PROMPT
from metagpt.roles.di.data_interpreter import DataInterpreter
from metagpt.schema import Message, TaskResult
from metagpt.strategy.experience_retriever import KeywordExpRetriever
from metagpt.strategy.planner import Planner
from metagpt.strategy.thinking_command import (
Command,
prepare_command_prompt,
run_commands,
)
from metagpt.tools.tool_recommend import BM25ToolRecommender
from metagpt.utils.common import CodeParser
from metagpt.utils.report import ThoughtReporter
from metagpt.utils.repair_llm_raw_output import repair_llm_raw_output, RepairType
from metagpt.prompts.di.data_analyst import BROWSER_INSTRUCTION, TASK_TYPE_DESC, CODE_STATUS, BROWSER_INFO
from metagpt.prompts.di.role_zero import ROLE_INSTRUCTION
from metagpt.roles.di.role_zero import RoleZero
from metagpt.schema import TaskResult, Message
from metagpt.strategy.experience_retriever import ExpRetriever, KeywordExpRetriever
from metagpt.tools.tool_recommend import BM25ToolRecommender, ToolRecommender
from metagpt.tools.tool_registry import register_tool
class DataAnalyst(DataInterpreter):
@register_tool(include_functions=["write_and_exec_code"])
class DataAnalyst(RoleZero):
name: str = "David"
profile: str = "DataAnalyst"
goal: str = "Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, git and github operation, etc."
react_mode: Literal["react"] = "react"
max_react_loop: int = 20 # used for react mode
task_result: TaskResult = None
available_commands: list[Command] = [
Command.APPEND_TASK,
Command.RESET_TASK,
Command.REPLACE_TASK,
Command.FINISH_CURRENT_TASK,
# Command.PUBLISH_MESSAGE,
Command.ASK_HUMAN,
Command.REPLY_TO_HUMAN,
# Command.PASS,
]
commands: list[dict] = [] # issued commands to be executed
user_requirement: str = ""
instruction: str = ROLE_INSTRUCTION + BROWSER_INSTRUCTION
task_type_desc: str = TASK_TYPE_DESC
tools: list[str] = ["Plan", "DataAnalyst", "RoleZero", "Browser"]
custom_tools: list[str] = ["machine learning", "web scraping", "Terminal"]
custom_tool_recommender: ToolRecommender = None
experience_retriever: ExpRetriever = KeywordExpRetriever()
use_reflection: bool = True
write_code: WriteAnalysisCode = Field(default_factory=WriteAnalysisCode, exclude=True)
execute_code: ExecuteNbCode = Field(default_factory=ExecuteNbCode, exclude=True)
@model_validator(mode="after")
def set_plan_and_tool(self) -> "DataInterpreter":
# We force using this parameter for DataAnalyst
assert self.react_mode == "react"
assert self.auto_run
assert self.use_plan
def set_custom_tool(self):
if self.custom_tools and not self.custom_tool_recommender:
self.custom_tool_recommender = BM25ToolRecommender(tools=self.custom_tools)
# Roughly the same part as DataInterpreter.set_plan_and_tool
self._set_react_mode(react_mode=self.react_mode, max_react_loop=self.max_react_loop, auto_run=self.auto_run)
if self.tools and not self.tool_recommender:
self.tool_recommender = BM25ToolRecommender(tools=self.tools)
self.set_actions([WriteAnalysisCode])
def _update_tool_execution(self):
self.tool_execution_map.update({
"DataAnalyst.write_and_exec_code": self.write_and_exec_code,
})
# HACK: Init Planner, control it through dynamic thinking; Consider formalizing as a react mode
self.planner = Planner(goal="", working_memory=self.rc.working_memory, auto_run=True)
async def parse_browser_actions(self, memory: List[Message]) -> List[Message]:
memory = await super().parse_browser_actions(memory)
browser_actions = []
for index, msg in enumerate(memory):
if msg.cause_by == "browser":
browser_url = re.search('URL: (.*?)\\n', msg.content).group(1)
pattern = re.compile(r"Command Browser\.(\w+) executed")
browser_actions.append({
'command': pattern.match(memory[index - 1].content).group(1),
'current url': browser_url
})
if browser_actions:
browser_actions = BROWSER_INFO.format(browser_actions=browser_actions)
self.rc.working_memory.add(Message(content=browser_actions, role="user", cause_by="browser"))
return memory
return self
async def write_and_exec_code(self):
"""Write a code block for current task and execute it in an interactive notebook environment."""
counter = 0
success = False
await self.execute_code.init_code()
async def _think(self) -> bool:
"""Useful in 'react' mode. Use LLM to decide whether and what to do next."""
self._set_state(0)
example = ""
if not self.planner.plan.goal:
self.user_requirement = self.get_memories()[-1].content
self.planner.plan.goal = self.user_requirement
example = KeywordExpRetriever().retrieve(self.user_requirement)
# plan info
plan_status = self.planner.get_plan_status()
plan_status = self.planner.plan.model_dump(include=["goal", "tasks"])
# for task in plan_status["tasks"]:
# task.pop("code")
# task.pop("result")
prompt = CMD_PROMPT.format(
plan_status=plan_status,
example=example,
available_commands=prepare_command_prompt(self.available_commands),
)
context = self.llm.format_msg(self.working_memory.get() + [Message(content=prompt, role="user")])
# print(*context, sep="\n" + "*" * 5 + "\n")
async with ThoughtReporter(enable_llm_stream=True):
rsp = await self.llm.aask(context)
# tool info
if self.custom_tool_recommender:
plan = self.planner.plan
fixed = ["Terminal"] if "Terminal" in self.custom_tools else None
tool_info = await self.custom_tool_recommender.get_recommended_tool_info(fixed=fixed, plan=plan)
else:
tool_info = ""
# 临时方案待role zero的版本完成可将本注释内的代码直接替换掉
# -------------开始---------------
try:
commands = CodeParser.parse_code(block=None, lang="json", text=rsp)
commands = json.loads(repair_llm_raw_output(output=commands, req_keys=[None], repair_type=RepairType.JSON))
except json.JSONDecodeError as e:
commands = await self.llm.aask(msg=JSON_REPAIR_PROMPT.format(json_data=rsp))
commands = json.loads(CodeParser.parse_code(block=None, lang="json", text=commands))
except Exception as e:
tb = traceback.format_exc()
print(tb)
while not success and counter < 3:
### write code ###
logger.info(f"ready to WriteAnalysisCode")
use_reflection = (counter > 0 and self.use_reflection) # only use reflection after the first trial
# 为了对LLM不按格式生成进行容错
if isinstance(commands, dict):
commands = commands["commands"] if "commands" in commands else [commands]
# -------------结束---------------
code = await self.write_code.run(
user_requirement=self.planner.plan.goal,
plan_status=plan_status,
tool_info=tool_info,
working_memory=self.rc.working_memory.get(),
use_reflection=use_reflection,
)
self.rc.working_memory.add(Message(content=code, role="assistant", cause_by=WriteAnalysisCode))
self.rc.working_memory.add(Message(content=rsp, role="assistant"))
await run_commands(self, commands, self.rc.working_memory)
return bool(self.rc.todo)
### execute code ###
result, success = await self.execute_code.run(code)
print(result)
async def _act(self) -> Message:
"""Useful in 'react' mode. Return a Message conforming to Role._act interface."""
logger.info(f"ready to take on task {self.planner.plan.current_task}")
self.rc.working_memory.add(Message(content=result, role="user", cause_by=ExecuteNbCode))
# TODO: Consider an appropriate location to insert task experience formally
experience = KeywordExpRetriever().retrieve(self.planner.plan.current_task.instruction, exp_type="task")
if experience and experience not in [msg.content for msg in self.rc.working_memory.get()]:
exp_msg = Message(content=experience, role="assistant")
self.rc.working_memory.add(exp_msg)
### process execution result ###
counter += 1
if success:
task_result = TaskResult(code=code, result=result, is_success=success)
self.planner.current_task.update_task_result(task_result)
code, result, is_success = await self._write_and_exec_code()
self.planner.plan.current_task.is_success = (
is_success # mark is_success, determine is_finished later in thinking
)
# FIXME: task result is always overwritten by the last act, whereas it can be made of of multiple acts
self.task_result = TaskResult(code=code, result=result, is_success=is_success)
return Message(content="Task completed", role="assistant", sent_from=self._setting, cause_by=WriteAnalysisCode)
async def _react(self) -> Message:
# NOTE: Diff 1: Each time landing here means observing news, set todo to allow news processing in _think
self._set_state(0)
actions_taken = 0
rsp = Message(content="No actions taken yet", cause_by=Action) # will be overwritten after Role _act
while actions_taken < self.rc.max_react_loop:
# NOTE: Diff 2: Keep observing within _react, news will go into memory, allowing adapting to new info
# add news from self._observe, the one called in self.run, consider removing when switching from working_memory to memory
self.working_memory.add_batch(self.rc.news)
await self._observe()
# add news from this self._observe, we need twice because _observe rewrites rc.news
self.working_memory.add_batch(self.rc.news)
# think
has_todo = await self._think()
if not has_todo:
break
# act
logger.debug(f"{self._setting}: {self.rc.state=}, will do {self.rc.todo}")
rsp = await self._act()
actions_taken += 1
return rsp # return output from the last action
status = 'Success' if success else 'Failed'
output = CODE_STATUS.format(code=code, status=status, result=result)
if success:
output += 'The code written has been executed successfully.'
self.rc.working_memory.clear()
return output

View file

@ -10,6 +10,9 @@ from pydantic import model_validator
from metagpt.actions import Action, UserRequirement
from metagpt.actions.di.run_command import RunCommand
from metagpt.exp_pool import exp_cache
from metagpt.exp_pool.context_builders import RoleZeroContextBuilder
from metagpt.exp_pool.serializers import RoleZeroSerializer
from metagpt.logs import logger
from metagpt.prompts.di.role_zero import (
CMD_PROMPT,
@ -41,6 +44,7 @@ class RoleZero(Role):
system_msg: list[str] = None # Use None to conform to the default value at llm.aask
cmd_prompt: str = CMD_PROMPT
instruction: str = ROLE_INSTRUCTION
task_type_desc: str = None
# React Mode
react_mode: Literal["react"] = "react"
@ -142,27 +146,50 @@ class RoleZero(Role):
tool_info = json.dumps({tool.name: tool.schemas for tool in tools})
### Make Decision Dynamically ###
instruction = self.instruction.strip()
prompt = self.cmd_prompt.format(
plan_status=plan_status,
current_task=current_task,
example=example,
available_commands=tool_info,
instruction=self.instruction.strip(),
task_type_desc=self.task_type_desc,
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
memory = self.rc.memory.get(self.memory_k)
memory = await self.parse_browser_actions(memory)
req = self.llm.format_msg(memory + [UserMessage(content=prompt)])
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "react"})
state_data = dict(
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
self.command_rsp = await self.llm_cached_aask(req=req, system_msgs=self.system_msg, state_data=state_data)
self.rc.memory.add(AIMessage(content=self.command_rsp))
return True
@exp_cache(context_builder=RoleZeroContextBuilder(), serializer=RoleZeroSerializer())
async def llm_cached_aask(self, *, req: list[dict], system_msgs: list[str], **kwargs) -> str:
"""Use `exp_cache` to automatically manage experiences.
The `RoleZeroContextBuilder` attempts to add experiences to `req`.
The `RoleZeroSerializer` extracts essential parts of `req` for the experience pool, trimming lengthy entries to retain only necessary parts.
"""
return await self.llm.aask(req, system_msgs=system_msgs)
async def parse_browser_actions(self, memory: List[Message]) -> List[Message]:
if not self.browser.is_empty_page:
pattern = re.compile(r"Command Browser\.(\w+) executed")
for index, msg in zip(range(len(memory), 0, -1), memory[::-1]):
if pattern.match(msg.content):
memory.insert(index, UserMessage(cause_by="browser", content=await self.browser.view()))
break
context = self.llm.format_msg(memory + [UserMessage(content=prompt)])
# print(*context, sep="\n" + "*" * 5 + "\n")
async with ThoughtReporter(enable_llm_stream=True):
self.command_rsp = await self.llm.aask(context, system_msgs=self.system_msg)
self.rc.memory.add(AIMessage(content=self.command_rsp))
return True
return memory
async def _act(self) -> Message:
if self.use_fixed_sop:
@ -217,12 +244,13 @@ class RoleZero(Role):
# routing
memory = self.get_memories(k=4)
context = self.llm.format_msg(memory + [UserMessage(content=QUICK_THINK_PROMPT)])
# print(context)
rsp = await self.llm.aask(context)
if "yes" in rsp.lower():
# llm call with the original context
answer = await self.llm.aask(self.llm.format_msg(memory))
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "quick"})
answer = await self.llm.aask(self.llm.format_msg(memory))
self.rc.memory.add(AIMessage(content=answer, cause_by=RunCommand))
await self.reply_to_human(content=answer)
rsp_msg = AIMessage(
@ -246,6 +274,8 @@ class RoleZero(Role):
"""
try:
commands = CodeParser.parse_code(block=None, lang="json", text=self.command_rsp)
if commands.endswith("]") and not commands.startswith("["):
commands = "[" + commands
commands = json.loads(repair_llm_raw_output(output=commands, req_keys=[None], repair_type=RepairType.JSON))
except json.JSONDecodeError:
logger.warning(f"Failed to parse JSON for: {self.command_rsp}. Trying to repair...")
@ -266,13 +296,15 @@ class RoleZero(Role):
async def _run_commands(self, commands) -> str:
outputs = []
for cmd in commands:
output = f"Command {cmd['command_name']} executed"
# handle special command first
if await self._run_special_command(cmd):
if self._is_special_command(cmd):
special_command_output = await self._run_special_command(cmd)
outputs.append(output + ":" + special_command_output)
continue
# run command as specified by tool_execute_map
if cmd["command_name"] in self.tool_execution_map:
tool_obj = self.tool_execution_map[cmd["command_name"]]
output = f"Command {cmd['command_name']} executed"
try:
if inspect.iscoroutinefunction(tool_obj):
tool_output = await tool_obj(**cmd["args"])
@ -293,19 +325,24 @@ class RoleZero(Role):
return outputs
async def _run_special_command(self, cmd) -> bool:
def _is_special_command(self, cmd) -> bool:
return cmd["command_name"] in self.special_tool_commands
async def _run_special_command(self, cmd) -> str:
"""command requiring special check or parsing"""
is_special_cmd = cmd["command_name"] in self.special_tool_commands
command_output = ""
if cmd["command_name"] == "Plan.finish_current_task" and not self.planner.plan.is_plan_finished():
# task_result = TaskResult(code=str(commands), result=outputs, is_success=is_success)
# self.planner.plan.current_task.update_task_result(task_result=task_result)
self.planner.plan.finish_current_task()
command_output = "Current task is finished. "
elif cmd["command_name"] == "end":
self._set_state(-1)
command_output = "Everything Done"
return is_special_cmd
return command_output
def _get_plan_status(self) -> Tuple[str, str]:
plan_status = self.planner.plan.model_dump(include=["goal", "tasks"])

View file

@ -1,5 +1,4 @@
import json
import os
from pydantic import Field
@ -10,6 +9,7 @@ from metagpt.prompts.di.swe_agent import (
SWE_AGENT_SYSTEM_TEMPLATE,
)
from metagpt.roles.di.role_zero import RoleZero
from metagpt.tools.libs.git import git_create_pull, git_push
from metagpt.tools.libs.terminal import Bash
@ -17,50 +17,46 @@ class SWEAgent(RoleZero):
name: str = "Swen"
profile: str = "Issue Solver"
goal: str = "Resolve GitHub issue"
_bash_window_size: int = 100
_system_msg: str = SWE_AGENT_SYSTEM_TEMPLATE
system_msg: list[str] = [_system_msg.format(WINDOW=_bash_window_size)]
system_msg: str = [SWE_AGENT_SYSTEM_TEMPLATE]
_instruction: str = NEXT_STEP_TEMPLATE
tools: list[str] = ["Bash", "Browser:goto,scroll"]
tools: list[str] = [
"Bash",
"Browser:goto,scroll",
"RoleZero",
"git_push",
"git_create_pull",
]
terminal: Bash = Field(default_factory=Bash, exclude=True)
output_diff: str = ""
max_react_loop: int = 40
run_eval: bool = False
async def _think(self) -> bool:
self._update_system_msg()
self._format_instruction()
await self._format_instruction()
res = await super()._think()
if self.run_eval:
await self._parse_commands_for_eval()
return res
def _update_system_msg(self):
"""
Sets the system message for the SWE agent.
def _update_tool_execution(self):
self.tool_execution_map.update(
{
"Bash.run": self.terminal.run,
"git_push": git_push,
"git_create_pull": git_create_pull,
}
)
Sets the `_bash_window_size` from the environment variable `WINDOW` if it exists.
Formats the `_system_msg` template with the current `_bash_window_size`.
"""
if os.getenv("WINDOW"):
self._bash_window_size = int(os.getenv("WINDOW"))
self.system_msg = [self._system_msg.format(WINDOW=self._bash_window_size)]
def _format_instruction(self):
async def _format_instruction(self):
"""
Formats the instruction message for the SWE agent.
Runs the "state" command in the terminal, parses its output as JSON,
and uses it to format the `_instruction` template.
"""
state_output = self.terminal.run("state")
state_output = await self.terminal.run("state")
bash_state = json.loads(state_output)
self.instruction = self._instruction.format(
WINDOW=self._bash_window_size, examples=MINIMAL_EXAMPLE, **bash_state
).strip()
return self.instruction
self.instruction = self._instruction.format(**bash_state).strip()
async def _parse_commands_for_eval(self):
"""
@ -81,7 +77,7 @@ class SWEAgent(RoleZero):
if "end" != cmd.get("command_name", ""):
return
try:
diff_output = self.terminal.run("git diff --cached")
diff_output = await self.terminal.run("git diff --cached")
clear_diff = extract_patch(diff_output)
logger.info(f"Diff output: \n{clear_diff}")
if clear_diff:
@ -90,8 +86,5 @@ class SWEAgent(RoleZero):
except Exception as e:
logger.error(f"Error during submission: {e}")
def _update_tool_execution(self):
self.tool_execution_map.update({"Bash.run": self.terminal.run})
def _retrieve_experience(self) -> str:
return MINIMAL_EXAMPLE

View file

@ -15,7 +15,7 @@ from metagpt.tools.tool_registry import register_tool
@register_tool(include_functions=["publish_team_message"])
class TeamLeader(RoleZero):
name: str = "Tim"
name: str = "Mike"
profile: str = "Team Leader"
goal: str = "Manage a team to assist users"
system_msg: list[str] = [SYSTEM_PROMPT]

View file

@ -464,8 +464,8 @@ class Task(BaseModel):
self.is_finished = False
def update_task_result(self, task_result: TaskResult):
self.code = task_result.code
self.result = task_result.result
self.code = self.code + "\n" + task_result.code
self.result = self.result + "\n" + task_result.result
self.is_success = task_result.is_success
@ -669,10 +669,14 @@ class Plan(BaseModel):
"""
return [task for task in self.tasks if task.is_finished]
def append_task(self, task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str):
def append_task(self, task_id: str, dependent_task_ids: list[str], instruction: str, assignee: str, task_type: str = ""):
"""Append a new task with task_id (number) to the end of existing task sequences. If dependent_task_ids is not empty, the task will depend on the tasks with the ids in the list."""
new_task = Task(
task_id=task_id, dependent_task_ids=dependent_task_ids, instruction=instruction, assignee=assignee
task_id=task_id,
dependent_task_ids=dependent_task_ids,
instruction=instruction,
assignee=assignee,
task_type=task_type
)
return self._append_task(new_task)

View file

@ -520,7 +520,7 @@ Explanation: The requirement is about software development. Assign each tasks to
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "I have assigned the tasks to the team members. Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly",
"content": "I have assigned the tasks to the team members. Alice will create the PRD, Bob will design the software architecture, Eve will break down the architecture into tasks, Alex will implement the core game logic, and Edward will write comprehensive tests. The team will work on the project accordingly"
}
},
{
@ -585,7 +585,7 @@ Explanation: You received a message from Alice, the Product Manager, that she ha
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture.",
"content": "Alice has completed the PRD. I have marked her task as finished and sent the PRD to Bob. Bob will work on the software architecture."
}
},
{
@ -602,7 +602,7 @@ Explanation: The user is asking for a general update on the project status. Give
{
"command_name": "RoleZero.reply_to_human",
"args": {
"content": "The team is currently working on ... We have completed ...",
"content": "The team is currently working on ... We have completed ..."
}
},
{
@ -610,6 +610,17 @@ Explanation: The user is asking for a general update on the project status. Give
}
]
```
## example 4
OBSERVATION: The current task is none and all tasks are finished.
Explanation: The last task was "Plan.finish_current_task" or "RoleZero.reply_to_human" and the current task is now none, which means everything is done. Just output the command "end".
```json
[
{
"command_name": "end"
}
]
```
"""
@ -629,6 +640,10 @@ class KeywordExpRetriever(ExpRetriever):
return DEPLOY_EXAMPLE
elif "issue" in context.lower():
return FIX_ISSUE_EXAMPLE
elif "https:" in context.lower() or "http:" in context.lower():
if "search" in context.lower() or "click" in context.lower():
return WEB_SCRAPING_EXAMPLE
return WEB_SCRAPING_EXAMPLE_SIMPLE
elif exp_type == "task":
if "diagnose" in context.lower():
return SEARCH_SYMBOL_EXAMPLE
@ -789,13 +804,13 @@ Explanation: I will first need to read the system design document and the projec
{
"command_name": "Editor.read",
"args": {
"path": "/tmp/docs/project_schedule.json"
"path": "/tmp/project_schedule.json"
}
},
{
"command_name": "Editor.read",
"args": {
"path": "/tmp/docs/system_design.json"
"path": "/tmp/system_design.json"
}
}
]
@ -890,3 +905,177 @@ Explanation: to review the code, call ReviewAndRewriteCode.run.
]
```
"""
WEB_SCRAPING_EXAMPLE = """
## action 1
User Requirement: Scrape and list the restaurant names of the first page by searching for the keyword `beef` on the website https://www.yelp.com/.
Explanation: The requirement is to scrape data from a website and extract information about restaurants. The process involves searching for restaurants with a specific keyword, retrieving and presenting the data in a structured format.
```json
[
{
"command_name": "Plan.append_task",
"args": {
"task_id": "1",
"dependent_task_ids": [],
"instruction": "Navigate to the yelp website.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "2",
"dependent_task_ids": ["1"],
"instruction": "Search for restaurants with the keyword 'beef'.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "3",
"dependent_task_ids": ["2"],
"instruction": "View the html content of the search result page before scraping data to understand the structure.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "4",
"dependent_task_ids": ["3"],
"instruction": "Parse the html content to scrape the restaurant names and print it.",
"assignee": "David"
}
}
]
```
## action 2
Explanation: To search for restaurants, I will now go to the website https://www.yelp.com/ first.
```json
[
{
"command_name": "Browser.goto",
"args": {
"url": "https://www.yelp.com/"
}
}
]
```
## action 3
Explanation: Since the Browser has successfully navigated to the website, and I find that the element id of the search box is 53. I will finish the current task and then use the Browser tool to type the keyword `beef` in the search box and press enter.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "Browser.type",
"args": {
"element_id": 53,
"content": "beef",
"press_enter_after": true
}
}
]
```
## action 4
Explanation: Since the Browser has successfully searched the keyword `beef`, I will finish the current task and then write code to view the html content of the page.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
## action 5
Explanation: Since I have successfully viewed the html content in the context, I will first finish the current task and then write code to parse the html content and extract the restaurant names.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
...
"""
WEB_SCRAPING_EXAMPLE_SIMPLE = """
## action 1
User Requirement: List the restaurant names on the website https://www.yelp.com/search?find_desc=beef&find_loc=New+York%2C+NY.
Explanation: The requirement is to scrape data from a website and extract information about restaurants. The process involves retrieving and presenting the data in a structured format.
```json
[
{
"command_name": "Plan.append_task",
"args": {
"task_id": "1",
"dependent_task_ids": [],
"instruction": "View the html content of the page before scraping data to understand the structure.",
"assignee": "David"
}
},
{
"command_name": "Plan.append_task",
"args": {
"task_id": "2",
"dependent_task_ids": ["1"],
"instruction": "Parse the html content to scrape the restaurant names and print it.",
"assignee": "David"
}
}
]
```
## action 2
Explanation: To scrape data from the website, I will first view the html content of the page.
```json
[
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
## action 3
Explanation: Since I have successfully viewed the html content in the context, I will first finish the current task and then write code to parse the html content and extract the restaurant names.
```json
[
{
"command_name": "Plan.finish_current_task",
"args": {}
},
{
"command_name": "DataAnalyst.write_and_exec_code",
"args": {}
}
]
```
...
"""

View file

@ -40,8 +40,14 @@ PLAN_STATUS = """
## Current Task
{current_task}
## Finished Section of Current Task
### code
{current_task_code}
### execution result
{current_task_result}
## Task Guidance
Write complete code for 'Current Task'. And avoid duplicating code from 'Finished Tasks', such as repeated import of packages, reading data, etc.
Write code for the incomplete sections of 'Current Task'. And avoid duplicating code from 'Finished Tasks' and 'Finished Section of Current Task', such as repeated import of packages, reading data, etc.
Specifically, {guidance}
"""
@ -173,6 +179,8 @@ class Planner(BaseModel):
code_written=code_written,
task_results=task_results,
current_task=self.current_task.instruction,
current_task_code=self.current_task.code if self.current_task.code else "",
current_task_result=self.current_task.result if self.current_task.result else "",
guidance=guidance,
)

View file

@ -39,7 +39,7 @@ class NaiveSolver(BaseSolver):
self.graph.topological_sort()
for key in self.graph.execution_order:
op = self.graph.nodes[key]
await op.fill(self.context, self.llm, mode="root")
await op.fill(req=self.context, llm=self.llm, mode="root")
class TOTSolver(BaseSolver):

View file

@ -8,7 +8,7 @@ from metagpt.prompts.task_type import (
FEATURE_ENGINEERING_PROMPT,
IMAGE2WEBPAGE_PROMPT,
MODEL_EVALUATE_PROMPT,
MODEL_TRAIN_PROMPT,
MODEL_TRAIN_PROMPT, WEB_SCRAPING_PROMPT,
)
@ -62,6 +62,7 @@ class TaskType(Enum):
WEBSCRAPING = TaskTypeDef(
name="web scraping",
desc="For scraping data from web pages.",
guidance=WEB_SCRAPING_PROMPT,
)
EMAIL_LOGIN = TaskTypeDef(
name="email login",

View file

@ -5,11 +5,11 @@
# @File : __init__.py
# @Desc :
from metagpt.tools.libs import (
# data_preprocess,
# feature_engineering,
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
# web_scraping,
web_scraping,
# email_login,
terminal,
editor,
@ -20,11 +20,11 @@ from metagpt.tools.libs import (
from metagpt.tools.libs.env import get_env, set_get_env_entry, default_get_env, get_env_description
_ = (
# data_preprocess,
# feature_engineering,
data_preprocess,
feature_engineering,
sd_engine,
gpt_v_generator,
# web_scraping,
web_scraping,
# email_login,
terminal,
editor,

View file

@ -24,6 +24,7 @@ from metagpt.utils.a11y_tree import (
scroll_page,
type_text,
)
from metagpt.utils.proxy_env import get_proxy_from_env
from metagpt.utils.report import BrowserReporter
@ -72,7 +73,7 @@ class Browser:
self.page: Optional[Page] = None
self.accessibility_tree: list = []
self.headless: bool = True
self.proxy = None
self.proxy = get_proxy_from_env()
self.is_empty_page = True
self.reporter = BrowserReporter()
@ -120,7 +121,7 @@ class Browser:
await scroll_page(self.page, direction)
return await self._wait_page()
async def goto(self, url: str, timeout: float = 30000):
async def goto(self, url: str, timeout: float = 90000):
"""Navigate to a specific URL."""
if self.page is None:
await self.start()
@ -161,7 +162,7 @@ class Browser:
await self._wait_until_page_idle(page)
self.accessibility_tree = await get_accessibility_tree(page)
await self.reporter.async_report(page, "page")
return f"SUCCESS, URL: {page.url}"
return f"SUCCESS, URL: {page.url} have been loaded."
def _register_page_event(self, page: Page):
page.last_busy_time = time.time()

View file

@ -3,6 +3,7 @@ from pathlib import Path
from typing import Optional
import aiofiles
from bs4 import BeautifulSoup
from unidiff import PatchSet
import metagpt.ext.cr
@ -29,7 +30,7 @@ class CodeReview:
Args:
patch_path: The local path of the patch file or the url of the pull request. Example: "/data/xxx-pr-1.patch", "https://github.com/xx/XX/pull/1362"
cr_output_file: Output file path where code review comments will be saved. Example: "cr/xxx-pr-1.json"
cr_point_file: File path for specifying code review points. Defaults to a predefined file.
cr_point_file: File path for specifying code review points. If not specified, this parameter is not passed..
"""
patch = await self._get_patch_content(patch_path)
cr_point_file = cr_point_file if cr_point_file else Path(metagpt.ext.cr.__file__).parent / "points.json"
@ -45,7 +46,7 @@ class CodeReview:
)
comments = await CodeReview_().run(patch, cr_points)
cr_output_path.parent.mkdir(exist_ok=True, parents=True)
async with aiofiles.open(cr_output_path, "w") as f:
async with aiofiles.open(cr_output_path, "w", encoding="utf-8") as f:
await f.write(json.dumps(comments, ensure_ascii=False))
await reporter.async_report(cr_output_path)
@ -65,7 +66,7 @@ class CodeReview:
output_dir: File path where code review comments are stored.
"""
patch = await self._get_patch_content(patch_path)
async with aiofiles.open(cr_file, "r") as f:
async with aiofiles.open(cr_file, "r", encoding="utf-8") as f:
comments = json.loads(await f.read())
await ModifyCode(pr="").run(patch, comments, output_dir)
return f"The fixed patch files store in {output_dir}"
@ -75,12 +76,14 @@ class CodeReview:
# async with aiohttp.ClientSession(trust_env=True) as client:
# async with client.get(f"{patch_path}.diff", ) as resp:
# patch_file_content = await resp.text()
browser = Browser()
browser.proxy = {"server": "http://127.0.0.1:20172"}
async with browser:
async with Browser() as browser:
await browser.goto(f"{patch_path}.diff")
patch_file_content = await browser.page.content()
if patch_file_content.startswith("<html>"):
soup = BeautifulSoup(patch_file_content, "html.parser")
pre = soup.find("pre")
if pre:
patch_file_content = pre.text
else:
async with aiofiles.open(patch_path) as f:
patch_file_content = await f.read()

View file

@ -38,7 +38,7 @@ class Editor:
# self.resource.report(path, "path")
def read(self, path: str) -> FileBlock:
"""Read the whole content of a file."""
"""Read the whole content of a file. It is strongly advised to utilize absolute paths"""
with open(path, "r") as f:
self.resource.report(path, "path")
lines = f.readlines()

View file

@ -11,10 +11,10 @@ from github.PullRequest import PullRequest
from metagpt.tools.tool_registry import register_tool
@register_tool(tags=["software development", "git", "Commit the changes and push to remote git repository."])
@register_tool(tags=["software development", "git", "Push to remote git repository."])
async def git_push(
local_path: Union[str, Path],
access_token: str,
app_name: str,
comments: str = "Commit",
new_branch: str = "",
) -> "GitBranch":
@ -22,38 +22,41 @@ async def git_push(
Pushes changes from a local Git repository to its remote counterpart.
Args:
local_path (Union[str, Path]): The path to the local Git repository.
access_token (str): The access token for authentication. Use `get_env` to get access token.
comments (str, optional): The commit message to use. Defaults to "Commit".
local_path (Union[str, Path]): The absolute path to the local Git repository.
app_name (str): The name of the platform hosting the repository (e.g., "github", "gitlab", "bitbucket").
comments (str, optional): Comments to be associated with the push. Defaults to "Commit".
new_branch (str, optional): The name of the new branch to create and push changes to.
If not provided, changes will be pushed to the current branch. Defaults to "".
Returns:
GitBranch: The branch to which the changes were pushed.
Raises:
ValueError: If the provided local_path does not point to a valid Git repository.
Example:
>>> url = "https://github.com/iorisa/snake-game.git"
>>> local_path = await git_clone(url=url)
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github") # Read access token from enviroment variables.
>>> comments = "Archive"
>>> app_name = "github"
>>> comments = "Commit"
>>> new_branch = "feature/new"
>>> branch = await git_push(local_path=local_path, access_token=access_token, comments=comments, new_branch=new_branch)
>>> branch = await git_push(local_path=local_path, app_name=app_name, comments=comments, new_branch=new_branch)
>>> base = branch.base
>>> head = branch.head
>>> repo_name = branch.repo_name
>>> print(f"base branch:'{base}', head branch:'{head}', repo_name:'{repo_name}'")
base branch:'master', head branch:'feature/new', repo_name:'iorisa/snake-game'
"""
from metagpt.tools.libs import get_env
from metagpt.utils.git_repository import GitRepository
if not GitRepository.is_git_dir(local_path):
raise ValueError("Invalid local git repository")
repo = GitRepository(local_path=local_path, auto_init=False)
# Read access token from environment variables.
access_token = await get_env(key="access_token", app_name=app_name)
branch = await repo.push(new_branch=new_branch, comments=comments, access_token=access_token)
return branch
@ -62,9 +65,9 @@ async def git_push(
async def git_create_pull(
base: str,
head: str,
app_name: str,
base_repo_name: str,
access_token: str,
head_repo_name: Optional[str] = None,
head_repo_name: str = None,
title: Optional[str] = None,
body: Optional[str] = None,
issue: Optional[Issue] = None,
@ -73,46 +76,16 @@ async def git_create_pull(
Creates a pull request on a Git repository. Use this tool in priority over Browser to create a pull request.
Args:
base (str): The base branch of the pull request.
head (str): The head branch of the pull request.
base_repo_name (str): The full repository name (user/repo) where the pull request will be created.
access_token (str): The access token for authentication. Use `get_env` to get access token.
head_repo_name (Optional[str], optional): The full repository name (user/repo) where the pull request will merge from. Defaults to None.
title (Optional[str], optional): The title of the pull request. Defaults to None.
body (Optional[str], optional): The body of the pull request. Defaults to None.
issue (Optional[Issue], optional): The related issue of the pull request. Defaults to None.
base (str): The name of the base branch where the pull request will be merged.
head (str): The name of the branch that contains the changes for the pull request.
app_name (str): The name of the platform hosting the repository (e.g., "github", "gitlab", "bitbucket").
base_repo_name (str): The full name of the target repository (in the format "user/repo") where the pull request will be created.
head_repo_name (Optional[str]): The full name of the source repository (in the format "user/repo") from which the changes will be pulled.
title (Optional[str]): The title of the pull request. Defaults to None.
body (Optional[str]): The description or body content of the pull request. Defaults to None.
issue (Optional[Issue]): An optional issue related to the pull request. Defaults to None.
Example:
>>> # push and create pull
>>> url = "https://github.com/iorisa/snake-game.git"
>>> local_path = await git_clone(url=url)
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github")
>>> comments = "Archive"
>>> new_branch = "feature/new"
>>> branch = await git_push(local_path=local_path, access_token=access_token, comments=comments, new_branch=new_branch)
>>> base = branch.base
>>> head = branch.head
>>> repo_name = branch.repo_name
>>> print(f"base branch:'{base}', head branch:'{head}', repo_name:'{repo_name}'")
base branch:'master', head branch:'feature/new', repo_name:'iorisa/snake-game'
>>> title = "feat: modify http lib",
>>> body = "Change HTTP library used to send requests"
>>> pr = await git_create_pull(
>>> base_repo_name=repo_name,
>>> base=base,
>>> head=head,
>>> title=title,
>>> body=body,
>>> access_token=access_token,
>>> )
>>> if isinstance(pr, PullRequest):
>>> print(pr)
PullRequest("feat: modify http lib")
>>> if isinstance(pr, str):
>>> print(f"Visit this url to create a new pull request: '{pr}'")
Visit this url to create a new pull request: 'https://github.com/iorisa/snake-game/compare/master...feature/new'
>>> # create pull request
>>> base_repo_name = "geekan/MetaGPT"
>>> head_repo_name = "ioris/MetaGPT"
@ -120,8 +93,7 @@ async def git_create_pull(
>>> head = "feature/http"
>>> title = "feat: modify http lib",
>>> body = "Change HTTP library used to send requests"
>>> from metagpt.tools.libs import get_env
>>> access_token = await get_env(key="access_token", app_name="github")
>>> app_name = "github"
>>> pr = await git_create_pull(
>>> base_repo_name=base_repo_name,
>>> head_repo_name=head_repo_name,
@ -129,7 +101,7 @@ async def git_create_pull(
>>> head=head,
>>> title=title,
>>> body=body,
>>> access_token=access_token,
>>> app_name=app_name,
>>> )
>>> if isinstance(pr, PullRequest):
>>> print(pr)
@ -141,8 +113,11 @@ async def git_create_pull(
Returns:
PullRequest: The created pull request.
"""
from metagpt.tools.libs import get_env
from metagpt.utils.git_repository import GitRepository
access_token = await get_env(key="access_token", app_name=app_name)
return await GitRepository.create_pull(
base=base,
head=head,

View file

@ -1,8 +1,10 @@
import subprocess
import threading
from queue import Queue
import asyncio
from asyncio import Queue
from asyncio.subprocess import PIPE, STDOUT
from typing import Optional
from metagpt.const import DEFAULT_WORKSPACE_ROOT, SWE_SETUP_PATH
from metagpt.logs import logger
from metagpt.tools.tool_registry import register_tool
from metagpt.utils.report import END_MARKER_VALUE, TerminalReporter
@ -19,62 +21,54 @@ class Terminal:
def __init__(self):
self.shell_command = ["bash"] # FIXME: should consider windows support later
self.command_terminator = "\n"
# Start a persistent shell process
self.process = subprocess.Popen(
self.shell_command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
executable="/bin/bash",
)
self.stdout_queue = Queue()
self.stdout_queue = Queue(maxsize=1000)
self.observer = TerminalReporter()
self.process: Optional[asyncio.subprocess.Process] = None
self._check_state()
async def _start_process(self):
# Start a persistent shell process
self.process = await asyncio.create_subprocess_exec(
*self.shell_command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, executable="bash"
)
await self._check_state()
def _check_state(self):
"""Check the state of the terminal, e.g. the current directory of the terminal process. Useful for agent to understand."""
print("The terminal is at:", self.run_command("pwd"))
async def _check_state(self):
"""
Check the state of the terminal, e.g. the current directory of the terminal process. Useful for agent to understand.
"""
output = await self.run_command("pwd")
logger.info("The terminal is at:", output)
def run_command(self, cmd: str, daemon=False) -> str:
async def run_command(self, cmd: str, daemon=False) -> str:
"""
Executes a specified command in the terminal and streams the output back in real time.
This command maintains state across executions, such as the current directory,
allowing for sequential commands to be contextually aware. The output from the
command execution is placed into `stdout_queue`, which can be consumed as needed.
allowing for sequential commands to be contextually aware.
Args:
cmd (str): The command to execute in the terminal.
daemon (bool): If True, executes the command in a background thread, allowing
the main program to continue execution. The command's output is
collected asynchronously in daemon mode and placed into `stdout_queue`.
daemon (bool): If True, executes the command in an asynchronous task, allowing
the main program to continue execution.
Returns:
str: The command's output or an empty string if `daemon` is True. Remember that
when `daemon` is True, the output is collected into `stdout_queue` and must
be consumed from there.
Note:
If `stdout_queue` is not periodically consumed, it could potentially grow indefinitely,
consuming memory. Ensure that there's a mechanism in place to consume this queue,
especially during long-running or output-heavy command executions.
when `daemon` is True, use the `get_stdout_output` method to get the output.
"""
if self.process is None:
await self._start_process()
# Send the command
self.process.stdin.write((cmd + self.command_terminator).encode())
self.process.stdin.write(
(f'echo "{END_MARKER_VALUE}"{self.command_terminator}').encode() # write EOF
f'echo "{END_MARKER_VALUE}"{self.command_terminator}'.encode() # write EOF
) # Unique marker to signal command end
self.process.stdin.flush()
await self.process.stdin.drain()
if daemon:
threading.Thread(target=self._read_and_process_output, args=(cmd,), daemon=True).start()
asyncio.create_task(self._read_and_process_output(cmd))
return ""
else:
return self._read_and_process_output(cmd)
return await self._read_and_process_output(cmd)
def execute_in_conda_env(self, cmd: str, env, daemon=False) -> str:
async def execute_in_conda_env(self, cmd: str, env, daemon=False) -> str:
"""
Executes a given command within a specified Conda environment automatically without
the need for manual activation. Users just need to provide the name of the Conda
@ -84,7 +78,7 @@ class Terminal:
cmd (str): The command to execute within the Conda environment.
env (str, optional): The name of the Conda environment to activate before executing the command.
If not specified, the command will run in the current active environment.
daemon (bool): If True, the command is run in a background thread, similar to `run_command`,
daemon (bool): If True, the command is run in an asynchronous task, similar to `run_command`,
affecting error logging and handling in the same manner.
Returns:
@ -96,19 +90,34 @@ class Terminal:
to ensure the specified environment is active for the command's execution.
"""
cmd = f"conda run -n {env} {cmd}"
return self.run_command(cmd, daemon=daemon)
return await self.run_command(cmd, daemon=daemon)
def _read_and_process_output(self, cmd):
with self.observer as observer:
async def get_stdout_output(self) -> str:
"""
Retrieves all collected output from background running commands and returns it as a string.
Returns:
str: The collected output from background running commands, returned as a string.
"""
output_lines = []
while not self.stdout_queue.empty():
line = await self.stdout_queue.get()
output_lines.append(line)
return "\n".join(output_lines)
async def _read_and_process_output(self, cmd, daemon=False) -> str:
async with self.observer as observer:
cmd_output = []
observer.report(cmd + self.command_terminator, "cmd")
# report the comman
await observer.async_report(cmd + self.command_terminator, "cmd")
# report the command
# Read the output until the unique marker is found.
# We read bytes directly from stdout instead of text because when reading text,
# '\r' is changed to '\n', resulting in excessive output.
tmp = b""
while True:
output = tmp + self.process.stdout.read(1)
output = tmp + await self.process.stdout.read(1)
if not output:
continue
*lines, tmp = output.splitlines(True)
for line in lines:
line = line.decode()
@ -116,20 +125,20 @@ class Terminal:
if ix >= 0:
line = line[0:ix]
if line:
observer.report(line, "output")
await observer.async_report(line, "output")
# report stdout in real-time
cmd_output.append(line)
return "".join(cmd_output)
# log stdout in real-time
observer.report(line, "output")
await observer.async_report(line, "output")
cmd_output.append(line)
self.stdout_queue.put(line)
if daemon:
await self.stdout_queue.put(line)
def close(self):
async def close(self):
"""Close the persistent shell process."""
self.process.stdin.close()
self.process.terminate()
self.process.wait()
await self.process.wait()
@register_tool(include_functions=["run"])
@ -142,10 +151,13 @@ class Bash(Terminal):
def __init__(self):
"""init"""
super().__init__()
self.run_command(f"cd {DEFAULT_WORKSPACE_ROOT}")
self.run_command(f"source {SWE_SETUP_PATH}")
self.start_flag = False
def run(self, cmd) -> str:
async def start(self):
await self.run_command(f"cd {DEFAULT_WORKSPACE_ROOT}")
await self.run_command(f"source {SWE_SETUP_PATH}")
async def run(self, cmd) -> str:
"""
Executes a bash command.
@ -184,9 +196,6 @@ class Bash(Terminal):
Arguments:
filename (str): The name of the file to create.
- submit
Submits your current code. it can only be executed once, the last action before the `end`.
- search_dir_and_preview <search_term> [<dir>]
Searches for search_term in all files in dir and gives their code preview
with line numbers. If dir is not provided, searches in the current directory.
@ -220,6 +229,13 @@ class Bash(Terminal):
end_line (int): The line number to end the edit at (inclusive), starting from 1.
replacement_text (str): The text to replace the current selection with, must conform to PEP8 standards.
- submit
Submits your current code locally. it can only be executed once, the last action before the `end`.
Note: Make sure to use these functions as per their defined arguments and behaviors.
"""
return self.run_command(cmd)
if not self.start_flag:
await self.start()
self.start_flag = True
return await self.run_command(cmd)

View file

@ -8,13 +8,15 @@ from metagpt.utils.parse_html import simplify_html
@register_tool(tags=["web scraping"])
async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bool = False) -> None:
"""view the HTML content of current page to understand the structure. When executed, the content will be printed out
async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bool = False) -> str:
"""view the HTML content of current page to understand the structure.
Args:
url (str): The URL of the web page to scrape.
requirement (str): Providing a clear and detailed requirement helps in focusing the inspection on the desired elements.
keep_links (bool): Whether to keep the hyperlinks in the HTML content. Set to True if links are required
Returns:
str: The HTML content of the page.
"""
async with Browser() as browser:
await browser.goto(url)
@ -36,7 +38,7 @@ async def view_page_element_to_scrape(url: str, requirement: str, keep_links: bo
html = "\n".join(i.text for i in nodes)
mem_fs.rm_file(filename)
print(html)
return html
# async def get_elements_outerhtml(self, element_ids: list[int]):

View file

@ -17,4 +17,3 @@ source $REPO_ROOT_DIR/metagpt/tools/swe_agent_commands/search.sh
source $REPO_ROOT_DIR/metagpt/tools/swe_agent_commands/edit_linting.sh
export SWE_CMD_WORK_DIR="$REPO_ROOT_DIR/workspace/swe_agent_workdir"
#sudo chmod 777 $REPO_ROOT_DIR/workspace/swe_agent_workdir

View file

@ -104,11 +104,13 @@ class ToolRecommender(BaseModel):
return ranked_tools
async def get_recommended_tool_info(self, **kwargs) -> str:
async def get_recommended_tool_info(self, fixed: list[str] = None, **kwargs) -> str:
"""
Wrap recommended tools with their info in a string, which can be used directly in a prompt.
"""
recommended_tools = await self.recommend_tools(**kwargs)
if fixed:
recommended_tools.extend([self.tools[tool_name] for tool_name in fixed if tool_name in self.tools])
if not recommended_tools:
return ""
tool_schemas = {tool.name: tool.schemas for tool in recommended_tools}

View file

@ -41,7 +41,7 @@ class WebPage(BaseModel):
def get_slim_soup(self, keep_links: bool = False):
soup = _get_soup(self.html)
keep_attrs = ["class"]
keep_attrs = ["class", "id"]
if keep_links:
keep_attrs.append("href")

View file

@ -0,0 +1,19 @@
import os
def get_proxy_from_env():
proxy_config = {}
server = None
for i in ("ALL_PROXY", "all_proxy", "HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"):
if os.environ.get(i):
server = os.environ.get(i)
if server:
proxy_config["server"] = server
no_proxy = os.environ.get("NO_PROXY") or os.environ.get("no_proxy")
if no_proxy:
proxy_config["bypass"] = no_proxy
if not proxy_config:
proxy_config = None
return proxy_config

View file

@ -266,10 +266,6 @@ class ThoughtReporter(ObjectReporter):
block: Literal[BlockType.THOUGHT] = BlockType.THOUGHT
async def __aenter__(self):
await self.async_report({})
return await super().__aenter__()
class FileReporter(ResourceReporter):
"""File resource callback for reporting complete file paths.

View file

@ -31,9 +31,9 @@ TOKEN_COSTS = {
"gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03}, # TODO add extra image price calculator
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4o": {"prompt": 0.005, "completion": 0.015},
"gpt-4o-2024-05-13": {"prompt": 0.005, "completion": 0.015},
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
"glm-3-turbo": {"prompt": 0.0007, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens
"glm-4": {"prompt": 0.014, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens
@ -147,11 +147,14 @@ FIREWORKS_GRADE_TOKEN_COSTS = {
# https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
TOKEN_MAX = {
"gpt-4o-2024-05-13": 128000,
"gpt-4o": 128000,
"gpt-4-0125-preview": 128000,
"gpt-4-turbo-preview": 128000,
"gpt-4-1106-preview": 128000,
"gpt-4-vision-preview": 128000,
"gpt-4-1106-vision-preview": 128000,
"gpt-4-turbo": 128000,
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,