From b4e09341b354d12d419977eb5907d310eb3d226a Mon Sep 17 00:00:00 2001 From: Arnaud Gelas Date: Thu, 18 Jan 2024 20:47:33 +0100 Subject: [PATCH 1/6] Stop generating unit tests for non-Python files When trying to create a simple HelloWorld with a test, metagpt creates a test for README.md --- metagpt/roles/qa_engineer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/metagpt/roles/qa_engineer.py b/metagpt/roles/qa_engineer.py index 0e323893e..45a1c7715 100644 --- a/metagpt/roles/qa_engineer.py +++ b/metagpt/roles/qa_engineer.py @@ -65,6 +65,8 @@ class QaEngineer(Role): code_doc = await src_file_repo.get(filename) if not code_doc: continue + if not code_doc.filename.endswith(".py"): + continue test_doc = await tests_file_repo.get("test_" + code_doc.filename) if not test_doc: test_doc = Document( From 6a9bd4a3914b565e98b04752d11cc58ee1f4afe2 Mon Sep 17 00:00:00 2001 From: Arnaud Gelas Date: Thu, 18 Jan 2024 20:48:35 +0100 Subject: [PATCH 2/6] Do not try installing requirements if there are none Do not try running pip install -r requirements.txt if the file does not exist or is empty. This avoids seeing an error in the log. --- metagpt/actions/run_code.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/metagpt/actions/run_code.py b/metagpt/actions/run_code.py index 30b06f1a6..4a26e5137 100644 --- a/metagpt/actions/run_code.py +++ b/metagpt/actions/run_code.py @@ -16,6 +16,7 @@ class. 
""" import subprocess +from pathlib import Path from typing import Tuple from pydantic import Field @@ -152,11 +153,23 @@ class RunCode(Action): return subprocess.run(cmd, check=check, cwd=cwd, env=env) @staticmethod - def _install_dependencies(working_directory, env): + def _install_requirements(working_directory, env): + file_path = Path(working_directory) / "requirements.txt" + if not file_path.exists(): + return + if file_path.stat().st_size == 0: + return install_command = ["python", "-m", "pip", "install", "-r", "requirements.txt"] logger.info(" ".join(install_command)) RunCode._install_via_subprocess(install_command, check=True, cwd=working_directory, env=env) + @staticmethod + def _install_pytest(working_directory, env): install_pytest_command = ["python", "-m", "pip", "install", "pytest"] logger.info(" ".join(install_pytest_command)) RunCode._install_via_subprocess(install_pytest_command, check=True, cwd=working_directory, env=env) + + @staticmethod + def _install_dependencies(working_directory, env): + RunCode._install_requirements(working_directory, env) + RunCode._install_pytest(working_directory, env) From 4680ff5e6232a9e17953c2db02140ab59f4baa80 Mon Sep 17 00:00:00 2001 From: mannaandpoem <1580466765@qq.com> Date: Thu, 25 Jan 2024 17:33:23 +0800 Subject: [PATCH 3/6] Only retain test_simple_add_calculator, skip other test cases --- tests/metagpt/test_incremental_dev.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/metagpt/test_incremental_dev.py b/tests/metagpt/test_incremental_dev.py index 6a26f9b83..3e4a1b901 100644 --- a/tests/metagpt/test_incremental_dev.py +++ b/tests/metagpt/test_incremental_dev.py @@ -50,33 +50,39 @@ def test_simple_add_calculator(): log_and_check_result(result) +@pytest.mark.skip def test_number_guessing_game(): result = get_incremental_dev_result(IDEAS[1], PROJECT_NAMES[1]) log_and_check_result(result) +@pytest.mark.skip def test_word_cloud(): result = get_incremental_dev_result(IDEAS[2], PROJECT_NAMES[2]) 
log_and_check_result(result) +@pytest.mark.skip def test_gomoku(): result = get_incremental_dev_result(IDEAS[3], PROJECT_NAMES[3]) log_and_check_result(result) +@pytest.mark.skip def test_dice_simulator_new(): for i, (idea, project_name) in enumerate(zip(IDEAS[4:6], PROJECT_NAMES[4:6]), start=1): result = get_incremental_dev_result(idea, project_name) log_and_check_result(result, "refine_" + str(i)) +@pytest.mark.skip def test_refined_pygame_2048(): for i, (idea, project_name) in enumerate(zip(IDEAS[6:8], PROJECT_NAMES[6:8]), start=1): result = get_incremental_dev_result(idea, project_name) log_and_check_result(result, "refine_" + str(i)) +@pytest.mark.skip def test_refined_snake_game(): for i, (idea, project_name) in enumerate(zip(IDEAS[8:10], PROJECT_NAMES[8:10]), start=1): result = get_incremental_dev_result(idea, project_name) From cfadd54a3a5aee02416ea092087cdb65b6171611 Mon Sep 17 00:00:00 2001 From: geekan Date: Fri, 26 Jan 2024 15:02:34 +0800 Subject: [PATCH 4/6] Update token_counter.py --- metagpt/utils/token_counter.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py index 885eb37d7..feec20928 100644 --- a/metagpt/utils/token_counter.py +++ b/metagpt/utils/token_counter.py @@ -4,10 +4,11 @@ @Time : 2023/5/18 00:40 @Author : alexanderwu @File : token_counter.py -ref1: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb -ref2: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py -ref3: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py -ref4: https://ai.google.dev/models/gemini +ref1: https://openai.com/pricing +ref2: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb +ref3: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py +ref4: 
https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py +ref5: https://ai.google.dev/models/gemini """ import tiktoken @@ -25,7 +26,10 @@ TOKEN_COSTS = { "gpt-4-32k": {"prompt": 0.06, "completion": 0.12}, "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12}, "gpt-4-0613": {"prompt": 0.06, "completion": 0.12}, + "gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03}, + "gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03}, "gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03}, + "gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03}, "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0}, "glm-3-turbo": {"prompt": 0.0, "completion": 0.0007}, # 128k version, prompt + completion tokens=0.005¥/k-tokens "glm-4": {"prompt": 0.0, "completion": 0.014}, # 128k version, prompt + completion tokens=0.1¥/k-tokens @@ -47,7 +51,10 @@ TOKEN_MAX = { "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-4-0613": 8192, + "gpt-4-turbo-preview": 128000, + "gpt-4-0125-preview": 128000, "gpt-4-1106-preview": 128000, + "gpt-4-1106-vision-preview": 128000, "text-embedding-ada-002": 8192, "chatglm_turbo": 32768, "gemini-pro": 32768, From 59afc5301f55037e7b379497767f4af62fd65b31 Mon Sep 17 00:00:00 2001 From: geekan Date: Fri, 26 Jan 2024 15:08:08 +0800 Subject: [PATCH 5/6] update token counter --- metagpt/utils/token_counter.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py index feec20928..94506e373 100644 --- a/metagpt/utils/token_counter.py +++ b/metagpt/utils/token_counter.py @@ -79,7 +79,10 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0613"): "gpt-4-32k-0314", "gpt-4-0613", "gpt-4-32k-0613", + "gpt-4-turbo-preview", + "gpt-4-0125-preview", "gpt-4-1106-preview", + "gpt-4-1106-vision-preview", }: tokens_per_message = 3 # # every reply is primed with <|start|>assistant<|message|> tokens_per_name = 1 From a6bdd0201765e3f6b1dca8aa398f25496d3158b3 
Mon Sep 17 00:00:00 2001 From: geekan Date: Fri, 26 Jan 2024 15:03:17 +0800 Subject: [PATCH 6/6] add ActionNode.from_pydantic --- metagpt/actions/action_node.py | 79 +++++++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/metagpt/actions/action_node.py b/metagpt/actions/action_node.py index ca41c76a5..162ab90eb 100644 --- a/metagpt/actions/action_node.py +++ b/metagpt/actions/action_node.py @@ -12,7 +12,7 @@ import json from enum import Enum from typing import Any, Dict, List, Optional, Tuple, Type, Union -from pydantic import BaseModel, create_model, model_validator +from pydantic import BaseModel, Field, create_model, model_validator from tenacity import retry, stop_after_attempt, wait_random_exponential from metagpt.actions.action_outcls_registry import register_action_outcls @@ -186,11 +186,27 @@ class ActionNode: obj.add_children(nodes) return obj - def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]: + def get_children_mapping_old(self, exclude=None) -> Dict[str, Tuple[Type, Any]]: """获得子ActionNode的字典,以key索引""" exclude = exclude or [] return {k: (v.expected_type, ...) for k, v in self.children.items() if k not in exclude} + def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]: + """获得子ActionNode的字典,以key索引,支持多级结构""" + exclude = exclude or [] + mapping = {} + + def _get_mapping(node: "ActionNode", prefix: str = ""): + for key, child in node.children.items(): + if key in exclude: + continue + full_key = f"{prefix}{key}" + mapping[full_key] = (child.expected_type, ...) 
+ _get_mapping(child, prefix=f"{full_key}.") + + _get_mapping(self) + return mapping + def get_self_mapping(self) -> Dict[str, Tuple[Type, Any]]: """get self key: type mapping""" return {self.key: (self.expected_type, ...)} @@ -616,3 +632,62 @@ class ActionNode: self.update_instruct_content(revise_contents) return revise_contents + + @classmethod + def from_pydantic(cls, model: Type[BaseModel], key: str = None): + """ + Creates an ActionNode tree from a Pydantic model. + + Args: + model (Type[BaseModel]): The Pydantic model to convert. + + Returns: + ActionNode: The root node of the created ActionNode tree. + """ + key = key or model.__name__ + root_node = cls(key=model.__name__, expected_type=Type[model], instruction="", example="") + + for field_name, field_model in model.model_fields.items(): + # Extracting field details + expected_type = field_model.annotation + instruction = field_model.description or "" + example = field_model.default + + # Check if the field is a Pydantic model itself. + # Use isinstance to avoid typing.List, typing.Dict, etc. 
(they are instances of type, not subclasses) + if isinstance(expected_type, type) and issubclass(expected_type, BaseModel): + # Recursively process the nested model + child_node = cls.from_pydantic(expected_type, key=field_name) + else: + child_node = cls(key=field_name, expected_type=expected_type, instruction=instruction, example=example) + + root_node.add_child(child_node) + + return root_node + + +class ToolUse(BaseModel): + tool_name: str = Field(default="a", description="tool name", examples=[]) + + +class Task(BaseModel): + task_id: int = Field(default="1", description="task id", examples=[1, 2, 3]) + name: str = Field(default="Get data from ...", description="task name", examples=[]) + dependent_task_ids: List[int] = Field(default=[], description="dependent task ids", examples=[1, 2, 3]) + tool: ToolUse = Field(default=ToolUse(), description="tool use", examples=[]) + + +class Tasks(BaseModel): + tasks: List[Task] = Field(default=[], description="tasks", examples=[]) + + +if __name__ == "__main__": + node = ActionNode.from_pydantic(Tasks) + print("Tasks") + print(Tasks.model_json_schema()) + print("Task") + print(Task.model_json_schema()) + print(node) + prompt = node.compile(context="") + node.create_children_class() + print(prompt)