Mirror of https://github.com/FoundationAgents/MetaGPT.git, synced 2026-05-03 21:02:38 +02:00

Commit e9d7c99021: Merge branch 'dev' of https://github.com/geekan/MetaGPT into geekan/dev

5 changed files with 113 additions and 7 deletions
metagpt/actions/action_node.py

@@ -12,7 +12,7 @@ import json
 from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple, Type, Union

-from pydantic import BaseModel, create_model, model_validator
+from pydantic import BaseModel, Field, create_model, model_validator
 from tenacity import retry, stop_after_attempt, wait_random_exponential

 from metagpt.actions.action_outcls_registry import register_action_outcls
@@ -186,11 +186,27 @@ class ActionNode:
         obj.add_children(nodes)
         return obj

-    def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
+    def get_children_mapping_old(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
         """Get a dict of the child ActionNodes, indexed by key."""
         exclude = exclude or []
         return {k: (v.expected_type, ...) for k, v in self.children.items() if k not in exclude}

+    def get_children_mapping(self, exclude=None) -> Dict[str, Tuple[Type, Any]]:
+        """Get a dict of the child ActionNodes, indexed by key; supports multi-level (nested) structures."""
+        exclude = exclude or []
+        mapping = {}
+
+        def _get_mapping(node: "ActionNode", prefix: str = ""):
+            for key, child in node.children.items():
+                if key in exclude:
+                    continue
+                full_key = f"{prefix}{key}"
+                mapping[full_key] = (child.expected_type, ...)
+                _get_mapping(child, prefix=f"{full_key}.")
+
+        _get_mapping(self)
+        return mapping
+
     def get_self_mapping(self) -> Dict[str, Tuple[Type, Any]]:
         """get self key: type mapping"""
         return {self.key: (self.expected_type, ...)}
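A minimal usage sketch of the new multi-level mapping (not part of the commit; the node keys and instructions below are invented, and the import path is assumed to be metagpt.actions.action_node, matching the constructor and add_child calls visible elsewhere in this diff). Nested children come back under dot-separated keys:

from metagpt.actions.action_node import ActionNode  # assumed import path

# Illustrative nodes only; keys, types, and instructions are made up for this sketch.
name = ActionNode(key="name", expected_type=str, instruction="task name", example="demo")
task = ActionNode(key="task", expected_type=dict, instruction="a task", example="")
task.add_child(name)

root = ActionNode(key="root", expected_type=dict, instruction="", example="")
root.add_child(task)

# Flattened, dot-separated keys for every descendant:
# {"task": (dict, Ellipsis), "task.name": (str, Ellipsis)}
print(root.get_children_mapping())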
@@ -616,3 +632,62 @@ class ActionNode:
         self.update_instruct_content(revise_contents)

         return revise_contents
+
+    @classmethod
+    def from_pydantic(cls, model: Type[BaseModel], key: str = None):
+        """
+        Creates an ActionNode tree from a Pydantic model.
+
+        Args:
+            model (Type[BaseModel]): The Pydantic model to convert.
+
+        Returns:
+            ActionNode: The root node of the created ActionNode tree.
+        """
+        key = key or model.__name__
+        root_node = cls(key=key, expected_type=Type[model], instruction="", example="")
+
+        for field_name, field_model in model.model_fields.items():
+            # Extract the field details
+            expected_type = field_model.annotation
+            instruction = field_model.description or ""
+            example = field_model.default
+
+            # Check whether the field is itself a Pydantic model.
+            # Use isinstance first: typing constructs such as typing.List and typing.Dict are not classes,
+            # so passing them straight to issubclass would raise TypeError.
+            if isinstance(expected_type, type) and issubclass(expected_type, BaseModel):
+                # Recursively process the nested model
+                child_node = cls.from_pydantic(expected_type, key=field_name)
+            else:
+                child_node = cls(key=field_name, expected_type=expected_type, instruction=instruction, example=example)
+
+            root_node.add_child(child_node)
+
+        return root_node
+
+
+class ToolUse(BaseModel):
+    tool_name: str = Field(default="a", description="tool name", examples=[])
+
+
+class Task(BaseModel):
+    task_id: int = Field(default=1, description="task id", examples=[1, 2, 3])
+    name: str = Field(default="Get data from ...", description="task name", examples=[])
+    dependent_task_ids: List[int] = Field(default=[], description="dependent task ids", examples=[1, 2, 3])
+    tool: ToolUse = Field(default=ToolUse(), description="tool use", examples=[])
+
+
+class Tasks(BaseModel):
+    tasks: List[Task] = Field(default=[], description="tasks", examples=[])
+
+
+if __name__ == "__main__":
+    node = ActionNode.from_pydantic(Tasks)
+    print("Tasks")
+    print(Tasks.model_json_schema())
+    print("Task")
+    print(Task.model_json_schema())
+    print(node)
+    prompt = node.compile(context="")
+    node.create_children_class()
+    print(prompt)
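Combined with the multi-level get_children_mapping above, the tree that from_pydantic builds exposes nested model fields under dotted keys. A short sketch under that assumption (Profile and User are invented models, not part of the commit; the import path is assumed):

from metagpt.actions.action_node import ActionNode  # assumed import path

from pydantic import BaseModel, Field


class Profile(BaseModel):
    bio: str = Field(default="", description="short bio")


class User(BaseModel):
    name: str = Field(default="", description="user name")
    profile: Profile = Field(default=Profile(), description="nested profile")


node = ActionNode.from_pydantic(User)
# Expected keys from the recursive mapping: ["name", "profile", "profile.bio"]
print(list(node.get_children_mapping().keys()))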
metagpt/actions/run_code.py

@@ -16,6 +16,7 @@
 class.
 """
 import subprocess
+from pathlib import Path
 from typing import Tuple

 from pydantic import Field
@@ -150,11 +151,23 @@ class RunCode(Action):
         return subprocess.run(cmd, check=check, cwd=cwd, env=env)

     @staticmethod
-    def _install_dependencies(working_directory, env):
+    def _install_requirements(working_directory, env):
         file_path = Path(working_directory) / "requirements.txt"
         if not file_path.exists():
             return
         if file_path.stat().st_size == 0:
             return
         install_command = ["python", "-m", "pip", "install", "-r", "requirements.txt"]
         logger.info(" ".join(install_command))
         RunCode._install_via_subprocess(install_command, check=True, cwd=working_directory, env=env)

+    @staticmethod
+    def _install_pytest(working_directory, env):
+        install_pytest_command = ["python", "-m", "pip", "install", "pytest"]
+        logger.info(" ".join(install_pytest_command))
+        RunCode._install_via_subprocess(install_pytest_command, check=True, cwd=working_directory, env=env)
+
+    @staticmethod
+    def _install_dependencies(working_directory, env):
+        RunCode._install_requirements(working_directory, env)
+        RunCode._install_pytest(working_directory, env)
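The helper RunCode._install_via_subprocess is only partly visible here (its closing line shows up as context at the top of the hunk). A minimal standalone sketch consistent with that line; the real method's decorator and exact signature are not shown in this diff, so treat this as an assumption:

import subprocess


def _install_via_subprocess(cmd, check, cwd, env):
    # Run an install command inside the working directory; with check=True a
    # non-zero exit raises subprocess.CalledProcessError, failing the install step.
    return subprocess.run(cmd, check=check, cwd=cwd, env=env)

With the split above, _install_dependencies is now a thin composition of _install_requirements and _install_pytest, so existing callers keep working unchanged.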
metagpt/roles/qa_engineer.py

@@ -57,6 +57,8 @@ class QaEngineer(Role):
             code_doc = await src_file_repo.get(filename)
+            if not code_doc:
+                continue
             if not code_doc.filename.endswith(".py"):
                 continue
             test_doc = await self.project_repo.tests.get("test_" + code_doc.filename)
             if not test_doc:
                 test_doc = Document(
metagpt/utils/token_counter.py

@@ -4,10 +4,11 @@
 @Time : 2023/5/18 00:40
 @Author : alexanderwu
 @File : token_counter.py
-ref1: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-ref2: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
-ref3: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
-ref4: https://ai.google.dev/models/gemini
+ref1: https://openai.com/pricing
+ref2: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+ref3: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
+ref4: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
+ref5: https://ai.google.dev/models/gemini
 """
 import tiktoken
@@ -25,7 +26,10 @@ TOKEN_COSTS = {
     "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
     "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
     "gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
     "gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03},
     "gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
     "gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
     "gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
     "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
     "glm-3-turbo": {"prompt": 0.0, "completion": 0.0007},  # 128k version, prompt + completion tokens=0.005¥/k-tokens
     "glm-4": {"prompt": 0.0, "completion": 0.014},  # 128k version, prompt + completion tokens=0.1¥/k-tokens
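The TOKEN_COSTS values read as USD per 1,000 tokens, consistent with the ref1 pricing page. A hedged sketch of how a request's dollar cost can be derived from the table; the helper name and import path are invented for illustration:

from metagpt.utils.token_counter import TOKEN_COSTS  # assumed import path


def estimate_cost(prompt_tokens: int, completion_tokens: int, model: str) -> float:
    # Assumes TOKEN_COSTS prices are USD per 1k tokens.
    costs = TOKEN_COSTS[model]
    return prompt_tokens / 1000 * costs["prompt"] + completion_tokens / 1000 * costs["completion"]


# Example: 1,200 prompt tokens and 300 completion tokens on gpt-4-0125-preview
# -> 1.2 * 0.01 + 0.3 * 0.03 = 0.021 USD
print(estimate_cost(1200, 300, "gpt-4-0125-preview"))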
@@ -47,7 +51,10 @@ TOKEN_MAX = {
     "gpt-4-32k": 32768,
     "gpt-4-32k-0314": 32768,
     "gpt-4-0613": 8192,
     "gpt-4-turbo-preview": 128000,
     "gpt-4-0125-preview": 128000,
     "gpt-4-1106-preview": 128000,
     "gpt-4-1106-vision-preview": 128000,
     "text-embedding-ada-002": 8192,
     "chatglm_turbo": 32768,
     "gemini-pro": 32768,
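TOKEN_MAX appears to hold each model's total context window, so the headroom left for a completion is the window minus the prompt's token count. A small sketch under that assumption; the helper name and import path are invented:

from metagpt.utils.token_counter import TOKEN_MAX  # assumed import path


def available_completion_tokens(prompt_tokens: int, model: str, reserve: int = 0) -> int:
    # Never return a negative budget if the prompt already exceeds the window.
    return max(TOKEN_MAX[model] - prompt_tokens - reserve, 0)


print(available_completion_tokens(6000, "gpt-4-0613"))  # 8192 - 6000 = 2192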
@@ -72,7 +79,10 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0613"):
         "gpt-4-32k-0314",
         "gpt-4-0613",
         "gpt-4-32k-0613",
         "gpt-4-turbo-preview",
         "gpt-4-0125-preview",
         "gpt-4-1106-preview",
         "gpt-4-1106-vision-preview",
     }:
         tokens_per_message = 3  # every reply is primed with <|start|>assistant<|message|>
         tokens_per_name = 1
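For models in this set, tokens_per_message and tokens_per_name feed the counting loop from the tiktoken cookbook notebook referenced in the module docstring. A condensed sketch of that algorithm, not the file's exact implementation:

import tiktoken


def count_message_tokens_sketch(messages):
    # cl100k_base is the encoding used by the GPT-3.5/GPT-4 families.
    encoding = tiktoken.get_encoding("cl100k_base")
    tokens_per_message, tokens_per_name = 3, 1
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


print(count_message_tokens_sketch([{"role": "user", "content": "hello"}]))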
tests/metagpt/test_incremental_dev.py

@@ -50,33 +50,39 @@ def test_simple_add_calculator():
     log_and_check_result(result)


+@pytest.mark.skip
 def test_number_guessing_game():
     result = get_incremental_dev_result(IDEAS[1], PROJECT_NAMES[1])
     log_and_check_result(result)


+@pytest.mark.skip
 def test_word_cloud():
     result = get_incremental_dev_result(IDEAS[2], PROJECT_NAMES[2])
     log_and_check_result(result)


+@pytest.mark.skip
 def test_gomoku():
     result = get_incremental_dev_result(IDEAS[3], PROJECT_NAMES[3])
     log_and_check_result(result)


+@pytest.mark.skip
 def test_dice_simulator_new():
     for i, (idea, project_name) in enumerate(zip(IDEAS[4:6], PROJECT_NAMES[4:6]), start=1):
         result = get_incremental_dev_result(idea, project_name)
         log_and_check_result(result, "refine_" + str(i))


+@pytest.mark.skip
 def test_refined_pygame_2048():
     for i, (idea, project_name) in enumerate(zip(IDEAS[6:8], PROJECT_NAMES[6:8]), start=1):
         result = get_incremental_dev_result(idea, project_name)
         log_and_check_result(result, "refine_" + str(i))


+@pytest.mark.skip
 def test_refined_snake_game():
     for i, (idea, project_name) in enumerate(zip(IDEAS[8:10], PROJECT_NAMES[8:10]), start=1):
         result = get_incremental_dev_result(idea, project_name)
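The refinement tests iterate over paired idea/project slices and tag each pass with an incrementing suffix. A tiny sketch of what that loop header yields (the IDEAS and PROJECT_NAMES values here are placeholders, not the real fixtures):

IDEAS = ["idea0", "idea1", "idea2", "idea3", "idea4", "idea5"]  # placeholders
PROJECT_NAMES = ["p0", "p1", "p2", "p3", "p4", "p5"]  # placeholders

for i, (idea, project_name) in enumerate(zip(IDEAS[4:6], PROJECT_NAMES[4:6]), start=1):
    # First pass labels results "refine_1", second pass "refine_2".
    print("refine_" + str(i), idea, project_name)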