feat: merge geekan/dev

This commit is contained in:
莘权 马 2024-01-27 13:38:14 +08:00
commit aa0909525e
62 changed files with 1733 additions and 154 deletions

View file

@ -0,0 +1,46 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_design_api_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode, dict_to_markdown
from metagpt.actions.design_api import NEW_REQ_TEMPLATE
from metagpt.actions.design_api_an import REFINED_DESIGN_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
DESIGN_SAMPLE,
REFINED_DESIGN_JSON,
REFINED_PRD_JSON,
)
@pytest.fixture()
def llm():
    """Fixture: a fresh default LLM client for each test."""
    client = LLM()
    return client
def mock_refined_design_json():
    """Stand-in for instruct_content.model_dump; returns the canned refined design."""
    return REFINED_DESIGN_JSON
@pytest.mark.asyncio
async def test_write_design_an(mocker, llm):
    """Verify the (mocked) REFINED_DESIGN_NODE.fill yields every refined-design section.

    Fix: `llm` is now injected as a pytest fixture parameter. Previously the bare
    name resolved to the module-level fixture *function* object, not an LLM
    instance — harmless only because `fill` is mocked, but wrong nonetheless.
    """
    root = ActionNode.from_children(
        "RefinedDesignAPI", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    # Stub the structured output so model_dump() returns the canned JSON sample.
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_design_json
    mocker.patch("metagpt.actions.design_api_an.REFINED_DESIGN_NODE.fill", return_value=root)

    prompt = NEW_REQ_TEMPLATE.format(old_design=DESIGN_SAMPLE, context=dict_to_markdown(REFINED_PRD_JSON))
    node = await REFINED_DESIGN_NODE.fill(prompt, llm)

    data = node.instruct_content.model_dump()  # dump once instead of per assertion
    assert "Refined Implementation Approach" in data
    assert "Refined File list" in data
    assert "Refined Data structures and interfaces" in data
    assert "Refined Program call flow" in data

View file

@ -0,0 +1,45 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_project_management_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode, dict_to_markdown
from metagpt.actions.project_management import NEW_REQ_TEMPLATE
from metagpt.actions.project_management_an import REFINED_PM_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
REFINED_DESIGN_JSON,
REFINED_TASKS_JSON,
TASKS_SAMPLE,
)
@pytest.fixture()
def llm():
    """Fixture: build and hand out a default LLM client."""
    return LLM()
def mock_refined_tasks_json():
    """Stand-in for instruct_content.model_dump; returns the canned refined tasks."""
    return REFINED_TASKS_JSON
@pytest.mark.asyncio
async def test_project_management_an(mocker, llm):
    """Verify the (mocked) REFINED_PM_NODE.fill yields every refined-task section.

    Fix: `llm` is now injected as a pytest fixture parameter. Previously the bare
    name resolved to the module-level fixture *function*, not an LLM instance.
    """
    root = ActionNode.from_children(
        "RefinedProjectManagement", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    # Stub the structured output so model_dump() returns the canned JSON sample.
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_tasks_json
    mocker.patch("metagpt.actions.project_management_an.REFINED_PM_NODE.fill", return_value=root)

    prompt = NEW_REQ_TEMPLATE.format(old_task=TASKS_SAMPLE, context=dict_to_markdown(REFINED_DESIGN_JSON))
    node = await REFINED_PM_NODE.fill(prompt, llm)

    data = node.instruct_content.model_dump()  # dump once instead of per assertion
    assert "Refined Logic Analysis" in data
    assert "Refined Task list" in data
    assert "Refined Shared Knowledge" in data

View file

@ -0,0 +1,71 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_write_code_plan_and_change_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_code import WriteCode
from metagpt.actions.write_code_plan_and_change_an import (
REFINED_TEMPLATE,
WriteCodePlanAndChange,
)
from metagpt.schema import CodePlanAndChangeContext
from tests.data.incremental_dev_project.mock import (
CODE_PLAN_AND_CHANGE_SAMPLE,
DESIGN_SAMPLE,
NEW_REQUIREMENT_SAMPLE,
REFINED_CODE_INPUT_SAMPLE,
REFINED_CODE_SAMPLE,
TASKS_SAMPLE,
)
def mock_code_plan_and_change():
    """Stand-in for instruct_content.model_dump; returns the canned plan sample."""
    return CODE_PLAN_AND_CHANGE_SAMPLE
@pytest.mark.asyncio
async def test_write_code_plan_and_change_an(mocker):
    """With WriteCodePlanAndChange.run mocked, the node must expose the plan key."""
    stub = ActionNode.from_children(
        "WriteCodePlanAndChange", [ActionNode(key="", expected_type=str, instruction="", example="")]
    )
    # Wire the canned sample in as the node's structured output.
    stub.instruct_content = BaseModel()
    stub.instruct_content.model_dump = mock_code_plan_and_change
    mocker.patch("metagpt.actions.write_code_plan_and_change_an.WriteCodePlanAndChange.run", return_value=stub)

    context = CodePlanAndChangeContext(
        requirement="New requirement",
        prd_filename="prd.md",
        design_filename="design.md",
        task_filename="task.md",
    )
    node = await WriteCodePlanAndChange(i_context=context).run()

    assert "Code Plan And Change" in node.instruct_content.model_dump()
@pytest.mark.asyncio
async def test_refine_code(mocker):
    """With _aask mocked to return refined code, write_code must yield code text."""
    mocker.patch.object(WriteCode, "_aask", return_value=REFINED_CODE_SAMPLE)

    refine_prompt = REFINED_TEMPLATE.format(
        user_requirement=NEW_REQUIREMENT_SAMPLE,
        code_plan_and_change=CODE_PLAN_AND_CHANGE_SAMPLE,
        design=DESIGN_SAMPLE,
        task=TASKS_SAMPLE,
        code=REFINED_CODE_INPUT_SAMPLE,
        logs="",
        feedback="",
        filename="game.py",
        summary_log="",
    )
    refined = await WriteCode().write_code(prompt=refine_prompt)

    assert "def" in refined

View file

@ -0,0 +1,48 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_write_prd_an.py
"""
import pytest
from openai._models import BaseModel
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_prd import NEW_REQ_TEMPLATE
from metagpt.actions.write_prd_an import REFINED_PRD_NODE
from metagpt.llm import LLM
from tests.data.incremental_dev_project.mock import (
NEW_REQUIREMENT_SAMPLE,
PRD_SAMPLE,
REFINED_PRD_JSON,
)
@pytest.fixture()
def llm():
    """Fixture supplying a default-configured LLM client."""
    instance = LLM()
    return instance
def mock_refined_prd_json():
    """Stand-in for instruct_content.model_dump; returns the canned refined PRD."""
    return REFINED_PRD_JSON
@pytest.mark.asyncio
async def test_write_prd_an(mocker, llm):
    """Verify the (mocked) REFINED_PRD_NODE.fill yields every refined-PRD section.

    Fix: `llm` is now injected as a pytest fixture parameter. Previously the bare
    name resolved to the module-level fixture *function*, not an LLM instance.
    """
    root = ActionNode.from_children("RefinedPRD", [ActionNode(key="", expected_type=str, instruction="", example="")])
    # Stub the structured output so model_dump() returns the canned JSON sample.
    root.instruct_content = BaseModel()
    root.instruct_content.model_dump = mock_refined_prd_json
    mocker.patch("metagpt.actions.write_prd_an.REFINED_PRD_NODE.fill", return_value=root)

    prompt = NEW_REQ_TEMPLATE.format(
        requirements=NEW_REQUIREMENT_SAMPLE,
        old_prd=PRD_SAMPLE,
    )
    node = await REFINED_PRD_NODE.fill(prompt, llm)

    data = node.instruct_content.model_dump()  # dump once instead of per assertion
    assert "Refined Requirements" in data
    assert "Refined Product Goals" in data
    assert "Refined User Stories" in data
    assert "Refined Requirement Analysis" in data
    assert "Refined Requirement Pool" in data

View file

@ -17,7 +17,7 @@ default_resp = {
}
async def mock_zhipuai_acreate_stream(self, **kwargs):
async def mock_zhipuai_acreate_stream(**kwargs):
class MockResponse(object):
async def _aread(self):
class Iterator(object):
@ -37,7 +37,7 @@ async def mock_zhipuai_acreate_stream(self, **kwargs):
return MockResponse()
async def mock_zhipuai_acreate(self, **kwargs) -> dict:
async def mock_zhipuai_acreate(**kwargs) -> dict:
return default_resp

View file

@ -6,8 +6,6 @@ from typing import Any, Tuple
import pytest
import zhipuai
from zhipuai.model_api.api import InvokeType
from zhipuai.utils.http_client import headers as zhipuai_default_headers
from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
@ -23,14 +21,7 @@ async def mock_requestor_arequest(self, **kwargs) -> Tuple[Any, Any, str]:
@pytest.mark.asyncio
async def test_zhipu_model_api(mocker):
header = ZhiPuModelAPI.get_header()
zhipuai_default_headers.update({"Authorization": api_key})
assert header == zhipuai_default_headers
ZhiPuModelAPI.get_sse_header()
# assert len(sse_header["Authorization"]) == 191
url_prefix, url_suffix = ZhiPuModelAPI.split_zhipu_api_url(InvokeType.SYNC, kwargs={"model": "chatglm_turbo"})
url_prefix, url_suffix = ZhiPuModelAPI(api_key=api_key).split_zhipu_api_url()
assert url_prefix == "https://open.bigmodel.cn/api"
assert url_suffix == "/paas/v4/chat/completions"

View file

@ -99,7 +99,7 @@ def test_parse_code():
def test_todo():
role = Engineer()
assert role.todo == any_to_name(WriteCode)
assert role.action_description == any_to_name(WriteCode)
@pytest.mark.asyncio

View file

@ -144,3 +144,7 @@ async def test_team_recover_multi_roles_save(mocker, context):
assert new_company.env.get_role(role_b.profile).rc.state == 1
await new_company.run(n_round=4)
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -6,7 +6,7 @@
@File : test_context.py
"""
from metagpt.configs.llm_config import LLMType
from metagpt.context import CONTEXT, AttrDict, Context
from metagpt.context import AttrDict, Context
def test_attr_dict_1():
@ -51,11 +51,12 @@ def test_context_1():
def test_context_2():
llm = CONTEXT.config.get_openai_llm()
ctx = Context()
llm = ctx.config.get_openai_llm()
assert llm is not None
assert llm.api_type == LLMType.OPENAI
kwargs = CONTEXT.kwargs
kwargs = ctx.kwargs
assert kwargs is not None
kwargs.test_key = "test_value"

View file

@ -109,6 +109,7 @@ async def test_config_priority():
if not home_dir.exists():
assert gpt4t is None
gpt35 = Config.default()
gpt35.llm.model = "gpt-3.5-turbo-1106"
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-0613"

View file

@ -11,7 +11,6 @@ from pathlib import Path
import pytest
from metagpt.actions import UserRequirement
from metagpt.context import CONTEXT
from metagpt.environment import Environment
from metagpt.logs import logger
from metagpt.roles import Architect, ProductManager, Role
@ -44,9 +43,9 @@ def test_get_roles(env: Environment):
@pytest.mark.asyncio
async def test_publish_and_process_message(env: Environment):
if CONTEXT.git_repo:
CONTEXT.git_repo.delete_repository()
CONTEXT.git_repo = None
if env.context.git_repo:
env.context.git_repo.delete_repository()
env.context.git_repo = None
product_manager = ProductManager(name="Alice", profile="Product Manager", goal="做AI Native产品", constraints="资源有限")
architect = Architect(

View file

@ -0,0 +1,189 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/01/03
@Author : mannaandpoem
@File : test_incremental_dev.py
"""
import os
import subprocess
import time
import pytest
from typer.testing import CliRunner
from metagpt.const import TEST_DATA_PATH
from metagpt.logs import logger
from metagpt.startup import app
# Shared CLI runner used by every scenario in this module.
runner = CliRunner()

# Incremental-development requirement prompts. Index i pairs with
# PROJECT_NAMES[i]; some projects appear twice because they receive two
# successive refinements (indices 4-5, 6-7, 8-9).
IDEAS = [
    "Add subtraction, multiplication and division operations to the calculator. The current calculator can only perform basic addition operations, and it is necessary to introduce subtraction, multiplication, division operation into the calculator",
    "Adding graphical interface functionality to enhance the user experience in the number-guessing game. The existing number-guessing game currently relies on command-line input for numbers. The goal is to introduce a graphical interface to improve the game's usability and visual appeal",
    "Add a feature to remove deprecated words from the word cloud. The current word cloud generator does not support removing deprecated words. Now, The word cloud generator should support removing deprecated words. Customize deactivated words to exclude them from word cloud. Let users see all the words in the text file, and allow users to select the words they want to remove.",
    "Add an AI opponent with fixed difficulty levels. Currently, the game only allows players to compete against themselves. Implement an AI algorithm that can playing with player. This will provide a more engaging and challenging experience for players.",
    "Add functionality to view the history of scores. The original dice rolling game could only display the current game result, but the new requirement allows players to view the history of scores",
    "Add functionality to view the history of scores and perform statistical analysis on them. The original dice rolling game could only display the current game result, but the new requirement allows players to view the history of scores and display the statistical analysis results of the current score",
    "Changed score target for 2048 game from 2048 to 4096. Please change the game's score target from 2048 to 4096, and change the interface size from 4*4 to 8*8",
    "Display the history score of the player in the 2048 game. Add a record board that can display players' historical score records so that players can trace their scores",
    "Incremental Idea Gradually increase the speed of the snake as the game progresses. In the current version of the game, the snakes speed remains constant throughout the gameplay. Implement a feature where the snakes speed gradually increases over time, making the game more challenging and intense as the player progresses.",
    "Introduce power-ups and obstacles to the game. The current version of the game only involves eating food and growing the snake. Add new elements such as power-ups that can enhance the snakes speed or make it invincible for a short duration. At the same time, introduce obstacles like walls or enemies that the snake must avoid or overcome to continue growing.",
]

# Fixture project directories under tests/data/incremental_dev_project/,
# aligned index-for-index with IDEAS above (duplicates are intentional).
PROJECT_NAMES = [
    "simple_add_calculator",
    "number_guessing_game",
    "word_cloud",
    "Gomoku",
    "dice_simulator_new",
    "dice_simulator_new",
    "pygame_2048",
    "pygame_2048",
    "snake_game",
    "snake_game",
]
def test_simple_add_calculator():
    """Scenario: extend the basic calculator with -, * and / operations."""
    outcome = get_incremental_dev_result(IDEAS[0], PROJECT_NAMES[0])
    log_and_check_result(outcome)
@pytest.mark.skip
def test_number_guessing_game():
    """Scenario: add a graphical interface to the number-guessing game."""
    outcome = get_incremental_dev_result(IDEAS[1], PROJECT_NAMES[1])
    log_and_check_result(outcome)
@pytest.mark.skip
def test_word_cloud():
    """Scenario: support removing deprecated words from the word cloud."""
    outcome = get_incremental_dev_result(IDEAS[2], PROJECT_NAMES[2])
    log_and_check_result(outcome)
@pytest.mark.skip
def test_gomoku():
    """Scenario: add a fixed-difficulty AI opponent to Gomoku."""
    outcome = get_incremental_dev_result(IDEAS[3], PROJECT_NAMES[3])
    log_and_check_result(outcome)
@pytest.mark.skip
def test_dice_simulator_new():
    """Run both dice-simulator refinement ideas, tagging each run refine_<i>."""
    cases = zip(IDEAS[4:6], PROJECT_NAMES[4:6])
    for i, (idea, project_name) in enumerate(cases, start=1):
        outcome = get_incremental_dev_result(idea, project_name)
        log_and_check_result(outcome, f"refine_{i}")
@pytest.mark.skip
def test_refined_pygame_2048():
    """Run both 2048 refinement ideas, tagging each run refine_<i>."""
    cases = zip(IDEAS[6:8], PROJECT_NAMES[6:8])
    for i, (idea, project_name) in enumerate(cases, start=1):
        outcome = get_incremental_dev_result(idea, project_name)
        log_and_check_result(outcome, f"refine_{i}")
@pytest.mark.skip
def test_refined_snake_game():
    """Run both snake-game refinement ideas, tagging each run refine_<i>."""
    cases = zip(IDEAS[8:10], PROJECT_NAMES[8:10])
    for i, (idea, project_name) in enumerate(cases, start=1):
        outcome = get_incremental_dev_result(idea, project_name)
        log_and_check_result(outcome, f"refine_{i}")
def log_and_check_result(result, tag_name="refine"):
    """Log the CLI outcome and verify the incremental run created a new commit.

    Args:
        result: the ``Result`` returned by ``CliRunner.invoke``.
        tag_name: base name of the git tag marking this successful run; a
            timestamp suffix is appended if the tag already exists.

    Raises:
        AssertionError: if the CLI aborted or no new commit was made.
    """
    logger.info(result)
    logger.info(result.output)
    # Replaces the old `assert False`/`assert True` branches with direct,
    # messaged assertions.
    assert "Aborting" not in result.output, "CLI run aborted"
    # After a successful run there is at least one commit past the 'base' tag,
    # so `git describe --tags` no longer reports exactly 'base'.
    cur_tag = subprocess.run(["git", "describe", "--tags"], capture_output=True, text=True).stdout.strip()
    assert cur_tag != "base", "no new commit was created by the run"
    # Avoid collisions when the same scenario is run repeatedly.
    if subprocess.run(["git", "show-ref", "--verify", "--quiet", f"refs/tags/{tag_name}"]).returncode == 0:
        tag_name += str(int(time.time()))
    # The old `except CalledProcessError as e: raise e` was a no-op wrapper;
    # let check=True raise directly.
    subprocess.run(["git", "tag", tag_name], check=True)
def get_incremental_dev_result(idea, project_name, use_review=True):
    """Run the startup CLI in incremental mode against a fixture project.

    Args:
        idea: the new requirement to implement incrementally.
        project_name: directory name under tests/data/incremental_dev_project.
        use_review: when False, pass --no-code-review to the CLI.

    Returns:
        The ``Result`` from ``CliRunner.invoke``.

    Raises:
        Exception: if the fixture archive cannot be extracted.
    """
    project_path = TEST_DATA_PATH / "incremental_dev_project" / project_name
    if not project_path.exists():
        # The fixture ships as <project>.zip next to its target directory;
        # bsdtar-style `tar -xf` can extract zip archives.
        try:
            subprocess.run(["tar", "-xf", f"{project_path}.zip", "-C", str(project_path.parent)], check=True)
        except subprocess.CalledProcessError as e:
            # Chain the cause so the original CalledProcessError is kept.
            raise Exception(f"Failed to extract project {project_name}. Error: {e}") from e

    check_or_create_base_tag(project_path)
    # Pass the path as str: CLI argv entries should be strings, not Path objects.
    args = [idea, "--inc", "--project-path", str(project_path), "--n-round", "20"]
    if not use_review:
        args.append("--no-code-review")
    return runner.invoke(app, args)
def check_or_create_base_tag(project_path):
    """Ensure the repo at ``project_path`` has a 'base' tag to diff runs against.

    If the tag exists, discard local changes and check out 'base'; otherwise
    commit the current tree and tag it 'base'. Intentionally leaves the process
    CWD at ``project_path`` — later git commands in this module rely on it.

    Args:
        project_path: path to the fixture project directory.

    Raises:
        Exception: if switching to, or creating, the 'base' tag fails.
    """
    # Later git invocations (describe/tag in log_and_check_result) depend on cwd.
    os.chdir(project_path)
    # Safe to repeat: `git init` on an existing repo is a no-op.
    subprocess.run(["git", "init"], check=True)

    has_base_tag = (
        subprocess.run(["git", "show-ref", "--verify", "--quiet", "refs/tags/base"]).returncode == 0
    )
    if has_base_tag:
        logger.info("Base tag exists")
        try:
            # Drop untracked leftovers from a previous run before the forced checkout.
            status = subprocess.run(["git", "status", "-s"], capture_output=True, text=True).stdout.strip()
            if status:
                subprocess.run(["git", "clean", "-df"])  # best-effort; deliberately unchecked
            subprocess.run(["git", "checkout", "-f", "base"], check=True)
            logger.info("Switched to base branch")
        except Exception:
            logger.error("Failed to switch to base branch")
            raise  # bare raise preserves the original traceback (was `raise e`)
    else:
        logger.info("Base tag doesn't exist.")
        # Best-effort add/commit: failures are logged but do not abort, so an
        # already-committed clean tree still reaches the tagging step below.
        try:
            subprocess.run(["git", "add", "."], check=True)
            logger.info("Files added successfully.")
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to add files: {e}")
        try:
            subprocess.run(["git", "commit", "-m", "Initial commit"], check=True)
            logger.info("Committed all files with the message 'Initial commit'.")
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to commit: {e.stderr}")
        try:
            subprocess.run(["git", "tag", "base"], check=True)
            logger.info("Added 'base' tag.")
        except Exception:
            logger.error("Failed to add 'base' tag.")
            raise  # bare raise preserves the original traceback (was `raise e`)
# Allow running this module directly (python test_incremental_dev.py);
# -s keeps stdout visible for the long-running CLI scenarios.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])

View file

@ -131,7 +131,7 @@ async def test_recover():
role.recovered = True
role.latest_observed_msg = Message(content="recover_test")
role.rc.state = 0
assert role.first_action == any_to_name(MockAction)
assert role.action_description == any_to_name(MockAction)
rsp = await role.run()
assert rsp.cause_by == any_to_str(MockAction)

View file

@ -9,23 +9,25 @@ from unittest.mock import AsyncMock
import pytest
from metagpt.config2 import Config
from metagpt.utils.redis import Redis
async def async_mock_from_url(*args, **kwargs):
mock_client = AsyncMock()
mock_client.set.return_value = None
mock_client.get.side_effect = [b"test", b""]
return mock_client
@pytest.mark.asyncio
async def test_redis(mocker):
redis = Config.default().redis
mocker.patch("aioredis.from_url", return_value=async_mock_from_url())
async def async_mock_from_url(*args, **kwargs):
mock_client = AsyncMock()
mock_client.set.return_value = None
mock_client.get.return_value = b"test"
return mock_client
conn = Redis(redis)
mocker.patch("aioredis.from_url", return_value=async_mock_from_url())
mock_config = mocker.Mock()
mock_config.to_url.return_value = "http://mock.com"
mock_config.username = "mockusername"
mock_config.password = "mockpwd"
mock_config.db = "0"
conn = Redis(mock_config)
await conn.set("test", "test", timeout_sec=0)
assert await conn.get("test") == b"test"
await conn.close()

View file

@ -8,8 +8,8 @@
import uuid
from pathlib import Path
import aioboto3
import aiofiles
import mock
import pytest
from metagpt.config2 import Config
@ -18,21 +18,18 @@ from metagpt.utils.s3 import S3
@pytest.mark.asyncio
@mock.patch("aioboto3.Session")
async def test_s3(mock_session_class):
async def test_s3(mocker):
# Set up the mock response
data = await aread(__file__, "utf-8")
mock_session_object = mock.Mock()
reader_mock = mock.AsyncMock()
reader_mock = mocker.AsyncMock()
reader_mock.read.side_effect = [data.encode("utf-8"), b"", data.encode("utf-8")]
type(reader_mock).url = mock.PropertyMock(return_value="https://mock")
mock_client = mock.AsyncMock()
type(reader_mock).url = mocker.PropertyMock(return_value="https://mock")
mock_client = mocker.AsyncMock()
mock_client.put_object.return_value = None
mock_client.get_object.return_value = {"Body": reader_mock}
mock_client.__aenter__.return_value = mock_client
mock_client.__aexit__.return_value = None
mock_session_object.client.return_value = mock_client
mock_session_class.return_value = mock_session_object
mocker.patch.object(aioboto3.Session, "client", return_value=mock_client)
# Prerequisites
s3 = Config.default().s3
@ -55,7 +52,7 @@ async def test_s3(mock_session_class):
# Mock session env
s3.access_key = "ABC"
type(reader_mock).url = mock.PropertyMock(return_value="")
type(reader_mock).url = mocker.PropertyMock(return_value="")
try:
conn = S3(s3)
res = await conn.cache("ABC", ".bak", "script")