Merge pull request #771 from iorisa/feature/context

feat: replace CONTEXT with local context
This commit is contained in:
geekan 2024-01-18 19:17:32 +08:00 committed by GitHub
commit 16ec0ccc4c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
68 changed files with 354 additions and 356 deletions

View file

@ -11,9 +11,7 @@ import uuid
import pytest
from metagpt.actions.debug_error import DebugError
from metagpt.context import CONTEXT
from metagpt.schema import RunCodeContext, RunCodeResult
from metagpt.utils.project_repo import ProjectRepo
CODE_CONTENT = '''
from typing import List
@ -116,9 +114,8 @@ if __name__ == '__main__':
@pytest.mark.asyncio
async def test_debug_error():
CONTEXT.src_workspace = CONTEXT.git_repo.workdir / uuid.uuid4().hex
project_repo = ProjectRepo(CONTEXT.git_repo)
async def test_debug_error(context):
context.src_workspace = context.git_repo.workdir / uuid.uuid4().hex
ctx = RunCodeContext(
code_filename="player.py",
test_filename="test_player.py",
@ -126,8 +123,8 @@ async def test_debug_error():
output_filename="output.log",
)
await project_repo.with_src_path(CONTEXT.src_workspace).srcs.save(filename=ctx.code_filename, content=CODE_CONTENT)
await project_repo.tests.save(filename=ctx.test_filename, content=TEST_CONTENT)
await context.repo.with_src_path(context.src_workspace).srcs.save(filename=ctx.code_filename, content=CODE_CONTENT)
await context.repo.tests.save(filename=ctx.test_filename, content=TEST_CONTENT)
output_data = RunCodeResult(
stdout=";",
stderr="",
@ -141,8 +138,8 @@ async def test_debug_error():
"----------------------------------------------------------------------\n"
"Ran 5 tests in 0.007s\n\nFAILED (failures=1)\n;\n",
)
await project_repo.test_outputs.save(filename=ctx.output_filename, content=output_data.model_dump_json())
debug_error = DebugError(i_context=ctx)
await context.repo.test_outputs.save(filename=ctx.output_filename, content=output_data.model_dump_json())
debug_error = DebugError(i_context=ctx, context=context)
rsp = await debug_error.run()

View file

@ -9,20 +9,17 @@
import pytest
from metagpt.actions.design_api import WriteDesign
from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.project_repo import ProjectRepo
@pytest.mark.asyncio
async def test_design_api():
async def test_design_api(context):
inputs = ["我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。"] # PRD_SAMPLE
project_repo = ProjectRepo(CONTEXT.git_repo)
for prd in inputs:
await project_repo.docs.prd.save(filename="new_prd.txt", content=prd)
await context.repo.docs.prd.save(filename="new_prd.txt", content=prd)
design_api = WriteDesign()
design_api = WriteDesign(context=context)
result = await design_api.run(Message(content=prd, instruct_content=None))
logger.info(result)

View file

@ -11,7 +11,7 @@ from metagpt.actions.design_api_review import DesignReview
@pytest.mark.asyncio
async def test_design_api_review():
async def test_design_api_review(context):
prd = "我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。"
api_design = """
数据结构:
@ -26,7 +26,7 @@ API列表:
"""
_ = "API设计看起来非常合理满足了PRD中的所有需求。"
design_api_review = DesignReview()
design_api_review = DesignReview(context=context)
result = await design_api_review.run(prd, api_design)

View file

@ -12,6 +12,6 @@ from metagpt.actions.fix_bug import FixBug
@pytest.mark.asyncio
async def test_fix_bug():
fix_bug = FixBug()
async def test_fix_bug(context):
fix_bug = FixBug(context=context)
assert fix_bug.name == "FixBug"

View file

@ -10,7 +10,7 @@ import pytest
from metagpt.actions.generate_questions import GenerateQuestions
from metagpt.logs import logger
context = """
msg = """
## topic
如何做一个生日蛋糕
@ -20,9 +20,9 @@ context = """
@pytest.mark.asyncio
async def test_generate_questions():
action = GenerateQuestions()
rsp = await action.run(context)
async def test_generate_questions(context):
action = GenerateQuestions(context=context)
rsp = await action.run(msg)
logger.info(f"{rsp.content=}")
assert "Questions" in rsp.content

View file

@ -23,9 +23,9 @@ from metagpt.const import TEST_DATA_PATH
Path("invoices/invoice-4.zip"),
],
)
async def test_invoice_ocr(invoice_path: Path):
async def test_invoice_ocr(invoice_path: Path, context):
invoice_path = TEST_DATA_PATH / invoice_path
resp = await InvoiceOCR().run(file_path=Path(invoice_path))
resp = await InvoiceOCR(context=context).run(file_path=Path(invoice_path))
assert isinstance(resp, list)

View file

@ -10,21 +10,18 @@ import pytest
from metagpt.actions.prepare_documents import PrepareDocuments
from metagpt.const import REQUIREMENT_FILENAME
from metagpt.context import CONTEXT
from metagpt.context import Context
from metagpt.schema import Message
from metagpt.utils.project_repo import ProjectRepo
@pytest.mark.asyncio
async def test_prepare_documents():
msg = Message(content="New user requirements balabala...")
context = Context()
if CONTEXT.git_repo:
CONTEXT.git_repo.delete_repository()
CONTEXT.git_repo = None
await PrepareDocuments(context=CONTEXT).run(with_messages=[msg])
assert CONTEXT.git_repo
doc = await ProjectRepo(CONTEXT.git_repo).docs.get(filename=REQUIREMENT_FILENAME)
await PrepareDocuments(context=context).run(with_messages=[msg])
assert context.git_repo
assert context.repo
doc = await context.repo.docs.get(filename=REQUIREMENT_FILENAME)
assert doc
assert doc.content == msg.content

View file

@ -12,8 +12,8 @@ from metagpt.logs import logger
@pytest.mark.asyncio
async def test_prepare_interview():
action = PrepareInterview()
async def test_prepare_interview(context):
action = PrepareInterview(context=context)
rsp = await action.run("I just graduated and hope to find a job as a Python engineer")
logger.info(f"{rsp.content=}")

View file

@ -9,21 +9,18 @@
import pytest
from metagpt.actions.project_management import WriteTasks
from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.project_repo import ProjectRepo
from tests.metagpt.actions.mock_json import DESIGN, PRD
@pytest.mark.asyncio
async def test_design_api():
project_repo = ProjectRepo(CONTEXT.git_repo)
await project_repo.docs.prd.save("1.txt", content=str(PRD))
await project_repo.docs.system_design.save("1.txt", content=str(DESIGN))
logger.info(CONTEXT.git_repo)
async def test_design_api(context):
await context.repo.docs.prd.save("1.txt", content=str(PRD))
await context.repo.docs.system_design.save("1.txt", content=str(DESIGN))
logger.info(context.git_repo)
action = WriteTasks()
action = WriteTasks(context=context)
result = await action.run(Message(content="", instruct_content=None))
logger.info(result)

View file

@ -11,19 +11,19 @@ from pathlib import Path
import pytest
from metagpt.actions.rebuild_class_view import RebuildClassView
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.context import CONTEXT
from metagpt.llm import LLM
@pytest.mark.asyncio
async def test_rebuild():
async def test_rebuild(context):
action = RebuildClassView(
name="RedBean", i_context=str(Path(__file__).parent.parent.parent.parent / "metagpt"), llm=LLM()
name="RedBean",
i_context=str(Path(__file__).parent.parent.parent.parent / "metagpt"),
llm=LLM(),
context=context,
)
await action.run()
graph_file_repo = CONTEXT.git_repo.new_file_repository(relative_path=GRAPH_REPO_FILE_REPO)
assert graph_file_repo.changed_files
assert context.repo.docs.graph_repo.changed_files
@pytest.mark.parametrize(

View file

@ -11,28 +11,28 @@ import pytest
from metagpt.actions.rebuild_sequence_view import RebuildSequenceView
from metagpt.const import GRAPH_REPO_FILE_REPO
from metagpt.context import CONTEXT
from metagpt.llm import LLM
from metagpt.utils.common import aread
from metagpt.utils.git_repository import ChangeType
from metagpt.utils.project_repo import ProjectRepo
@pytest.mark.asyncio
async def test_rebuild():
async def test_rebuild(context):
# Mock
data = await aread(filename=Path(__file__).parent / "../../data/graph_db/networkx.json")
graph_db_filename = Path(CONTEXT.git_repo.workdir.name).with_suffix(".json")
project_repo = ProjectRepo(CONTEXT.git_repo)
await project_repo.docs.graph_repo.save(filename=str(graph_db_filename), content=data)
CONTEXT.git_repo.add_change({f"{GRAPH_REPO_FILE_REPO}/{graph_db_filename}": ChangeType.UNTRACTED})
CONTEXT.git_repo.commit("commit1")
graph_db_filename = Path(context.repo.workdir.name).with_suffix(".json")
await context.repo.docs.graph_repo.save(filename=str(graph_db_filename), content=data)
context.git_repo.add_change({f"{GRAPH_REPO_FILE_REPO}/{graph_db_filename}": ChangeType.UNTRACTED})
context.git_repo.commit("commit1")
action = RebuildSequenceView(
name="RedBean", i_context=str(Path(__file__).parent.parent.parent.parent / "metagpt"), llm=LLM()
name="RedBean",
i_context=str(Path(__file__).parent.parent.parent.parent / "metagpt"),
llm=LLM(),
context=context,
)
await action.run()
assert project_repo.docs.graph_repo.changed_files
assert context.repo.docs.graph_repo.changed_files
@pytest.mark.parametrize(

View file

@ -14,7 +14,7 @@ from metagpt.tools.search_engine import SearchEngine
@pytest.mark.asyncio
async def test_collect_links(mocker, search_engine_mocker):
async def test_collect_links(mocker, search_engine_mocker, context):
async def mock_llm_ask(self, prompt: str, system_msgs):
if "Please provide up to 2 necessary keywords" in prompt:
return '["metagpt", "llm"]'
@ -28,7 +28,7 @@ async def test_collect_links(mocker, search_engine_mocker):
return "[1,2]"
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
resp = await research.CollectLinks(search_engine=SearchEngine(SearchEngineType.DUCK_DUCK_GO)).run(
resp = await research.CollectLinks(search_engine=SearchEngine(SearchEngineType.DUCK_DUCK_GO), context=context).run(
"The application of MetaGPT"
)
for i in ["MetaGPT use cases", "The roadmap of MetaGPT", "The function of MetaGPT", "What llm MetaGPT support"]:
@ -36,7 +36,7 @@ async def test_collect_links(mocker, search_engine_mocker):
@pytest.mark.asyncio
async def test_collect_links_with_rank_func(mocker, search_engine_mocker):
async def test_collect_links_with_rank_func(mocker, search_engine_mocker, context):
rank_before = []
rank_after = []
url_per_query = 4
@ -50,7 +50,7 @@ async def test_collect_links_with_rank_func(mocker, search_engine_mocker):
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_collect_links_llm_ask)
resp = await research.CollectLinks(
search_engine=SearchEngine(SearchEngineType.DUCK_DUCK_GO), rank_func=rank_func
search_engine=SearchEngine(SearchEngineType.DUCK_DUCK_GO), rank_func=rank_func, context=context
).run("The application of MetaGPT")
for x, y, z in zip(rank_before, rank_after, resp.values()):
assert x[::-1] == y
@ -58,7 +58,7 @@ async def test_collect_links_with_rank_func(mocker, search_engine_mocker):
@pytest.mark.asyncio
async def test_web_browse_and_summarize(mocker):
async def test_web_browse_and_summarize(mocker, context):
async def mock_llm_ask(*args, **kwargs):
return "metagpt"
@ -66,20 +66,20 @@ async def test_web_browse_and_summarize(mocker):
url = "https://github.com/geekan/MetaGPT"
url2 = "https://github.com/trending"
query = "What's new in metagpt"
resp = await research.WebBrowseAndSummarize().run(url, query=query)
resp = await research.WebBrowseAndSummarize(context=context).run(url, query=query)
assert len(resp) == 1
assert url in resp
assert resp[url] == "metagpt"
resp = await research.WebBrowseAndSummarize().run(url, url2, query=query)
resp = await research.WebBrowseAndSummarize(context=context).run(url, url2, query=query)
assert len(resp) == 2
async def mock_llm_ask(*args, **kwargs):
return "Not relevant."
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
resp = await research.WebBrowseAndSummarize().run(url, query=query)
resp = await research.WebBrowseAndSummarize(context=context).run(url, query=query)
assert len(resp) == 1
assert url in resp
@ -87,7 +87,7 @@ async def test_web_browse_and_summarize(mocker):
@pytest.mark.asyncio
async def test_conduct_research(mocker):
async def test_conduct_research(mocker, context):
data = None
async def mock_llm_ask(*args, **kwargs):
@ -101,7 +101,7 @@ async def test_conduct_research(mocker):
"outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc."
)
resp = await research.ConductResearch().run("The application of MetaGPT", content)
resp = await research.ConductResearch(context=context).run("The application of MetaGPT", content)
assert resp == data

View file

@ -24,19 +24,19 @@ async def test_run_text():
@pytest.mark.asyncio
async def test_run_script():
async def test_run_script(context):
# Successful command
out, err = await RunCode().run_script(".", command=["echo", "Hello World"])
out, err = await RunCode(context=context).run_script(".", command=["echo", "Hello World"])
assert out.strip() == "Hello World"
assert err == ""
# Unsuccessful command
out, err = await RunCode().run_script(".", command=["python", "-c", "print(1/0)"])
out, err = await RunCode(context=context).run_script(".", command=["python", "-c", "print(1/0)"])
assert "ZeroDivisionError" in err
@pytest.mark.asyncio
async def test_run():
async def test_run(context):
inputs = [
(RunCodeContext(mode="text", code_filename="a.txt", code="print('Hello, World')"), "PASS"),
(
@ -61,5 +61,5 @@ async def test_run():
),
]
for ctx, result in inputs:
rsp = await RunCode(i_context=ctx).run()
rsp = await RunCode(i_context=ctx, context=context).run()
assert result in rsp.summary

View file

@ -47,18 +47,18 @@ class TestSkillAction:
assert args.get("size_type") == "512x512"
@pytest.mark.asyncio
async def test_parser_action(self, mocker):
async def test_parser_action(self, mocker, context):
# mock
mocker.patch("metagpt.learn.text_to_image", return_value="https://mock.com/xxx")
parser_action = ArgumentsParingAction(skill=self.skill, ask="Draw an apple")
parser_action = ArgumentsParingAction(skill=self.skill, ask="Draw an apple", context=context)
rsp = await parser_action.run()
assert rsp
assert parser_action.args
assert parser_action.args.get("text") == "Draw an apple"
assert parser_action.args.get("size_type") == "512x512"
action = SkillAction(skill=self.skill, args=parser_action.args)
action = SkillAction(skill=self.skill, args=parser_action.args, context=context)
rsp = await action.run()
assert rsp
assert "image/png;base64," in rsp.content or "http" in rsp.content
@ -81,8 +81,8 @@ class TestSkillAction:
await SkillAction.find_and_call_function("dummy_call", {"a": 1})
@pytest.mark.asyncio
async def test_skill_action_error(self):
action = SkillAction(skill=self.skill, args={})
async def test_skill_action_error(self, context):
action = SkillAction(skill=self.skill, args={}, context=context)
rsp = await action.run()
assert "Error" in rsp.content

View file

@ -6,18 +6,14 @@
@File : test_summarize_code.py
@Modified By: mashenquan, 2023-12-6. Unit test for summarize_code.py
"""
import shutil
import uuid
from pathlib import Path
import pytest
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.context import Context
from metagpt.logs import logger
from metagpt.schema import CodeSummarizeContext
from metagpt.utils.git_repository import GitRepository
from metagpt.utils.project_repo import ProjectRepo
DESIGN_CONTENT = """
{"Implementation approach": "To develop this snake game, we will use the Python language and choose the Pygame library. Pygame is an open-source Python module collection specifically designed for writing video games. It provides functionalities such as displaying images and playing sounds, making it suitable for creating intuitive and responsive user interfaces. We will ensure efficient game logic to prevent any delays during gameplay. The scoring system will be simple, with the snake gaining points for each food it eats. We will use Pygame's event handling system to implement pause and resume functionality, as well as high-score tracking. The difficulty will increase by speeding up the snake's movement. In the initial version, we will focus on single-player mode and consider adding multiplayer mode and customizable skins in future updates. Based on the new requirement, we will also add a moving obstacle that appears randomly. If the snake eats this obstacle, the game will end. If the snake does not eat the obstacle, it will disappear after 5 seconds. 
For this, we need to add mechanisms for obstacle generation, movement, and disappearance in the game logic.", "Project_name": "snake_game", "File list": ["main.py", "game.py", "snake.py", "food.py", "obstacle.py", "scoreboard.py", "constants.py", "assets/styles.css", "assets/index.html"], "Data structures and interfaces": "```mermaid\n classDiagram\n class Game{\n +int score\n +int speed\n +bool game_over\n +bool paused\n +Snake snake\n +Food food\n +Obstacle obstacle\n +Scoreboard scoreboard\n +start_game() void\n +pause_game() void\n +resume_game() void\n +end_game() void\n +increase_difficulty() void\n +update() void\n +render() void\n Game()\n }\n class Snake{\n +list body_parts\n +str direction\n +bool grow\n +move() void\n +grow() void\n +check_collision() bool\n Snake()\n }\n class Food{\n +tuple position\n +spawn() void\n Food()\n }\n class Obstacle{\n +tuple position\n +int lifetime\n +bool active\n +spawn() void\n +move() void\n +check_collision() bool\n +disappear() void\n Obstacle()\n }\n class Scoreboard{\n +int high_score\n +update_score(int) void\n +reset_score() void\n +load_high_score() void\n +save_high_score() void\n Scoreboard()\n }\n class Constants{\n }\n Game \"1\" -- \"1\" Snake: has\n Game \"1\" -- \"1\" Food: has\n Game \"1\" -- \"1\" Obstacle: has\n Game \"1\" -- \"1\" Scoreboard: has\n ```", "Program call flow": "```sequenceDiagram\n participant M as Main\n participant G as Game\n participant S as Snake\n participant F as Food\n participant O as Obstacle\n participant SB as Scoreboard\n M->>G: start_game()\n loop game loop\n G->>S: move()\n G->>S: check_collision()\n G->>F: spawn()\n G->>O: spawn()\n G->>O: move()\n G->>O: check_collision()\n G->>O: disappear()\n G->>SB: update_score(score)\n G->>G: update()\n G->>G: render()\n alt if paused\n M->>G: pause_game()\n M->>G: resume_game()\n end\n alt if game_over\n G->>M: end_game()\n end\n end\n```", "Anything UNCLEAR": "There is no need for further clarification as the requirements are 
already clear."}
@ -181,35 +177,27 @@ class Snake:
@pytest.mark.asyncio
async def test_summarize_code():
async def test_summarize_code(context):
git_dir = Path(__file__).parent / f"unittest/{uuid.uuid4().hex}"
git_dir.mkdir(parents=True, exist_ok=True)
try:
context = Context()
context.git_repo = GitRepository(local_path=git_dir)
context.src_workspace = context.git_repo.workdir / "src"
project_repo = ProjectRepo(context.git_repo)
await project_repo.docs.system_design.save(filename="1.json", content=DESIGN_CONTENT)
await project_repo.docs.task.save(filename="1.json", content=TASK_CONTENT)
await project_repo.with_src_path(context.src_workspace).srcs.save(filename="food.py", content=FOOD_PY)
assert project_repo.srcs.workdir == context.src_workspace
await project_repo.srcs.save(filename="game.py", content=GAME_PY)
await project_repo.srcs.save(filename="main.py", content=MAIN_PY)
await project_repo.srcs.save(filename="snake.py", content=SNAKE_PY)
context.src_workspace = context.git_repo.workdir / "src"
await context.repo.docs.system_design.save(filename="1.json", content=DESIGN_CONTENT)
await context.repo.docs.task.save(filename="1.json", content=TASK_CONTENT)
await context.repo.with_src_path(context.src_workspace).srcs.save(filename="food.py", content=FOOD_PY)
assert context.repo.srcs.workdir == context.src_workspace
await context.repo.srcs.save(filename="game.py", content=GAME_PY)
await context.repo.srcs.save(filename="main.py", content=MAIN_PY)
await context.repo.srcs.save(filename="snake.py", content=SNAKE_PY)
all_files = project_repo.srcs.all_files
summarization_context = CodeSummarizeContext(
design_filename="1.json", task_filename="1.json", codes_filenames=all_files
)
action = SummarizeCode(context=context, i_context=summarization_context)
rsp = await action.run()
assert rsp
logger.info(rsp)
except Exception as e:
assert not e
finally:
shutil.rmtree(git_dir)
all_files = context.repo.srcs.all_files
summarization_context = CodeSummarizeContext(
design_filename="1.json", task_filename="1.json", codes_filenames=all_files
)
action = SummarizeCode(context=context, i_context=summarization_context)
rsp = await action.run()
assert rsp
logger.info(rsp)
if __name__ == "__main__":

View file

@ -9,13 +9,12 @@
import pytest
from metagpt.actions.talk_action import TalkAction
from metagpt.context import CONTEXT
from metagpt.schema import Message
@pytest.mark.asyncio
@pytest.mark.parametrize(
("agent_description", "language", "context", "knowledge", "history_summary"),
("agent_description", "language", "talk_context", "knowledge", "history_summary"),
[
(
"mathematician",
@ -33,12 +32,12 @@ from metagpt.schema import Message
),
],
)
async def test_prompt(agent_description, language, context, knowledge, history_summary):
async def test_prompt(agent_description, language, talk_context, knowledge, history_summary, context):
# Prerequisites
CONTEXT.kwargs.agent_description = agent_description
CONTEXT.kwargs.language = language
context.kwargs.agent_description = agent_description
context.kwargs.language = language
action = TalkAction(i_context=context, knowledge=knowledge, history_summary=history_summary)
action = TalkAction(i_context=talk_context, knowledge=knowledge, history_summary=history_summary, context=context)
assert "{" not in action.prompt
assert "{" not in action.prompt_gpt4

View file

@ -12,25 +12,22 @@ from pathlib import Path
import pytest
from metagpt.actions.write_code import WriteCode
from metagpt.context import CONTEXT
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.schema import CodingContext, Document
from metagpt.utils.common import aread
from metagpt.utils.project_repo import ProjectRepo
from tests.metagpt.actions.mock_markdown import TASKS_2, WRITE_CODE_PROMPT_SAMPLE
@pytest.mark.asyncio
async def test_write_code():
async def test_write_code(context):
# Prerequisites
CONTEXT.src_workspace = CONTEXT.git_repo.workdir / "writecode"
context.src_workspace = context.git_repo.workdir / "writecode"
coding_ctx = CodingContext(
filename="task_filename.py", design_doc=Document(content="设计一个名为'add'的函数,该函数接受两个整数作为输入,并返回它们的和。")
)
doc = Document(content=coding_ctx.model_dump_json())
write_code = WriteCode(i_context=doc)
write_code = WriteCode(i_context=doc, context=context)
code = await write_code.run()
logger.info(code.model_dump_json())
@ -41,45 +38,44 @@ async def test_write_code():
@pytest.mark.asyncio
async def test_write_code_directly():
async def test_write_code_directly(context):
prompt = WRITE_CODE_PROMPT_SAMPLE + "\n" + TASKS_2[0]
llm = LLM()
llm = context.llm_with_cost_manager_from_llm_config(context.config.llm)
rsp = await llm.aask(prompt)
logger.info(rsp)
@pytest.mark.asyncio
async def test_write_code_deps():
async def test_write_code_deps(context):
# Prerequisites
CONTEXT.src_workspace = CONTEXT.git_repo.workdir / "snake1/snake1"
context.src_workspace = context.git_repo.workdir / "snake1/snake1"
demo_path = Path(__file__).parent / "../../data/demo_project"
project_repo = ProjectRepo(CONTEXT.git_repo)
await project_repo.test_outputs.save(
await context.repo.test_outputs.save(
filename="test_game.py.json", content=await aread(str(demo_path / "test_game.py.json"))
)
await project_repo.docs.code_summary.save(
await context.repo.docs.code_summary.save(
filename="20231221155954.json",
content=await aread(str(demo_path / "code_summaries.json")),
)
await project_repo.docs.system_design.save(
await context.repo.docs.system_design.save(
filename="20231221155954.json",
content=await aread(str(demo_path / "system_design.json")),
)
await project_repo.docs.task.save(
await context.repo.docs.task.save(
filename="20231221155954.json", content=await aread(str(demo_path / "tasks.json"))
)
await project_repo.with_src_path(CONTEXT.src_workspace).srcs.save(
await context.repo.with_src_path(context.src_workspace).srcs.save(
filename="main.py", content='if __name__ == "__main__":\nmain()'
)
ccontext = CodingContext(
filename="game.py",
design_doc=await project_repo.docs.system_design.get(filename="20231221155954.json"),
task_doc=await project_repo.docs.task.get(filename="20231221155954.json"),
design_doc=await context.repo.docs.system_design.get(filename="20231221155954.json"),
task_doc=await context.repo.docs.task.get(filename="20231221155954.json"),
code_doc=Document(filename="game.py", content="", root_path="snake1"),
)
coding_doc = Document(root_path="snake1", filename="game.py", content=ccontext.json())
action = WriteCode(i_context=coding_doc)
action = WriteCode(i_context=coding_doc, context=context)
rsp = await action.run()
assert rsp
assert rsp.code_doc.content

View file

@ -12,28 +12,25 @@ from metagpt.schema import CodingContext, Document
@pytest.mark.asyncio
async def test_write_code_review(capfd):
async def test_write_code_review(capfd, context):
context.src_workspace = context.repo.workdir / "srcs"
code = """
def add(a, b):
return a +
"""
context = CodingContext(
coding_context = CodingContext(
filename="math.py", design_doc=Document(content="编写一个从a加b的函数返回a+b"), code_doc=Document(content=code)
)
context = await WriteCodeReview(i_context=context).run()
await WriteCodeReview(i_context=coding_context, context=context).run()
# 我们不能精确地预测生成的代码评审,但我们可以检查返回的是否为字符串
assert isinstance(context.code_doc.content, str)
assert len(context.code_doc.content) > 0
assert isinstance(coding_context.code_doc.content, str)
assert len(coding_context.code_doc.content) > 0
captured = capfd.readouterr()
print(f"输出内容: {captured.out}")
# @pytest.mark.asyncio
# async def test_write_code_review_directly():
# code = SEARCH_CODE_SAMPLE
# write_code_review = WriteCodeReview("write_code_review")
# review = await write_code_review.run(code)
# logger.info(review)
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -27,8 +27,8 @@ class Person:
],
ids=["google", "numpy", "sphinx"],
)
async def test_write_docstring(style: str, part: str):
ret = await WriteDocstring().run(code, style=style)
async def test_write_docstring(style: str, part: str, context):
ret = await WriteDocstring(context=context).run(code, style=style)
assert part in ret

View file

@ -10,21 +10,18 @@ import pytest
from metagpt.actions import UserRequirement, WritePRD
from metagpt.const import REQUIREMENT_FILENAME
from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.roles.product_manager import ProductManager
from metagpt.roles.role import RoleReactMode
from metagpt.schema import Message
from metagpt.utils.common import any_to_str
from metagpt.utils.project_repo import ProjectRepo
@pytest.mark.asyncio
async def test_write_prd(new_filename):
product_manager = ProductManager()
async def test_write_prd(new_filename, context):
product_manager = ProductManager(context=context)
requirements = "开发一个基于大语言模型与私有知识库的搜索引擎,希望可以基于大语言模型进行搜索总结"
project_repo = ProjectRepo(CONTEXT.git_repo)
await project_repo.docs.save(filename=REQUIREMENT_FILENAME, content=requirements)
await context.repo.docs.save(filename=REQUIREMENT_FILENAME, content=requirements)
product_manager.rc.react_mode = RoleReactMode.BY_ORDER
prd = await product_manager.run(Message(content=requirements, cause_by=UserRequirement))
assert prd.cause_by == any_to_str(WritePRD)
@ -34,7 +31,7 @@ async def test_write_prd(new_filename):
# Assert the prd is not None or empty
assert prd is not None
assert prd.content != ""
assert ProjectRepo(product_manager.context.git_repo).docs.prd.changed_files
assert product_manager.context.repo.docs.prd.changed_files
if __name__ == "__main__":

View file

@ -11,7 +11,7 @@ from metagpt.actions.write_prd_review import WritePRDReview
@pytest.mark.asyncio
async def test_write_prd_review():
async def test_write_prd_review(context):
prd = """
Introduction: This is a new feature for our product.
Goals: The goal is to improve user engagement.
@ -23,7 +23,7 @@ async def test_write_prd_review():
Timeline: The feature should be ready for testing in 1.5 months.
"""
write_prd_review = WritePRDReview(name="write_prd_review")
write_prd_review = WritePRDReview(name="write_prd_review", context=context)
prd_review = await write_prd_review.run(prd)

View file

@ -9,7 +9,7 @@ import pytest
from metagpt.actions.write_review import WriteReview
CONTEXT = """
TEMPLATE_CONTEXT = """
{
"Language": "zh_cn",
"Programming Language": "Python",
@ -46,8 +46,8 @@ CONTEXT = """
@pytest.mark.asyncio
async def test_write_review():
write_review = WriteReview()
review = await write_review.run(CONTEXT)
async def test_write_review(context):
write_review = WriteReview(context=context)
review = await write_review.run(TEMPLATE_CONTEXT)
assert review.instruct_content
assert review.get("LGTM") in ["LGTM", "LBTM"]

View file

@ -13,11 +13,11 @@ from metagpt.actions.write_teaching_plan import WriteTeachingPlanPart
@pytest.mark.asyncio
@pytest.mark.parametrize(
("topic", "context"),
("topic", "content"),
[("Title", "Lesson 1: Learn to draw an apple."), ("Teaching Content", "Lesson 1: Learn to draw an apple.")],
)
async def test_write_teaching_plan_part(topic, context):
action = WriteTeachingPlanPart(topic=topic, i_context=context)
async def test_write_teaching_plan_part(topic, content, context):
action = WriteTeachingPlanPart(topic=topic, i_context=content, context=context)
rsp = await action.run()
assert rsp

View file

@ -13,7 +13,7 @@ from metagpt.schema import Document, TestingContext
@pytest.mark.asyncio
async def test_write_test():
async def test_write_test(context):
code = """
import random
from typing import Tuple
@ -25,8 +25,8 @@ async def test_write_test():
def generate(self, max_y: int, max_x: int):
self.position = (random.randint(1, max_y - 1), random.randint(1, max_x - 1))
"""
context = TestingContext(filename="food.py", code_doc=Document(filename="food.py", content=code))
write_test = WriteTest(i_context=context)
testing_context = TestingContext(filename="food.py", code_doc=Document(filename="food.py", content=code))
write_test = WriteTest(i_context=testing_context, context=context)
context = await write_test.run()
logger.info(context.model_dump_json())
@ -39,12 +39,12 @@ async def test_write_test():
@pytest.mark.asyncio
async def test_write_code_invalid_code(mocker):
async def test_write_code_invalid_code(mocker, context):
# Mock the _aask method to return an invalid code string
mocker.patch.object(WriteTest, "_aask", return_value="Invalid Code String")
# Create an instance of WriteTest
write_test = WriteTest()
write_test = WriteTest(context=context)
# Call the write_code method
code = await write_test.write_code("Some prompt:")

View file

@ -14,8 +14,8 @@ from metagpt.actions.write_tutorial import WriteContent, WriteDirectory
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("English", "Write a tutorial about Python")])
async def test_write_directory(language: str, topic: str):
ret = await WriteDirectory(language=language).run(topic=topic)
async def test_write_directory(language: str, topic: str, context):
ret = await WriteDirectory(language=language, context=context).run(topic=topic)
assert isinstance(ret, dict)
assert "title" in ret
assert "directory" in ret
@ -29,8 +29,8 @@ async def test_write_directory(language: str, topic: str):
("language", "topic", "directory"),
[("English", "Write a tutorial about Python", {"Introduction": ["What is Python?", "Why learn Python?"]})],
)
async def test_write_content(language: str, topic: str, directory: Dict):
ret = await WriteContent(language=language, directory=directory).run(topic=topic)
async def test_write_content(language: str, topic: str, directory: Dict, context):
ret = await WriteContent(language=language, directory=directory, context=context).run(topic=topic)
assert isinstance(ret, str)
assert list(directory.keys())[0] in ret
for value in list(directory.values())[0]:

View file

@ -10,13 +10,12 @@ from pathlib import Path
import pytest
from metagpt.context import CONTEXT
from metagpt.learn.skill_loader import SkillsDeclaration
@pytest.mark.asyncio
async def test_suite():
CONTEXT.kwargs.agent_skills = [
async def test_suite(context):
context.kwargs.agent_skills = [
{"id": 1, "name": "text_to_speech", "type": "builtin", "config": {}, "enabled": True},
{"id": 2, "name": "text_to_image", "type": "builtin", "config": {}, "enabled": True},
{"id": 3, "name": "ai_call", "type": "builtin", "config": {}, "enabled": True},
@ -27,7 +26,7 @@ async def test_suite():
]
pathname = Path(__file__).parent / "../../../docs/.well-known/skills.yaml"
loader = await SkillsDeclaration.load(skill_yaml_file_name=pathname)
skills = loader.get_skill_list()
skills = loader.get_skill_list(context=context)
assert skills
assert len(skills) >= 3
for desc, name in skills.items():

View file

@ -13,7 +13,6 @@ import pytest
from metagpt.actions import WriteDesign, WritePRD
from metagpt.const import PRDS_FILE_REPO
from metagpt.context import CONTEXT
from metagpt.logs import logger
from metagpt.roles import Architect
from metagpt.schema import Message
@ -22,12 +21,12 @@ from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_architect():
async def test_architect(context):
# Prerequisites
filename = uuid.uuid4().hex + ".json"
await awrite(CONTEXT.git_repo.workdir / PRDS_FILE_REPO / filename, data=MockMessages.prd.content)
await awrite(context.repo.workdir / PRDS_FILE_REPO / filename, data=MockMessages.prd.content)
role = Architect()
role = Architect(context=context)
rsp = await role.run(with_message=Message(content="", cause_by=WritePRD))
logger.info(rsp)
assert len(rsp.content) > 0

View file

@ -12,7 +12,6 @@ from pydantic import BaseModel
from metagpt.actions.skill_action import SkillAction
from metagpt.actions.talk_action import TalkAction
from metagpt.context import CONTEXT
from metagpt.memory.brain_memory import BrainMemory
from metagpt.roles.assistant import Assistant
from metagpt.schema import Message
@ -20,11 +19,11 @@ from metagpt.utils.common import any_to_str
@pytest.mark.asyncio
async def test_run(mocker):
async def test_run(mocker, context):
# mock
mocker.patch("metagpt.learn.text_to_image", return_value="http://mock.com/1.png")
CONTEXT.kwargs.language = "Chinese"
context.kwargs.language = "Chinese"
class Input(BaseModel):
memory: BrainMemory
@ -80,7 +79,7 @@ async def test_run(mocker):
for i in inputs:
seed = Input(**i)
role = Assistant(language="Chinese")
role = Assistant(language="Chinese", context=context)
role.context.kwargs.language = seed.language
role.context.kwargs.agent_description = seed.agent_description
role.context.kwargs.agent_skills = agent_skills
@ -115,8 +114,8 @@ async def test_run(mocker):
],
)
@pytest.mark.asyncio
async def test_memory(memory):
role = Assistant()
async def test_memory(memory, context):
role = Assistant(context=context)
role.context.kwargs.agent_skills = []
role.load_memory(memory)

View file

@ -8,44 +8,35 @@
distribution feature for message handling.
"""
import json
import uuid
from pathlib import Path
import pytest
from metagpt.actions import WriteCode, WriteTasks
from metagpt.const import (
DEFAULT_WORKSPACE_ROOT,
REQUIREMENT_FILENAME,
SYSTEM_DESIGN_FILE_REPO,
TASK_FILE_REPO,
)
from metagpt.context import CONTEXT, Context
from metagpt.const import REQUIREMENT_FILENAME, SYSTEM_DESIGN_FILE_REPO, TASK_FILE_REPO
from metagpt.logs import logger
from metagpt.roles.engineer import Engineer
from metagpt.schema import CodingContext, Message
from metagpt.utils.common import CodeParser, any_to_name, any_to_str, aread, awrite
from metagpt.utils.git_repository import ChangeType, GitRepository
from metagpt.utils.project_repo import ProjectRepo
from metagpt.utils.git_repository import ChangeType
from tests.metagpt.roles.mock import STRS_FOR_PARSING, TASKS, MockMessages
@pytest.mark.asyncio
async def test_engineer():
async def test_engineer(context):
# Prerequisites
rqno = "20231221155954.json"
project_repo = ProjectRepo(CONTEXT.git_repo)
await project_repo.save(REQUIREMENT_FILENAME, content=MockMessages.req.content)
await project_repo.docs.prd.save(rqno, content=MockMessages.prd.content)
await project_repo.docs.system_design.save(rqno, content=MockMessages.system_design.content)
await project_repo.docs.task.save(rqno, content=MockMessages.json_tasks.content)
await context.repo.save(REQUIREMENT_FILENAME, content=MockMessages.req.content)
await context.repo.docs.prd.save(rqno, content=MockMessages.prd.content)
await context.repo.docs.system_design.save(rqno, content=MockMessages.system_design.content)
await context.repo.docs.task.save(rqno, content=MockMessages.json_tasks.content)
engineer = Engineer()
engineer = Engineer(context=context)
rsp = await engineer.run(Message(content="", cause_by=WriteTasks))
logger.info(rsp)
assert rsp.cause_by == any_to_str(WriteCode)
assert project_repo.with_src_path(CONTEXT.src_workspace).srcs.changed_files
assert context.repo.with_src_path(context.src_workspace).srcs.changed_files
def test_parse_str():
@ -112,10 +103,8 @@ def test_todo():
@pytest.mark.asyncio
async def test_new_coding_context():
async def test_new_coding_context(context):
# Prerequisites
context = Context()
context.git_repo = GitRepository(local_path=DEFAULT_WORKSPACE_ROOT / f"unittest/{uuid.uuid4().hex}")
demo_path = Path(__file__).parent / "../../data/demo_project"
deps = json.loads(await aread(demo_path / "dependencies.json"))
dependency = await context.git_repo.get_dependency()
@ -123,11 +112,11 @@ async def test_new_coding_context():
await dependency.update(k, set(v))
data = await aread(demo_path / "system_design.json")
rqno = "20231221155954.json"
await awrite(context.git_repo.workdir / SYSTEM_DESIGN_FILE_REPO / rqno, data)
await awrite(context.repo.workdir / SYSTEM_DESIGN_FILE_REPO / rqno, data)
data = await aread(demo_path / "tasks.json")
await awrite(context.git_repo.workdir / TASK_FILE_REPO / rqno, data)
await awrite(context.repo.workdir / TASK_FILE_REPO / rqno, data)
context.src_workspace = Path(context.git_repo.workdir) / "game_2048"
context.src_workspace = Path(context.repo.workdir) / "game_2048"
try:
filename = "game.py"
@ -149,9 +138,7 @@ async def test_new_coding_context():
context.git_repo.add_change({f"{TASK_FILE_REPO}/{rqno}": ChangeType.UNTRACTED})
context.git_repo.commit("mock env")
await ProjectRepo(context.git_repo).with_src_path(context.src_workspace).srcs.save(
filename=filename, content="content"
)
await context.repo.with_src_path(context.src_workspace).srcs.save(filename=filename, content="content")
role = Engineer(context=context)
assert not role.code_todos
await role._new_code_actions()

View file

@ -41,9 +41,11 @@ from metagpt.schema import Message
),
],
)
async def test_invoice_ocr_assistant(query: str, invoice_path: Path, invoice_table_path: Path, expected_result: dict):
async def test_invoice_ocr_assistant(
query: str, invoice_path: Path, invoice_table_path: Path, expected_result: dict, context
):
invoice_path = TEST_DATA_PATH / invoice_path
role = InvoiceOCRAssistant()
role = InvoiceOCRAssistant(context=context)
await role.run(Message(content=query, instruct_content=InvoicePath(file_path=invoice_path)))
invoice_table_path = DATA_PATH / invoice_table_path
df = pd.read_excel(invoice_table_path)

View file

@ -5,17 +5,51 @@
@Author : alexanderwu
@File : test_product_manager.py
"""
import json
import pytest
from metagpt.actions import WritePRD
from metagpt.actions.prepare_documents import PrepareDocuments
from metagpt.const import REQUIREMENT_FILENAME
from metagpt.context import Context
from metagpt.logs import logger
from metagpt.roles import ProductManager
from metagpt.utils.common import any_to_str
from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_product_manager(new_filename):
product_manager = ProductManager()
rsp = await product_manager.run(MockMessages.req)
logger.info(rsp)
assert len(rsp.content) > 0
assert rsp.content == MockMessages.req.content
context = Context()
try:
assert context.git_repo is None
assert context.repo is None
product_manager = ProductManager(context=context)
# prepare documents
rsp = await product_manager.run(MockMessages.req)
assert context.git_repo
assert context.repo
assert rsp.cause_by == any_to_str(PrepareDocuments)
assert REQUIREMENT_FILENAME in context.repo.docs.changed_files
# write prd
rsp = await product_manager.run(rsp)
assert rsp.cause_by == any_to_str(WritePRD)
logger.info(rsp)
assert len(rsp.content) > 0
doc = list(rsp.instruct_content.docs.values())[0]
m = json.loads(doc.content)
assert m["Original Requirements"] == MockMessages.req.content
# nothing to do
rsp = await product_manager.run(rsp)
assert rsp is None
except Exception as e:
assert not e
finally:
context.git_repo.delete_repository()
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -13,7 +13,7 @@ from tests.metagpt.roles.mock import MockMessages
@pytest.mark.asyncio
async def test_project_manager():
project_manager = ProjectManager()
async def test_project_manager(context):
project_manager = ProjectManager(context=context)
rsp = await project_manager.run(MockMessages.system_design)
logger.info(rsp)

View file

@ -13,20 +13,19 @@ from pydantic import Field
from metagpt.actions import DebugError, RunCode, WriteTest
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.context import CONTEXT
from metagpt.environment import Environment
from metagpt.roles import QaEngineer
from metagpt.schema import Message
from metagpt.utils.common import any_to_str, aread, awrite
async def test_qa():
async def test_qa(context):
# Prerequisites
demo_path = Path(__file__).parent / "../../data/demo_project"
CONTEXT.src_workspace = Path(CONTEXT.git_repo.workdir) / "qa/game_2048"
context.src_workspace = Path(context.repo.workdir) / "qa/game_2048"
data = await aread(filename=demo_path / "game.py", encoding="utf-8")
await awrite(filename=CONTEXT.src_workspace / "game.py", data=data, encoding="utf-8")
await awrite(filename=Path(CONTEXT.git_repo.workdir) / "requirements.txt", data="")
await awrite(filename=context.src_workspace / "game.py", data=data, encoding="utf-8")
await awrite(filename=Path(context.repo.workdir) / "requirements.txt", data="")
class MockEnv(Environment):
msgs: List[Message] = Field(default_factory=list)
@ -37,7 +36,7 @@ async def test_qa():
env = MockEnv()
role = QaEngineer()
role = QaEngineer(context=context)
role.set_env(env)
await role.run(with_message=Message(content="", cause_by=SummarizeCode))
assert env.msgs

View file

@ -28,12 +28,12 @@ async def mock_llm_ask(self, prompt: str, system_msgs):
@pytest.mark.asyncio
async def test_researcher(mocker, search_engine_mocker):
async def test_researcher(mocker, search_engine_mocker, context):
with TemporaryDirectory() as dirname:
topic = "dataiku vs. datarobot"
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
researcher.RESEARCH_PATH = Path(dirname)
role = researcher.Researcher()
role = researcher.Researcher(context=context)
for i in role.actions:
if isinstance(i, CollectLinks):
i.search_engine = SearchEngine(SearchEngineType.DUCK_DUCK_GO)
@ -41,7 +41,7 @@ async def test_researcher(mocker, search_engine_mocker):
assert (researcher.RESEARCH_PATH / f"{topic}.md").read_text().startswith("# Research Report")
def test_write_report(mocker):
def test_write_report(mocker, context):
with TemporaryDirectory() as dirname:
for i, topic in enumerate(
[
@ -53,7 +53,7 @@ def test_write_report(mocker):
):
researcher.RESEARCH_PATH = Path(dirname)
content = "# Research Report"
researcher.Researcher().write_report(topic, content)
researcher.Researcher(context=context).write_report(topic, content)
assert (researcher.RESEARCH_PATH / f"{i+1}. metagpt.md").read_text().startswith("# Research Report")

View file

@ -13,8 +13,8 @@ def test_role_desc():
assert role.desc == "Best Seller"
def test_role_human():
role = Role(is_human=True)
def test_role_human(context):
role = Role(is_human=True, context=context)
assert isinstance(role.llm, HumanProvider)

View file

@ -15,8 +15,8 @@ from metagpt.roles.tutorial_assistant import TutorialAssistant
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("Chinese", "Write a tutorial about pip")])
async def test_tutorial_assistant(language: str, topic: str):
role = TutorialAssistant(language=language)
async def test_tutorial_assistant(language: str, topic: str, context):
role = TutorialAssistant(language=language, context=context)
msg = await role.run(topic)
assert TUTORIAL_PATH.exists()
filename = msg.content

View file

@ -5,23 +5,22 @@
import pytest
from metagpt.actions import Action
from metagpt.llm import LLM
@pytest.mark.asyncio
async def test_action_serdeser():
action = Action()
async def test_action_serdeser(context):
action = Action(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
assert "__module_class_name" in ser_action_dict
action = Action(name="test")
action = Action(name="test", context=context)
ser_action_dict = action.model_dump()
assert "test" in ser_action_dict["name"]
new_action = Action(**ser_action_dict)
new_action = Action(**ser_action_dict, context=context)
assert new_action.name == "test"
assert isinstance(new_action.llm, type(LLM()))
assert isinstance(new_action.llm, type(context.llm()))
assert len(await new_action._aask("who are you")) > 0

View file

@ -9,16 +9,20 @@ from metagpt.roles.architect import Architect
@pytest.mark.asyncio
async def test_architect_serdeser():
role = Architect()
async def test_architect_serdeser(context):
role = Architect(context=context)
ser_role_dict = role.model_dump(by_alias=True)
assert "name" in ser_role_dict
assert "states" in ser_role_dict
assert "actions" in ser_role_dict
new_role = Architect(**ser_role_dict)
new_role = Architect(**ser_role_dict, context=context)
assert new_role.name == "Bob"
assert len(new_role.actions) == 1
assert len(new_role.rc.watch) == 1
assert isinstance(new_role.actions[0], Action)
await new_role.actions[0].run(with_messages="write a cli snake game")
if __name__ == "__main__":
pytest.main([__file__, "-s"])

View file

@ -18,20 +18,20 @@ from tests.metagpt.serialize_deserialize.test_serdeser_base import (
)
def test_env_serdeser():
env = Environment()
def test_env_serdeser(context):
env = Environment(context=context)
env.publish_message(message=Message(content="test env serialize"))
ser_env_dict = env.model_dump()
assert "roles" in ser_env_dict
assert len(ser_env_dict["roles"]) == 0
new_env = Environment(**ser_env_dict)
new_env = Environment(**ser_env_dict, context=context)
assert len(new_env.roles) == 0
assert len(new_env.history) == 25
def test_environment_serdeser():
def test_environment_serdeser(context):
out_mapping = {"field1": (list[str], ...)}
out_data = {"field1": ["field1 value1", "field1 value2"]}
ic_obj = ActionNode.create_model_class("prd", out_mapping)
@ -40,7 +40,7 @@ def test_environment_serdeser():
content="prd", instruct_content=ic_obj(**out_data), role="product manager", cause_by=any_to_str(UserRequirement)
)
environment = Environment()
environment = Environment(context=context)
role_c = RoleC()
environment.add_role(role_c)
environment.publish_message(message)
@ -48,7 +48,7 @@ def test_environment_serdeser():
ser_data = environment.model_dump()
assert ser_data["roles"]["Role C"]["name"] == "RoleC"
new_env: Environment = Environment(**ser_data)
new_env: Environment = Environment(**ser_data, context=context)
assert len(new_env.roles) == 1
assert list(new_env.roles.values())[0].states == list(environment.roles.values())[0].states
@ -57,22 +57,22 @@ def test_environment_serdeser():
assert type(list(new_env.roles.values())[0].actions[1]) == ActionRaise
def test_environment_serdeser_v2():
environment = Environment()
def test_environment_serdeser_v2(context):
environment = Environment(context=context)
pm = ProjectManager()
environment.add_role(pm)
ser_data = environment.model_dump()
new_env: Environment = Environment(**ser_data)
new_env: Environment = Environment(**ser_data, context=context)
role = new_env.get_role(pm.profile)
assert isinstance(role, ProjectManager)
assert isinstance(role.actions[0], WriteTasks)
assert isinstance(list(new_env.roles.values())[0].actions[0], WriteTasks)
def test_environment_serdeser_save():
environment = Environment()
def test_environment_serdeser_save(context):
environment = Environment(context=context)
role_c = RoleC()
stg_path = serdeser_path.joinpath("team", "environment")
@ -82,6 +82,6 @@ def test_environment_serdeser_save():
write_json_file(env_path, environment.model_dump())
env_dict = read_json_file(env_path)
new_env: Environment = Environment(**env_dict)
new_env: Environment = Environment(**env_dict, context=context)
assert len(new_env.roles) == 1
assert type(list(new_env.roles.values())[0].actions[0]) == ActionOK

View file

@ -13,7 +13,7 @@ from metagpt.utils.common import any_to_str, read_json_file, write_json_file
from tests.metagpt.serialize_deserialize.test_serdeser_base import serdeser_path
def test_memory_serdeser():
def test_memory_serdeser(context):
msg1 = Message(role="Boss", content="write a snake game", cause_by=UserRequirement)
out_mapping = {"field2": (list[str], ...)}
@ -39,7 +39,7 @@ def test_memory_serdeser():
assert memory.count() == 2
def test_memory_serdeser_save():
def test_memory_serdeser_save(context):
msg1 = Message(role="User", content="write a 2048 game", cause_by=UserRequirement)
out_mapping = {"field1": (list[str], ...)}

View file

@ -8,12 +8,12 @@ from metagpt.actions.prepare_interview import PrepareInterview
@pytest.mark.asyncio
async def test_action_serdeser():
action = PrepareInterview()
async def test_action_serdeser(context):
action = PrepareInterview(context=context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "PrepareInterview"
new_action = PrepareInterview(**serialized_data)
new_action = PrepareInterview(**serialized_data, context=context)
assert new_action.name == "PrepareInterview"
assert type(await new_action.run("python developer")) == ActionNode

View file

@ -10,10 +10,10 @@ from metagpt.schema import Message
@pytest.mark.asyncio
async def test_product_manager_serdeser(new_filename):
role = ProductManager()
async def test_product_manager_serdeser(new_filename, context):
role = ProductManager(context=context)
ser_role_dict = role.model_dump(by_alias=True)
new_role = ProductManager(**ser_role_dict)
new_role = ProductManager(**ser_role_dict, context=context)
assert new_role.name == "Alice"
assert len(new_role.actions) == 2

View file

@ -10,14 +10,14 @@ from metagpt.roles.project_manager import ProjectManager
@pytest.mark.asyncio
async def test_project_manager_serdeser():
role = ProjectManager()
async def test_project_manager_serdeser(context):
role = ProjectManager(context=context)
ser_role_dict = role.model_dump(by_alias=True)
assert "name" in ser_role_dict
assert "states" in ser_role_dict
assert "actions" in ser_role_dict
new_role = ProjectManager(**ser_role_dict)
new_role = ProjectManager(**ser_role_dict, context=context)
assert new_role.name == "Eve"
assert len(new_role.actions) == 1
assert isinstance(new_role.actions[0], Action)

View file

@ -8,13 +8,13 @@ from metagpt.roles.researcher import Researcher
@pytest.mark.asyncio
async def test_tutorial_assistant_serdeser():
role = Researcher()
async def test_tutorial_assistant_serdeser(context):
role = Researcher(context=context)
ser_role_dict = role.model_dump()
assert "name" in ser_role_dict
assert "language" in ser_role_dict
new_role = Researcher(**ser_role_dict)
new_role = Researcher(**ser_role_dict, context=context)
assert new_role.language == "en-us"
assert len(new_role.actions) == 3
assert isinstance(new_role.actions[0], CollectLinks)

View file

@ -26,7 +26,7 @@ from tests.metagpt.serialize_deserialize.test_serdeser_base import (
)
def test_roles():
def test_roles(context):
role_a = RoleA()
assert len(role_a.rc.watch) == 1
role_b = RoleB()
@ -37,7 +37,7 @@ def test_roles():
assert len(role_d.actions) == 1
def test_role_subclasses():
def test_role_subclasses(context):
"""test subclasses of role with same fields in ser&deser"""
class RoleSubClasses(BaseModel):
@ -51,7 +51,7 @@ def test_role_subclasses():
assert isinstance(new_role_subcls.roles[1], RoleB)
def test_role_serialize():
def test_role_serialize(context):
role = Role()
ser_role_dict = role.model_dump()
assert "name" in ser_role_dict
@ -59,7 +59,7 @@ def test_role_serialize():
assert "actions" in ser_role_dict
def test_engineer_serdeser():
def test_engineer_serdeser(context):
role = Engineer()
ser_role_dict = role.model_dump()
assert "name" in ser_role_dict
@ -73,7 +73,7 @@ def test_engineer_serdeser():
assert isinstance(new_role.actions[0], WriteCode)
def test_role_serdeser_save():
def test_role_serdeser_save(context):
shutil.rmtree(serdeser_path.joinpath("team"), ignore_errors=True)
pm = ProductManager()
@ -89,7 +89,7 @@ def test_role_serdeser_save():
@pytest.mark.asyncio
async def test_role_serdeser_interrupt():
async def test_role_serdeser_interrupt(context):
role_c = RoleC()
shutil.rmtree(serdeser_path.joinpath("team"), ignore_errors=True)

View file

@ -21,8 +21,8 @@ from tests.metagpt.serialize_deserialize.test_serdeser_base import (
)
def test_team_deserialize():
company = Team()
def test_team_deserialize(context):
company = Team(context=context)
pm = ProductManager()
arch = Architect()
@ -52,10 +52,10 @@ def mock_team_serialize(self, stg_path: Path = serdeser_path.joinpath("team")):
write_json_file(team_info_path, self.model_dump())
def test_team_serdeser_save(mocker):
def test_team_serdeser_save(mocker, context):
mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)
company = Team()
company = Team(context=context)
company.hire([RoleC()])
stg_path = serdeser_path.joinpath("team")
@ -69,14 +69,14 @@ def test_team_serdeser_save(mocker):
@pytest.mark.asyncio
async def test_team_recover(mocker):
async def test_team_recover(mocker, context):
mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)
idea = "write a snake game"
stg_path = serdeser_path.joinpath("team")
shutil.rmtree(stg_path, ignore_errors=True)
company = Team()
company = Team(context=context)
role_c = RoleC()
company.hire([role_c])
company.run_project(idea)
@ -95,14 +95,14 @@ async def test_team_recover(mocker):
@pytest.mark.asyncio
async def test_team_recover_save(mocker):
async def test_team_recover_save(mocker, context):
mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)
idea = "write a 2048 web game"
stg_path = serdeser_path.joinpath("team")
shutil.rmtree(stg_path, ignore_errors=True)
company = Team()
company = Team(context=context)
role_c = RoleC()
company.hire([role_c])
company.run_project(idea)
@ -121,7 +121,7 @@ async def test_team_recover_save(mocker):
@pytest.mark.asyncio
async def test_team_recover_multi_roles_save(mocker):
async def test_team_recover_multi_roles_save(mocker, context):
mocker.patch("metagpt.team.Team.serialize", mock_team_serialize)
idea = "write a snake game"
@ -131,7 +131,7 @@ async def test_team_recover_multi_roles_save(mocker):
role_a = RoleA()
role_b = RoleB()
company = Team()
company = Team(context=context)
company.hire([role_a, role_b])
company.run_project(idea)
await company.run(n_round=4)

View file

@ -7,7 +7,7 @@ from metagpt.roles.tutorial_assistant import TutorialAssistant
@pytest.mark.asyncio
async def test_tutorial_assistant_serdeser():
async def test_tutorial_assistant_serdeser(context):
role = TutorialAssistant()
ser_role_dict = role.model_dump()
assert "name" in ser_role_dict

View file

@ -9,22 +9,23 @@ from metagpt.actions import WriteCode
from metagpt.schema import CodingContext, Document
def test_write_design_serdeser():
action = WriteCode()
def test_write_design_serdeser(context):
action = WriteCode(context=context)
ser_action_dict = action.model_dump()
assert ser_action_dict["name"] == "WriteCode"
assert "llm" not in ser_action_dict # not export
@pytest.mark.asyncio
async def test_write_code_serdeser():
context = CodingContext(
async def test_write_code_serdeser(context):
context.src_workspace = context.repo.workdir / "srcs"
coding_context = CodingContext(
filename="test_code.py", design_doc=Document(content="write add function to calculate two numbers")
)
doc = Document(content=context.model_dump_json())
action = WriteCode(i_context=doc)
doc = Document(content=coding_context.model_dump_json())
action = WriteCode(i_context=doc, context=context)
serialized_data = action.model_dump()
new_action = WriteCode(**serialized_data)
new_action = WriteCode(**serialized_data, context=context)
assert new_action.name == "WriteCode"
await action.run()

View file

@ -9,22 +9,23 @@ from metagpt.schema import CodingContext, Document
@pytest.mark.asyncio
async def test_write_code_review_serdeser():
async def test_write_code_review_serdeser(context):
context.src_workspace = context.repo.workdir / "srcs"
code_content = """
def div(a: int, b: int = 0):
return a / b
"""
context = CodingContext(
coding_context = CodingContext(
filename="test_op.py",
design_doc=Document(content="divide two numbers"),
code_doc=Document(content=code_content),
)
action = WriteCodeReview(i_context=context)
action = WriteCodeReview(i_context=coding_context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "WriteCodeReview"
new_action = WriteCodeReview(**serialized_data)
new_action = WriteCodeReview(**serialized_data, context=context)
assert new_action.name == "WriteCodeReview"
await new_action.run()

View file

@ -8,24 +8,24 @@ from metagpt.actions import WriteDesign, WriteTasks
@pytest.mark.asyncio
async def test_write_design_serialize():
action = WriteDesign()
async def test_write_design_serialize(context):
action = WriteDesign(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WriteDesign(**ser_action_dict)
new_action = WriteDesign(**ser_action_dict, context=context)
assert new_action.name == "WriteDesign"
await new_action.run(with_messages="write a cli snake game")
@pytest.mark.asyncio
async def test_write_task_serialize():
action = WriteTasks()
async def test_write_task_serialize(context):
action = WriteTasks(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WriteTasks(**ser_action_dict)
new_action = WriteTasks(**ser_action_dict, context=context)
assert new_action.name == "WriteTasks"
await new_action.run(with_messages="write a cli snake game")

View file

@ -29,14 +29,14 @@ class Person:
],
ids=["google", "numpy", "sphinx"],
)
async def test_action_serdeser(style: str, part: str):
action = WriteDocstring()
async def test_action_serdeser(style: str, part: str, context):
action = WriteDocstring(context=context)
serialized_data = action.model_dump()
assert "name" in serialized_data
assert serialized_data["desc"] == "Write docstring for code."
new_action = WriteDocstring(**serialized_data)
new_action = WriteDocstring(**serialized_data, context=context)
assert new_action.name == "WriteDocstring"
assert new_action.desc == "Write docstring for code."

View file

@ -10,13 +10,13 @@ from metagpt.schema import Message
@pytest.mark.asyncio
async def test_action_serdeser(new_filename):
action = WritePRD()
async def test_action_serdeser(new_filename, context):
action = WritePRD(context=context)
ser_action_dict = action.model_dump()
assert "name" in ser_action_dict
assert "llm" not in ser_action_dict # not export
new_action = WritePRD(**ser_action_dict)
new_action = WritePRD(**ser_action_dict, context=context)
assert new_action.name == "WritePRD"
with pytest.raises(FileNotFoundError):
await new_action.run(with_messages=Message(content="write a cli snake game"))

View file

@ -5,7 +5,7 @@ import pytest
from metagpt.actions.action_node import ActionNode
from metagpt.actions.write_review import WriteReview
CONTEXT = """
TEMPLATE_CONTEXT = """
{
"Language": "zh_cn",
"Programming Language": "Python",
@ -42,13 +42,13 @@ CONTEXT = """
@pytest.mark.asyncio
async def test_action_serdeser():
action = WriteReview()
async def test_action_serdeser(context):
action = WriteReview(context=context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "WriteReview"
new_action = WriteReview(**serialized_data)
review = await new_action.run(CONTEXT)
new_action = WriteReview(**serialized_data, context=context)
review = await new_action.run(TEMPLATE_CONTEXT)
assert new_action.name == "WriteReview"
assert type(review) == ActionNode

View file

@ -9,13 +9,13 @@ from metagpt.actions.write_tutorial import WriteContent, WriteDirectory
@pytest.mark.asyncio
@pytest.mark.parametrize(("language", "topic"), [("English", "Write a tutorial about Python")])
async def test_write_directory_serdeser(language: str, topic: str):
action = WriteDirectory()
async def test_write_directory_serdeser(language: str, topic: str, context):
action = WriteDirectory(context=context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "WriteDirectory"
assert serialized_data["language"] == "Chinese"
new_action = WriteDirectory(**serialized_data)
new_action = WriteDirectory(**serialized_data, context=context)
ret = await new_action.run(topic=topic)
assert isinstance(ret, dict)
assert "title" in ret
@ -30,12 +30,12 @@ async def test_write_directory_serdeser(language: str, topic: str):
("language", "topic", "directory"),
[("English", "Write a tutorial about Python", {"Introduction": ["What is Python?", "Why learn Python?"]})],
)
async def test_write_content_serdeser(language: str, topic: str, directory: Dict):
action = WriteContent(language=language, directory=directory)
async def test_write_content_serdeser(language: str, topic: str, directory: Dict, context):
action = WriteContent(language=language, directory=directory, context=context)
serialized_data = action.model_dump()
assert serialized_data["name"] == "WriteContent"
new_action = WriteContent(**serialized_data)
new_action = WriteContent(**serialized_data, context=context)
ret = await new_action.run(topic=topic)
assert isinstance(ret, str)
assert list(directory.keys())[0] in ret

View file

@ -5,11 +5,14 @@
@Author : alexanderwu
@File : test_context_mixin.py
"""
from pathlib import Path
import pytest
from pydantic import BaseModel
from metagpt.actions import Action
from metagpt.config2 import Config
from metagpt.const import CONFIG_ROOT
from metagpt.context_mixin import ContextMixin
from metagpt.environment import Environment
from metagpt.roles import Role
@ -101,7 +104,10 @@ def test_config_mixin_4_multi_inheritance_override_config():
@pytest.mark.asyncio
async def test_config_priority():
"""If action's config is set, then its llm will be set, otherwise, it will use the role's llm"""
home_dir = Path.home() / CONFIG_ROOT
gpt4t = Config.from_home("gpt-4-1106-preview.yaml")
if not home_dir.exists():
assert gpt4t is None
gpt35 = Config.default()
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-0613"
@ -120,7 +126,7 @@ async def test_config_priority():
env = Environment(desc="US election live broadcast")
Team(investment=10.0, env=env, roles=[A, B, C])
assert a1.llm.model == "gpt-4-1106-preview"
assert a1.llm.model == "gpt-4-1106-preview" if Path(home_dir / "gpt-4-1106-preview.yaml").exists() else "gpt-4-0613"
assert a2.llm.model == "gpt-4-0613"
assert a3.llm.model == "gpt-3.5-turbo-1106"

View file

@ -12,14 +12,12 @@ from pathlib import Path
import pytest
import requests
from metagpt.context import CONTEXT
@pytest.mark.asyncio
async def test_oas2_svc():
async def test_oas2_svc(context):
workdir = Path(__file__).parent.parent.parent.parent
script_pathname = workdir / "metagpt/tools/metagpt_oas3_api_svc.py"
env = CONTEXT.new_environ()
env = context.new_environ()
env["PYTHONPATH"] = str(workdir) + ":" + env.get("PYTHONPATH", "")
process = subprocess.Popen(["python", str(script_pathname)], cwd=str(workdir), env=env)
await asyncio.sleep(5)

View file

@ -12,14 +12,12 @@ from pathlib import Path
import pytest
import requests
from metagpt.context import CONTEXT
@pytest.mark.asyncio
async def test_hello():
async def test_hello(context):
workdir = Path(__file__).parent.parent.parent.parent
script_pathname = workdir / "metagpt/tools/openapi_v3_hello.py"
env = CONTEXT.new_environ()
env = context.new_environ()
env["PYTHONPATH"] = str(workdir) + ":" + env.get("PYTHONPATH", "")
process = subprocess.Popen(["python", str(script_pathname)], cwd=workdir, env=env)
await asyncio.sleep(5)

View file

@ -8,20 +8,19 @@
import pytest
from metagpt.context import CONTEXT
from metagpt.utils.common import check_cmd_exists
from metagpt.utils.mermaid import MMC1, mermaid_to_file
@pytest.mark.asyncio
@pytest.mark.parametrize("engine", ["nodejs", "ink"]) # TODO: playwright and pyppeteer
async def test_mermaid(engine):
async def test_mermaid(engine, context):
# nodejs prerequisites: npm install -g @mermaid-js/mermaid-cli
# ink prerequisites: connected to internet
# playwright prerequisites: playwright install --with-deps chromium
assert check_cmd_exists("npm") == 0
save_to = CONTEXT.git_repo.workdir / f"{engine}/1"
save_to = context.git_repo.workdir / f"{engine}/1"
await mermaid_to_file(engine, MMC1, save_to)
# ink does not support pdf