diff --git a/metagpt/actions/write_docstring.py b/metagpt/actions/write_docstring.py index 68856c360..728b49fab 100644 --- a/metagpt/actions/write_docstring.py +++ b/metagpt/actions/write_docstring.py @@ -21,7 +21,10 @@ Example: This script uses the 'fire' library to create a command-line interface. It generates docstrings for the given Python code using the specified docstring style and adds them to the code. """ +from __future__ import annotations + import ast +from pathlib import Path from typing import Literal, Optional from pydantic import Field @@ -29,7 +32,7 @@ from pydantic import Field from metagpt.actions.action import Action from metagpt.llm import LLM from metagpt.provider.base_llm import BaseLLM -from metagpt.utils.common import OutputParser +from metagpt.utils.common import OutputParser, aread, awrite from metagpt.utils.pycst import merge_docstring PYTHON_DOCSTRING_SYSTEM = """### Requirements @@ -187,6 +190,16 @@ class WriteDocstring(Action): documented_code = OutputParser.parse_python_code(documented_code) return merge_docstring(code, documented_code) + @staticmethod + async def write_docstring( + filename: str | Path, overwrite: bool = False, style: Literal["google", "numpy", "sphinx"] = "google" + ) -> str: + data = await aread(str(filename)) + code = await WriteDocstring().run(data, style=style) + if overwrite: + await awrite(filename, code) + return code + def _simplify_python_code(code: str) -> None: """Simplifies the given Python code by removing expressions and the last if statement. 
@@ -207,13 +220,4 @@ def _simplify_python_code(code: str) -> None: if __name__ == "__main__": import fire - async def run(filename: str, overwrite: bool = False, style: Literal["google", "numpy", "sphinx"] = "google"): - with open(filename) as f: - code = f.read() - code = await WriteDocstring().run(code, style=style) - if overwrite: - with open(filename, "w") as f: - f.write(code) - return code - - fire.Fire(run) + fire.Fire(WriteDocstring.write_docstring) diff --git a/tests/data/demo_project/prd.json b/tests/data/demo_project/prd.json new file mode 100644 index 000000000..2dd26b384 --- /dev/null +++ b/tests/data/demo_project/prd.json @@ -0,0 +1 @@ +{"Language": "en_us", "Programming Language": "Python", "Original Requirements": "write a 2048 game", "Project Name": "game_2048", "Product Goals": ["Create an addictive and engaging gaming experience", "Ensure smooth performance and responsiveness", "Offer customizable game settings and features"], "User Stories": ["As a player, I want to be able to play the game on different devices and screen sizes", "As a gamer, I want to be challenged with increasing difficulty levels as I progress", "As a user, I want to be able to undo my last move in the game"], "Competitive Analysis": ["2048 Game by Gabriele Cirulli: Popular and addictive, lacks advanced customization options"], "Competitive Quadrant Chart": "quadrantChart\n title \"Engagement and Customization of 2048 Games\"\n x-axis \"Low Customization\" --> \"High Customization\"\n y-axis \"Low Engagement\" --> \"High Engagement\"\n quadrant-1 \"Enhance Customization\"\n quadrant-2 \"Improve Engagement\"\n quadrant-3 \"Maintain Customization, Enhance Engagement\"\n quadrant-4 \"Highly Engaging and Customizable\"\n \"2048 Game by Gabriele Cirulli\": [0.4, 0.7]\n \"Our Target Product\": [0.6, 0.8]", "Requirement Analysis": "The product should provide an intuitive and seamless gaming experience with customizable features to enhance user engagement.", "Requirement Pool": 
[["P0", "Implement game logic and user interface"], ["P1", "Incorporate multiple difficulty levels and scoring system"], ["P2", "Integrate customizable game settings and undo feature"]], "UI Design draft": "The UI should have a clean and modern design with intuitive game controls and customizable settings for difficulty levels and game themes.", "Anything UNCLEAR": "..."} \ No newline at end of file diff --git a/tests/metagpt/actions/test_write_docstring.py b/tests/metagpt/actions/test_write_docstring.py index a8a80b36d..a0fc46ebd 100644 --- a/tests/metagpt/actions/test_write_docstring.py +++ b/tests/metagpt/actions/test_write_docstring.py @@ -30,3 +30,13 @@ class Person: async def test_write_docstring(style: str, part: str): ret = await WriteDocstring().run(code, style=style) assert part in ret + + +@pytest.mark.asyncio +async def test_write(): + code = await WriteDocstring.write_docstring(__file__) + assert code + + +if __name__ == "__main__": + pytest.main([__file__, "-s"]) diff --git a/tests/metagpt/actions/test_write_prd_review.py b/tests/metagpt/actions/test_write_prd_review.py index 5077fa465..9b3f0a285 100644 --- a/tests/metagpt/actions/test_write_prd_review.py +++ b/tests/metagpt/actions/test_write_prd_review.py @@ -23,10 +23,14 @@ async def test_write_prd_review(): Timeline: The feature should be ready for testing in 1.5 months. 
""" - write_prd_review = WritePRDReview("write_prd_review") + write_prd_review = WritePRDReview(name="write_prd_review") prd_review = await write_prd_review.run(prd) # We cannot exactly predict the generated PRD review, but we can check if it is a string and if it is not empty assert isinstance(prd_review, str) assert len(prd_review) > 0 + + +if __name__ == "__main__": + pytest.main([__file__, "-s"]) diff --git a/tests/metagpt/actions/test_write_teaching_plan.py b/tests/metagpt/actions/test_write_teaching_plan.py index 3f25b2167..57a4f5eb0 100644 --- a/tests/metagpt/actions/test_write_teaching_plan.py +++ b/tests/metagpt/actions/test_write_teaching_plan.py @@ -6,53 +6,21 @@ @File : test_write_teaching_plan.py """ -import asyncio -from typing import Optional - -from langchain.llms.base import LLM -from pydantic import BaseModel +import pytest from metagpt.actions.write_teaching_plan import WriteTeachingPlanPart -from metagpt.config import Config -from metagpt.schema import Message -class MockWriteTeachingPlanPart(WriteTeachingPlanPart): - def __init__(self, options, name: str = "", context=None, llm: LLM = None, topic="", language="Chinese"): - super().__init__(options, name, context, llm, topic, language) - - async def _aask(self, prompt: str, system_msgs: Optional[list[str]] = None) -> str: - return f"{WriteTeachingPlanPart.DATA_BEGIN_TAG}\nprompt\n{WriteTeachingPlanPart.DATA_END_TAG}" - - -async def mock_write_teaching_plan_part(): - class Inputs(BaseModel): - input: str - name: str - topic: str - language: str - - inputs = [ - {"input": "AABBCC", "name": "A", "topic": WriteTeachingPlanPart.COURSE_TITLE, "language": "C"}, - {"input": "DDEEFFF", "name": "A1", "topic": "B1", "language": "C1"}, - ] - - for i in inputs: - seed = Inputs(**i) - options = Config().runtime_options - act = MockWriteTeachingPlanPart(options=options, name=seed.name, topic=seed.topic, language=seed.language) - await act.run([Message(content="")]) - assert act.topic == seed.topic - assert 
str(act) == seed.topic - assert act.name == seed.name - assert act.rsp == "# prompt" if seed.topic == WriteTeachingPlanPart.COURSE_TITLE else "prompt" - - -def test_suite(): - loop = asyncio.get_event_loop() - task = loop.create_task(mock_write_teaching_plan_part()) - loop.run_until_complete(task) +@pytest.mark.asyncio +@pytest.mark.parametrize( + ("topic", "context"), + [("Title", "Lesson 1: Learn to draw an apple."), ("Teaching Content", "Lesson 1: Learn to draw an apple.")], +) +async def test_write_teaching_plan_part(topic, context): + action = WriteTeachingPlanPart(topic=topic, context=context) + rsp = await action.run() + assert rsp if __name__ == "__main__": - test_suite() + pytest.main([__file__, "-s"]) diff --git a/tests/metagpt/learn/test_text_to_image.py b/tests/metagpt/learn/test_text_to_image.py index a6cbc45bf..626945218 100644 --- a/tests/metagpt/learn/test_text_to_image.py +++ b/tests/metagpt/learn/test_text_to_image.py @@ -7,35 +7,26 @@ @Desc : Unit tests. """ -import base64 import pytest -from pydantic import BaseModel +from metagpt.config import CONFIG from metagpt.learn.text_to_image import text_to_image @pytest.mark.asyncio async def test(): - class Input(BaseModel): - input: str - size_type: str + # Prerequisites + assert CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL + assert CONFIG.OPENAI_API_KEY - inputs = [{"input": "Panda emoji", "size_type": "512x512"}] - - for i in inputs: - seed = Input(**i) - base64_data = await text_to_image(seed.input) - assert base64_data != "" - print(f"{seed.input} -> {base64_data}") - flags = ";base64," - assert flags in base64_data - ix = base64_data.find(flags) + len(flags) - declaration = base64_data[0:ix] - assert declaration - data = base64_data[ix:] - assert data - assert base64.b64decode(data, validate=True) + data = await text_to_image("Panda emoji", size_type="512x512") + assert "base64" in data or "http" in data + key = CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL + CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL = None + 
data = await text_to_image("Panda emoji", size_type="512x512") + assert "base64" in data or "http" in data + CONFIG.METAGPT_TEXT_TO_IMAGE_MODEL_URL = key if __name__ == "__main__": diff --git a/tests/metagpt/provider/test_azure_openai_api.py b/tests/metagpt/provider/test_azure_openai_api.py new file mode 100644 index 000000000..a1f1effeb --- /dev/null +++ b/tests/metagpt/provider/test_azure_openai_api.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +@Time : 2023/12/28 +@Author : mashenquan +@File : test_azure_openai_api.py +""" +from metagpt.config import CONFIG, LLMProviderEnum +from metagpt.llm import LLM + + +def test_llm(): + # Prerequisites + assert CONFIG.DEPLOYMENT_NAME and CONFIG.DEPLOYMENT_NAME != "YOUR_DEPLOYMENT_NAME" + assert CONFIG.OPENAI_API_KEY and CONFIG.OPENAI_API_KEY != "YOUR_AZURE_API_KEY" + assert CONFIG.OPENAI_API_VERSION + assert CONFIG.OPENAI_BASE_URL + + llm = LLM(provider=LLMProviderEnum.AZURE_OPENAI) + assert llm diff --git a/tests/metagpt/provider/test_metagpt_api.py b/tests/metagpt/provider/test_metagpt_api.py new file mode 100644 index 000000000..1f00cb653 --- /dev/null +++ b/tests/metagpt/provider/test_metagpt_api.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +@Time : 2023/12/28 +@Author : mashenquan +@File : test_metagpt_api.py +""" +from metagpt.config import LLMProviderEnum +from metagpt.llm import LLM + + +def test_llm(): + llm = LLM(provider=LLMProviderEnum.METAGPT) + assert llm diff --git a/tests/metagpt/provider/test_open_llm_api.py b/tests/metagpt/provider/test_open_llm_api.py new file mode 100644 index 000000000..b8be68504 --- /dev/null +++ b/tests/metagpt/provider/test_open_llm_api.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +@Time : 2023/12/28 +@Author : mashenquan +@File : test_open_llm_api.py +""" +from metagpt.config import CONFIG, LLMProviderEnum +from metagpt.llm import LLM +from metagpt.provider.open_llm_api import OpenLLMCostManager + + +def 
test_llm(): + llm = LLM(provider=LLMProviderEnum.OPEN_LLM) + assert llm + + +def test_cost(): + # Prerequisites + CONFIG.max_budget = 10 + + cost = OpenLLMCostManager() + cost.update_cost(prompt_tokens=10, completion_tokens=1, model="gpt-35-turbo") + assert cost.get_total_prompt_tokens() > 0 + assert cost.get_total_completion_tokens() > 0 diff --git a/tests/metagpt/utils/test_s3.py b/tests/metagpt/utils/test_s3.py index e4154b957..0a654f2da 100644 --- a/tests/metagpt/utils/test_s3.py +++ b/tests/metagpt/utils/test_s3.py @@ -45,9 +45,11 @@ async def test_s3(): @pytest.mark.asyncio async def test_s3_no_error(): conn = S3() + key = conn.auth_config["aws_secret_access_key"] conn.auth_config["aws_secret_access_key"] = "" res = await conn.cache("ABC", ".bak", "script") assert not res + conn.auth_config["aws_secret_access_key"] = key if __name__ == "__main__":