diff --git a/config/config.yaml b/config/config.yaml
index ff1ae769d..3aeabf251 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -105,15 +105,15 @@ PROMPT_FORMAT: json #json or markdown
 #METAGPT_TEXT_TO_IMAGE_MODEL: MODEL_URL
 ### S3 config
-S3_ACCESS_KEY: "YOUR_S3_ACCESS_KEY"
-S3_SECRET_KEY: "YOUR_S3_SECRET_KEY"
-S3_ENDPOINT_URL: "YOUR_S3_ENDPOINT_URL"
-S3_SECURE: true # true/false
-S3_BUCKET: "YOUR_S3_BUCKET"
+#S3_ACCESS_KEY: "YOUR_S3_ACCESS_KEY"
+#S3_SECRET_KEY: "YOUR_S3_SECRET_KEY"
+#S3_ENDPOINT_URL: "YOUR_S3_ENDPOINT_URL"
+#S3_SECURE: true # true/false
+#S3_BUCKET: "YOUR_S3_BUCKET"
 ### Redis config
-REDIS_HOST: "YOUR_REDIS_HOST"
-REDIS_PORT: "YOUR_REDIS_PORT"
-REDIS_PASSWORD: "YOUR_REDIS_PASSWORD"
-REDIS_DB: "YOUR_REDIS_DB_INDEX, str, 0-based"
+#REDIS_HOST: "YOUR_REDIS_HOST"
+#REDIS_PORT: "YOUR_REDIS_PORT"
+#REDIS_PASSWORD: "YOUR_REDIS_PASSWORD"
+#REDIS_DB: "YOUR_REDIS_DB_INDEX, str, 0-based"
diff --git a/examples/search_kb.py b/examples/search_kb.py
index c2ded1769..85d99854e 100644
--- a/examples/search_kb.py
+++ b/examples/search_kb.py
@@ -5,14 +5,8 @@
 @Modified By: mashenquan, 2023-8-9, fix-bug: cannot find metagpt module.
 """
 import asyncio
-<<<<<<< HEAD
 from metagpt.actions import Action
-=======
-from pathlib import Path
-import sys
-sys.path.append(str(Path(__file__).resolve().parent.parent))
->>>>>>> send18/dev
 from metagpt.const import DATA_PATH
 from metagpt.document_store import FaissStore
 from metagpt.logs import logger
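The search_kb.py hunk above keeps its FaissStore and DATA_PATH imports. For orientation, a minimal usage sketch, illustrative only and not part of the patch: it mirrors the `__main__` demo that appears later in the faiss_store.py hunk, and it assumes the qcs/qcs_4w.json file exists under DATA_PATH.

```python
# Illustrative sketch; mirrors the __main__ demo shown in the faiss_store.py hunk below.
from metagpt.const import DATA_PATH
from metagpt.document_store import FaissStore
from metagpt.logs import logger

# Build (or load the cached) FAISS index from a JSON knowledge base.
faiss_store = FaissStore(DATA_PATH / "qcs/qcs_4w.json")
logger.info(faiss_store.search("Oily Skin Facial Cleanser"))

# Add a few extra documents, then query again.
faiss_store.add([f"Oily Skin Facial Cleanser-{i}" for i in range(3)])
logger.info(faiss_store.search("Oily Skin Facial Cleanser"))
```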
""" import asyncio -from pathlib import Path -import sys -sys.path.append(str(Path(__file__).resolve().parent.parent)) + from metagpt.roles import Searcher from metagpt.tools import SearchEngineType diff --git a/examples/write_teaching_plan.py b/examples/write_teaching_plan.py index c3a647b94..01181dc2b 100644 --- a/examples/write_teaching_plan.py +++ b/examples/write_teaching_plan.py @@ -15,14 +15,15 @@ import asyncio from pathlib import Path -from metagpt.config import CONFIG - import aiofiles import fire -from metagpt.logs import logger + from metagpt.actions.write_teaching_plan import TeachingPlanRequirement +from metagpt.config import CONFIG +from metagpt.logs import logger from metagpt.roles.teacher import Teacher -from metagpt.software_company import SoftwareCompany +from metagpt.schema import Message +from metagpt.team import Team async def startup(lesson_file: str, investment: float = 3.0, n_round: int = 1, *args, **kwargs): @@ -82,10 +83,10 @@ async def startup(lesson_file: str, investment: float = 3.0, n_round: int = 1, * logger.info("No course content provided, using the demo course.") lesson = demo_lesson - company = SoftwareCompany() + company = Team() company.hire([Teacher(*args, **kwargs)]) company.invest(investment) - company.start_project(lesson, cause_by=TeachingPlanRequirement, role="Teacher", **kwargs) + company.env.publish_message(Message(content=lesson, cause_by=TeachingPlanRequirement)) await company.run(n_round=1) @@ -102,7 +103,7 @@ def main(idea: str, investment: float = 3.0, n_round: int = 5, *args, **kwargs): asyncio.run(startup(idea, investment, n_round, *args, **kwargs)) -if __name__ == '__main__': +if __name__ == "__main__": """ Formats: ``` diff --git a/metagpt/__init__.py b/metagpt/__init__.py index aa1965e31..71ddd1aff 100644 --- a/metagpt/__init__.py +++ b/metagpt/__init__.py @@ -1,22 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -<<<<<<< HEAD # @Time : 2023/4/24 22:26 # @Author : alexanderwu # @File : __init__.py from metagpt import _compat as _ # noqa: F401 -======= -""" -@Time : 2023/4/24 22:26 -@Author : alexanderwu -@File : __init__.py -@Desc : mashenquan, 2023/8/22. Add `Message` for importing by external projects. -""" - -from metagpt.schema import Message - -__all__ = [ - "Message", -] ->>>>>>> send18/dev diff --git a/metagpt/actions/action.py b/metagpt/actions/action.py index 442004e09..2b4317736 100644 --- a/metagpt/actions/action.py +++ b/metagpt/actions/action.py @@ -8,19 +8,21 @@ @Modified By: mashenquan, 2023/9/8. Replace LLM with LLMFactory """ -import re from __future__ import annotations + +import re from abc import ABC from typing import Optional + from tenacity import retry, stop_after_attempt, wait_random_exponential + from metagpt.actions.action_output import ActionOutput from metagpt.llm import LLM from metagpt.logs import logger -from metagpt.utils.common import OutputParser -from metagpt.utils.custom_decoder import CustomDecoder -from metagpt.logs import logger from metagpt.provider.base_gpt_api import BaseGPTAPI from metagpt.utils.common import OutputParser +from metagpt.utils.custom_decoder import CustomDecoder + class Action(ABC): def __init__(self, name: str = "", context=None, llm: BaseGPTAPI = None): diff --git a/metagpt/actions/action_output.py b/metagpt/actions/action_output.py index 49c7dea2e..87d1c31ff 100644 --- a/metagpt/actions/action_output.py +++ b/metagpt/actions/action_output.py @@ -7,7 +7,7 @@ @Modified By: mashenquan, 2023/8/20. Allow 'instruct_content' to be blank. 
""" -from typing import Dict, Type, Optional +from typing import Dict, Optional, Type from pydantic import BaseModel, create_model, root_validator, validator @@ -16,7 +16,7 @@ class ActionOutput: content: str instruct_content: Optional[BaseModel] = None - def __init__(self, content: str, instruct_content: BaseModel=None): + def __init__(self, content: str, instruct_content: BaseModel = None): self.content = content self.instruct_content = instruct_content diff --git a/metagpt/actions/design_api.py b/metagpt/actions/design_api.py index bccbc1261..557ebcbbd 100644 --- a/metagpt/actions/design_api.py +++ b/metagpt/actions/design_api.py @@ -4,7 +4,6 @@ @Time : 2023/5/11 19:26 @Author : alexanderwu @File : design_api.py -<<<<<<< HEAD @Modified By: mashenquan, 2023/11/27. 1. According to Section 2.2.3.1 of RFC 135, replace file data in the message with the file name. 2. According to the design in Section 2.2.3.5.3 of RFC 135, add incremental iteration functionality. @@ -23,16 +22,6 @@ from metagpt.const import ( SYSTEM_DESIGN_FILE_REPO, SYSTEM_DESIGN_PDF_FILE_REPO, ) -======= -@Modified By: mashenquan, 2023-8-9, align `run` parameters with the parent :class:`Action` class. -""" -from typing import List - -import aiofiles - -from metagpt.actions import Action -from metagpt.config import CONFIG ->>>>>>> send18/dev from metagpt.logs import logger from metagpt.schema import Document, Documents from metagpt.utils.file_repository import FileRepository @@ -208,7 +197,6 @@ class WriteDesign(Action): "clearly and in detail." ) -<<<<<<< HEAD async def run(self, with_messages, format=CONFIG.prompt_format): # Use `git diff` to identify which PRD documents have been modified in the `docs/prds` directory. prds_file_repo = CONFIG.git_repo.new_file_repository(PRDS_FILE_REPO) @@ -244,30 +232,6 @@ class WriteDesign(Action): format_example = format_example.format(project_name=CONFIG.project_name) prompt = prompt_template.format(context=context, format_example=format_example) system_design = await self._aask_v1(prompt, "system_design", OUTPUT_MAPPING, format=format) -======= - async def _save_system_design(self, docs_path, resources_path, content): - data_api_design = CodeParser.parse_code(block="Data structures and interface definitions", text=content) - seq_flow = CodeParser.parse_code(block="Program call flow", text=content) - await mermaid_to_file(data_api_design, resources_path / "data_api_design") - await mermaid_to_file(seq_flow, resources_path / "seq_flow") - system_design_file = docs_path / "system_design.md" - logger.info(f"Saving System Designs to {system_design_file}") - async with aiofiles.open(system_design_file, "w") as f: - await f.write(content) - - async def _save(self, system_design: str): - workspace = CONFIG.workspace - docs_path = workspace / "docs" - resources_path = workspace / "resources" - docs_path.mkdir(parents=True, exist_ok=True) - resources_path.mkdir(parents=True, exist_ok=True) - await self._save_system_design(docs_path, resources_path, system_design) - - async def run(self, context, **kwargs): - prompt = PROMPT_TEMPLATE.format(context=context, format_example=FORMAT_EXAMPLE) - system_design = await self._aask_v1(prompt, "system_design", OUTPUT_MAPPING) - await self._save(system_design.content) ->>>>>>> send18/dev return system_design async def _merge(self, prd_doc, system_design_doc, format=CONFIG.prompt_format): diff --git a/metagpt/actions/project_management.py b/metagpt/actions/project_management.py index 53ef872e2..40965ab5c 100644 --- a/metagpt/actions/project_management.py +++ 
b/metagpt/actions/project_management.py @@ -4,19 +4,14 @@ @Time : 2023/5/11 19:12 @Author : alexanderwu @File : project_management.py -<<<<<<< HEAD @Modified By: mashenquan, 2023/11/27. 1. Divide the context into three components: legacy code, unit test code, and console log. 2. Move the document storage operations related to WritePRD from the save operation of WriteDesign. 3. According to the design in Section 2.2.3.5.4 of RFC 135, add incremental iteration functionality. -======= -@Modified By: mashenquan, 2023-8-9, align `run` parameters with the parent :class:`Action` class. ->>>>>>> send18/dev """ import json from typing import List -<<<<<<< HEAD from metagpt.actions import ActionOutput from metagpt.actions.action import Action from metagpt.config import CONFIG @@ -91,14 +86,6 @@ and only output the json inside this tag, nothing else }, "markdown": { "PROMPT_TEMPLATE": """ -======= -import aiofiles - -from metagpt.actions.action import Action -from metagpt.config import CONFIG - -PROMPT_TEMPLATE = """ ->>>>>>> send18/dev # Context {context} @@ -121,11 +108,7 @@ Attention: Use '##' to split sections, not '#', and '## ' SHOULD W ## Shared Knowledge: Anything that should be public like utils' functions, config's variables details that should make clear first. -<<<<<<< HEAD ## Anything UNCLEAR: Provide as Plain text. Try to clarify it. For example, don't forget a main entry. don't forget to init 3rd party libs. -======= -""" ->>>>>>> send18/dev """, "FORMAT_EXAMPLE": ''' @@ -197,7 +180,6 @@ MERGE_PROMPT = """ # Context {context} -<<<<<<< HEAD ## Old Tasks {old_tasks} ----- @@ -228,13 +210,10 @@ and only output the json inside this tag, nothing else """ -======= ->>>>>>> send18/dev class WriteTasks(Action): def __init__(self, name="CreateTasks", context=None, llm=None): super().__init__(name, context, llm) -<<<<<<< HEAD async def run(self, with_messages, format=CONFIG.prompt_format): system_design_file_repo = CONFIG.git_repo.new_file_repository(SYSTEM_DESIGN_FILE_REPO) changed_system_designs = system_design_file_repo.changed_files @@ -286,29 +265,13 @@ class WriteTasks(Action): prompt_template, format_example = get_template(templates, format) prompt = prompt_template.format(context=context, format_example=format_example) rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING, format=format) -======= - async def _save(self, rsp): - file_path = CONFIG.workspace / "docs/api_spec_and_tasks.md" - async with aiofiles.open(file_path, "w") as f: - await f.write(rsp.content) - - # Write requirements.txt - requirements_path = CONFIG.workspace / "requirements.txt" - - async with aiofiles.open(requirements_path, "w") as f: - await f.write(rsp.instruct_content.dict().get("Required Python third-party packages").strip('"\n')) - - async def run(self, context, **kwargs): - prompt = PROMPT_TEMPLATE.format(context=context, format_example=FORMAT_EXAMPLE) - rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING) - await self._save(rsp) ->>>>>>> send18/dev return rsp async def _merge(self, system_design_doc, task_doc, format=CONFIG.prompt_format) -> Document: _, format_example = get_template(templates, format) - prompt = MERGE_PROMPT.format(context=system_design_doc.content, old_tasks=task_doc.content, - format_example=format_example) + prompt = MERGE_PROMPT.format( + context=system_design_doc.content, old_tasks=task_doc.content, format_example=format_example + ) rsp = await self._aask_v1(prompt, "task", OUTPUT_MAPPING, format=format) task_doc.content = rsp.instruct_content.json(ensure_ascii=False) return 
task_doc diff --git a/metagpt/actions/write_code.py b/metagpt/actions/write_code.py index b61e3886c..a2501db2a 100644 --- a/metagpt/actions/write_code.py +++ b/metagpt/actions/write_code.py @@ -14,27 +14,23 @@ 3. Encapsulate the input of RunCode into RunCodeContext and encapsulate the output of RunCode into RunCodeResult to standardize and unify parameter passing between WriteCode, RunCode, and DebugError. """ -<<<<<<< HEAD import json from tenacity import retry, stop_after_attempt, wait_random_exponential from metagpt.actions.action import Action from metagpt.config import CONFIG -from metagpt.const import CODE_SUMMARIES_FILE_REPO, TEST_OUTPUTS_FILE_REPO, TASK_FILE_REPO, BUGFIX_FILENAME, \ - DOCS_FILE_REPO -======= -from tenacity import retry, stop_after_attempt, wait_fixed - -from metagpt.actions.action import Action ->>>>>>> send18/dev +from metagpt.const import ( + BUGFIX_FILENAME, + CODE_SUMMARIES_FILE_REPO, + DOCS_FILE_REPO, + TASK_FILE_REPO, + TEST_OUTPUTS_FILE_REPO, +) from metagpt.logs import logger from metagpt.schema import CodingContext, Document, RunCodeResult from metagpt.utils.common import CodeParser -<<<<<<< HEAD from metagpt.utils.file_repository import FileRepository -======= ->>>>>>> send18/dev PROMPT_TEMPLATE = """ NOTICE @@ -98,21 +94,12 @@ class WriteCode(Action): def __init__(self, name="WriteCode", context=None, llm=None): super().__init__(name, context, llm) -<<<<<<< HEAD @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) async def write_code(self, prompt) -> str: -======= - def _is_invalid(self, filename): - return any(i in filename for i in ["mp3", "wav"]) - - @retry(stop=stop_after_attempt(2), wait=wait_fixed(1)) - async def write_code(self, prompt): ->>>>>>> send18/dev code_rsp = await self._aask(prompt) code = CodeParser.parse_code(block="", text=code_rsp) return code -<<<<<<< HEAD async def run(self, *args, **kwargs) -> CodingContext: bug_feedback = await FileRepository.get_file(filename=BUGFIX_FILENAME, relative_path=DOCS_FILE_REPO) coding_context = CodingContext.loads(self.context.content) @@ -139,11 +126,6 @@ class WriteCode(Action): summary_log=summary_doc.content if summary_doc else "", ) logger.info(f"Writing {coding_context.filename}..") -======= - async def run(self, context, filename): - prompt = PROMPT_TEMPLATE.format(context=context, filename=filename) - logger.info(f"Writing {filename}..") ->>>>>>> send18/dev code = await self.write_code(prompt) if not coding_context.code_doc: coding_context.code_doc = Document(filename=coding_context.filename, root_path=CONFIG.src_workspace) @@ -166,4 +148,3 @@ class WriteCode(Action): continue codes.append(doc.content) return "\n----------\n".join(codes) - diff --git a/metagpt/actions/write_prd.py b/metagpt/actions/write_prd.py index d8042b3ed..9aacb0751 100644 --- a/metagpt/actions/write_prd.py +++ b/metagpt/actions/write_prd.py @@ -16,22 +16,20 @@ import json from pathlib import Path from typing import List -import aiofiles - from metagpt.actions import Action, ActionOutput from metagpt.actions.fix_bug import FixBug from metagpt.actions.search_and_summarize import SearchAndSummarize from metagpt.config import CONFIG -<<<<<<< HEAD from metagpt.const import ( + BUGFIX_FILENAME, COMPETITIVE_ANALYSIS_FILE_REPO, DOCS_FILE_REPO, PRD_PDF_FILE_REPO, PRDS_FILE_REPO, - REQUIREMENT_FILENAME, BUGFIX_FILENAME, + REQUIREMENT_FILENAME, ) from metagpt.logs import logger -from metagpt.schema import Document, Documents, Message, BugFixContext +from metagpt.schema import BugFixContext, Document, 
Documents, Message from metagpt.utils.common import CodeParser from metagpt.utils.file_repository import FileRepository from metagpt.utils.get_template import get_template @@ -55,11 +53,6 @@ Requirements: According to the context, fill in the following missing informatio ATTENTION: Output carefully referenced "Format example" in format. ## YOU NEED TO FULFILL THE BELOW JSON DOC -======= -from metagpt.logs import logger -from metagpt.utils.common import CodeParser -from metagpt.utils.mermaid import mermaid_to_file ->>>>>>> send18/dev {{ "Language": "", # str, use the same language as the user requirement. en_us / zh_cn etc. @@ -245,11 +238,7 @@ OUTPUT_MAPPING = { "Competitive Analysis": (List[str], ...), "Competitive Quadrant Chart": (str, ...), "Requirement Analysis": (str, ...), -<<<<<<< HEAD "Requirement Pool": (List[List[str]], ...), -======= - "Requirement Pool": (List[Tuple[str, str]], ...), ->>>>>>> send18/dev "UI Design draft": (str, ...), "Anything UNCLEAR": (str, ...), } @@ -346,12 +335,14 @@ class WritePRD(Action): await docs_file_repo.save(filename=BUGFIX_FILENAME, content=requirement_doc.content) await docs_file_repo.save(filename=REQUIREMENT_FILENAME, content="") bug_fix = BugFixContext(filename=BUGFIX_FILENAME) - return Message(content=bug_fix.json(), instruct_content=bug_fix, - role=self.profile, - cause_by=FixBug, - sent_from=self, - send_to="Alex", # the name of Engineer - ) + return Message( + content=bug_fix.json(), + instruct_content=bug_fix, + role=self.profile, + cause_by=FixBug, + sent_from=self, + send_to="Alex", # the name of Engineer + ) else: await docs_file_repo.delete(filename=BUGFIX_FILENAME) @@ -388,7 +379,6 @@ class WritePRD(Action): logger.info(sas.result) logger.info(rsp) -<<<<<<< HEAD # logger.info(format) prompt_template, format_example = get_template(templates, format) project_name = CONFIG.project_name if CONFIG.project_name else "" @@ -447,7 +437,7 @@ class WritePRD(Action): if not quadrant_chart: return pathname = ( - CONFIG.git_repo.workdir / Path(COMPETITIVE_ANALYSIS_FILE_REPO) / Path(prd_doc.filename).with_suffix("") + CONFIG.git_repo.workdir / Path(COMPETITIVE_ANALYSIS_FILE_REPO) / Path(prd_doc.filename).with_suffix("") ) if not pathname.parent.exists(): pathname.parent.mkdir(parents=True, exist_ok=True) @@ -480,33 +470,3 @@ class WritePRD(Action): if "YES" in res: return True return False -======= - prompt = PROMPT_TEMPLATE.format( - requirements=requirements, search_information=info, format_example=FORMAT_EXAMPLE - ) - logger.debug(prompt) - prd = await self._aask_v1(prompt, "prd", OUTPUT_MAPPING) - - await self._save(prd.content) - return prd - - async def _save_prd(self, docs_path, resources_path, prd): - prd_file = docs_path / "prd.md" - quadrant_chart = CodeParser.parse_code(block="Competitive Quadrant Chart", text=prd) - await mermaid_to_file( - mermaid_code=quadrant_chart, output_file_without_suffix=resources_path / "competitive_analysis" - ) - async with aiofiles.open(prd_file, "w") as f: - await f.write(prd) - logger.info(f"Saving PRD to {prd_file}") - - async def _save(self, prd): - workspace = CONFIG.workspace - workspace.mkdir(parents=True, exist_ok=True) - - docs_path = workspace / "docs" - resources_path = workspace / "resources" - docs_path.mkdir(parents=True, exist_ok=True) - resources_path.mkdir(parents=True, exist_ok=True) - await self._save_prd(docs_path, resources_path, prd) ->>>>>>> send18/dev diff --git a/metagpt/actions/write_teaching_plan.py b/metagpt/actions/write_teaching_plan.py index 7c959ce85..529c563db 100644 --- 
a/metagpt/actions/write_teaching_plan.py +++ b/metagpt/actions/write_teaching_plan.py @@ -5,9 +5,10 @@ @Author : mashenquan @File : write_teaching_plan.py """ -from metagpt.logs import logger from metagpt.actions import Action +from metagpt.logs import logger from metagpt.schema import Message +from metagpt.utils.common import format_value class TeachingPlanRequirement(Action): @@ -40,17 +41,18 @@ class WriteTeachingPlanPart(Action): statement_patterns = self.TOPIC_STATEMENTS.get(self.topic, []) statements = [] - from metagpt.roles import Role for p in statement_patterns: - s = Role.format_value(p) + s = format_value(p) statements.append(s) formatter = self.PROMPT_TITLE_TEMPLATE if self.topic == self.COURSE_TITLE else self.PROMPT_TEMPLATE - prompt = formatter.format(formation=self.FORMATION, - role=self.prefix, - statements="\n".join(statements), - lesson=messages[0].content, - topic=self.topic, - language=self.language) + prompt = formatter.format( + formation=self.FORMATION, + role=self.prefix, + statements="\n".join(statements), + lesson=messages[0].content, + topic=self.topic, + language=self.language, + ) logger.debug(prompt) rsp = await self._aask(prompt=prompt) @@ -61,14 +63,14 @@ class WriteTeachingPlanPart(Action): def _set_result(self, rsp): if self.DATA_BEGIN_TAG in rsp: ix = rsp.index(self.DATA_BEGIN_TAG) - rsp = rsp[ix + len(self.DATA_BEGIN_TAG):] + rsp = rsp[ix + len(self.DATA_BEGIN_TAG) :] if self.DATA_END_TAG in rsp: ix = rsp.index(self.DATA_END_TAG) rsp = rsp[0:ix] self.rsp = rsp.strip() if self.topic != self.COURSE_TITLE: return - if '#' not in self.rsp or self.rsp.index('#') != 0: + if "#" not in self.rsp or self.rsp.index("#") != 0: self.rsp = "# " + self.rsp def __str__(self): @@ -79,81 +81,102 @@ class WriteTeachingPlanPart(Action): """Show `topic` value when debug""" return self.topic - FORMATION = "\"Capacity and role\" defines the role you are currently playing;\n" \ - "\t\"[LESSON_BEGIN]\" and \"[LESSON_END]\" tags enclose the content of textbook;\n" \ - "\t\"Statement\" defines the work detail you need to complete at this stage;\n" \ - "\t\"Answer options\" defines the format requirements for your responses;\n" \ - "\t\"Constraint\" defines the conditions that your responses must comply with." + FORMATION = ( + '"Capacity and role" defines the role you are currently playing;\n' + '\t"[LESSON_BEGIN]" and "[LESSON_END]" tags enclose the content of textbook;\n' + '\t"Statement" defines the work detail you need to complete at this stage;\n' + '\t"Answer options" defines the format requirements for your responses;\n' + '\t"Constraint" defines the conditions that your responses must comply with.' 
+ ) COURSE_TITLE = "Title" TOPICS = [ - COURSE_TITLE, "Teaching Hours", "Teaching Objectives", "Teaching Content", - "Teaching Methods and Strategies", "Learning Activities", - "Teaching Time Allocation", "Assessment and Feedback", "Teaching Summary and Improvement", - "Vocabulary Cloze", "Choice Questions", "Grammar Questions", "Translation Questions" + COURSE_TITLE, + "Teaching Hours", + "Teaching Objectives", + "Teaching Content", + "Teaching Methods and Strategies", + "Learning Activities", + "Teaching Time Allocation", + "Assessment and Feedback", + "Teaching Summary and Improvement", + "Vocabulary Cloze", + "Choice Questions", + "Grammar Questions", + "Translation Questions", ] TOPIC_STATEMENTS = { - COURSE_TITLE: ["Statement: Find and return the title of the lesson only in markdown first-level header format, " - "without anything else."], + COURSE_TITLE: [ + "Statement: Find and return the title of the lesson only in markdown first-level header format, " + "without anything else." + ], "Teaching Content": [ - "Statement: \"Teaching Content\" must include vocabulary, analysis, and examples of various grammar " + 'Statement: "Teaching Content" must include vocabulary, analysis, and examples of various grammar ' "structures that appear in the textbook, as well as the listening materials and key points.", - "Statement: \"Teaching Content\" must include more examples."], + 'Statement: "Teaching Content" must include more examples.', + ], "Teaching Time Allocation": [ - "Statement: \"Teaching Time Allocation\" must include how much time is allocated to each " - "part of the textbook content."], + 'Statement: "Teaching Time Allocation" must include how much time is allocated to each ' + "part of the textbook content." + ], "Teaching Methods and Strategies": [ - "Statement: \"Teaching Methods and Strategies\" must include teaching focus, difficulties, materials, " + 'Statement: "Teaching Methods and Strategies" must include teaching focus, difficulties, materials, ' "procedures, in detail." ], "Vocabulary Cloze": [ - "Statement: Based on the content of the textbook enclosed by \"[LESSON_BEGIN]\" and \"[LESSON_END]\", " + 'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", ' "create vocabulary cloze. The cloze should include 10 {language} questions with {teaching_language} " "answers, and it should also include 10 {teaching_language} questions with {language} answers. " "The key-related vocabulary and phrases in the textbook content must all be included in the exercises.", ], "Grammar Questions": [ - "Statement: Based on the content of the textbook enclosed by \"[LESSON_BEGIN]\" and \"[LESSON_END]\", " - "create grammar questions. 10 questions."], + 'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", ' + "create grammar questions. 10 questions." + ], "Choice Questions": [ - "Statement: Based on the content of the textbook enclosed by \"[LESSON_BEGIN]\" and \"[LESSON_END]\", " - "create choice questions. 10 questions."], + 'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", ' + "create choice questions. 10 questions." + ], "Translation Questions": [ - "Statement: Based on the content of the textbook enclosed by \"[LESSON_BEGIN]\" and \"[LESSON_END]\", " + 'Statement: Based on the content of the textbook enclosed by "[LESSON_BEGIN]" and "[LESSON_END]", ' "create translation questions. 
The translation should include 10 {language} questions with " "{teaching_language} answers, and it should also include 10 {teaching_language} questions with " "{language} answers." - ] + ], } # Teaching plan title - PROMPT_TITLE_TEMPLATE = "Do not refer to the context of the previous conversation records, " \ - "start the conversation anew.\n\n" \ - "Formation: {formation}\n\n" \ - "{statements}\n" \ - "Constraint: Writing in {language}.\n" \ - "Answer options: Encloses the lesson title with \"[TEACHING_PLAN_BEGIN]\" " \ - "and \"[TEACHING_PLAN_END]\" tags.\n" \ - "[LESSON_BEGIN]\n" \ - "{lesson}\n" \ - "[LESSON_END]" + PROMPT_TITLE_TEMPLATE = ( + "Do not refer to the context of the previous conversation records, " + "start the conversation anew.\n\n" + "Formation: {formation}\n\n" + "{statements}\n" + "Constraint: Writing in {language}.\n" + 'Answer options: Encloses the lesson title with "[TEACHING_PLAN_BEGIN]" ' + 'and "[TEACHING_PLAN_END]" tags.\n' + "[LESSON_BEGIN]\n" + "{lesson}\n" + "[LESSON_END]" + ) # Teaching plan parts: - PROMPT_TEMPLATE = "Do not refer to the context of the previous conversation records, " \ - "start the conversation anew.\n\n" \ - "Formation: {formation}\n\n" \ - "Capacity and role: {role}\n" \ - "Statement: Write the \"{topic}\" part of teaching plan, " \ - "WITHOUT ANY content unrelated to \"{topic}\"!!\n" \ - "{statements}\n" \ - "Answer options: Enclose the teaching plan content with \"[TEACHING_PLAN_BEGIN]\" " \ - "and \"[TEACHING_PLAN_END]\" tags.\n" \ - "Answer options: Using proper markdown format from second-level header format.\n" \ - "Constraint: Writing in {language}.\n" \ - "[LESSON_BEGIN]\n" \ - "{lesson}\n" \ - "[LESSON_END]" + PROMPT_TEMPLATE = ( + "Do not refer to the context of the previous conversation records, " + "start the conversation anew.\n\n" + "Formation: {formation}\n\n" + "Capacity and role: {role}\n" + 'Statement: Write the "{topic}" part of teaching plan, ' + 'WITHOUT ANY content unrelated to "{topic}"!!\n' + "{statements}\n" + 'Answer options: Enclose the teaching plan content with "[TEACHING_PLAN_BEGIN]" ' + 'and "[TEACHING_PLAN_END]" tags.\n' + "Answer options: Using proper markdown format from second-level header format.\n" + "Constraint: Writing in {language}.\n" + "[LESSON_BEGIN]\n" + "{lesson}\n" + "[LESSON_END]" + ) DATA_BEGIN_TAG = "[TEACHING_PLAN_BEGIN]" DATA_END_TAG = "[TEACHING_PLAN_END]" diff --git a/metagpt/config.py b/metagpt/config.py index d3123b1f7..92980ec4e 100644 --- a/metagpt/config.py +++ b/metagpt/config.py @@ -13,7 +13,9 @@ from copy import deepcopy from pathlib import Path from typing import Any from uuid import uuid4 + import yaml + from metagpt.const import DEFAULT_WORKSPACE_ROOT, METAGPT_ROOT, OPTIONS from metagpt.logs import logger from metagpt.tools import SearchEngineType, WebBrowserEngineType diff --git a/metagpt/const.py b/metagpt/const.py index c2b6c308d..03f3d8fe3 100644 --- a/metagpt/const.py +++ b/metagpt/const.py @@ -12,7 +12,9 @@ import contextvars import os from pathlib import Path + from loguru import logger + import metagpt OPTIONS = contextvars.ContextVar("OPTIONS") @@ -89,6 +91,8 @@ TEST_CODES_FILE_REPO = "tests" TEST_OUTPUTS_FILE_REPO = "test_outputs" CODE_SUMMARIES_FILE_REPO = "docs/code_summaries" CODE_SUMMARIES_PDF_FILE_REPO = "resources/code_summaries" +RESOURCES_FILE_REPO = "resources" +SD_OUTPUT_FILE_REPO = "resources/SD_Output" YAPI_URL = "http://yapi.deepwisdomai.com/" @@ -105,4 +109,3 @@ BASE64_FORMAT = "base64" # REDIS REDIS_KEY = "REDIS_KEY" - diff --git 
a/metagpt/document_store/faiss_store.py b/metagpt/document_store/faiss_store.py index 65685dffa..7acaa194d 100644 --- a/metagpt/document_store/faiss_store.py +++ b/metagpt/document_store/faiss_store.py @@ -21,18 +21,13 @@ from metagpt.logs import logger class FaissStore(LocalStore): -<<<<<<< HEAD - def __init__(self, raw_data_path: Path, cache_dir=None, meta_col="source", content_col="output"): - self.meta_col = meta_col - self.content_col = content_col - super().__init__(raw_data_path, cache_dir) -======= - def __init__(self, raw_data: Path, cache_dir=None, meta_col="source", content_col="output", embedding_conf=None): + def __init__( + self, raw_data_path: Path, cache_dir=None, meta_col="source", content_col="output", embedding_conf=None + ): self.meta_col = meta_col self.content_col = content_col self.embedding_conf = embedding_conf or {} - super().__init__(raw_data, cache_dir) ->>>>>>> send18/dev + super().__init__(raw_data_path, cache_dir) def _load(self) -> Optional["FaissStore"]: index_file, store_file = self._get_index_and_store_fname() @@ -46,7 +41,9 @@ class FaissStore(LocalStore): return store def _write(self, docs, metadatas): - store = FAISS.from_texts(docs, OpenAIEmbeddings(openai_api_version="2020-11-07", **self.embedding_conf), metadatas=metadatas) + store = FAISS.from_texts( + docs, OpenAIEmbeddings(openai_api_version="2020-11-07", **self.embedding_conf), metadatas=metadatas + ) return store def persist(self): @@ -92,12 +89,6 @@ class FaissStore(LocalStore): if __name__ == "__main__": faiss_store = FaissStore(DATA_PATH / "qcs/qcs_4w.json") -<<<<<<< HEAD logger.info(faiss_store.search("Oily Skin Facial Cleanser")) faiss_store.add([f"Oily Skin Facial Cleanser-{i}" for i in range(3)]) logger.info(faiss_store.search("Oily Skin Facial Cleanser")) -======= - logger.info(faiss_store.search("油皮洗面奶")) - faiss_store.add([f"油皮洗面奶-{i}" for i in range(3)]) - logger.info(faiss_store.search("油皮洗面奶")) ->>>>>>> send18/dev diff --git a/metagpt/llm.py b/metagpt/llm.py index 525d2a65e..7701ebec2 100644 --- a/metagpt/llm.py +++ b/metagpt/llm.py @@ -6,36 +6,19 @@ @File : llm.py @Modified By: mashenquan, 2023 """ -from enum import Enum + from metagpt.config import CONFIG +from metagpt.provider import LLMType from metagpt.provider.anthropic_api import Claude2 as Claude -from metagpt.provider.openai_api import OpenAIGPTAPI -from metagpt.provider.zhipuai_api import ZhiPuAIGPTAPI -from metagpt.provider.spark_api import SparkAPI from metagpt.provider.human_provider import HumanProvider from metagpt.provider.metagpt_llm_api import MetaGPTLLMAPI +from metagpt.provider.openai_api import OpenAIGPTAPI +from metagpt.provider.spark_api import SparkAPI +from metagpt.provider.zhipuai_api import ZhiPuAIGPTAPI _ = HumanProvider() # Avoid pre-commit error -class LLMType(Enum): - OPENAI = "OpenAI" - METAGPT = "MetaGPT" - CLAUDE = "Claude" - UNKNOWN = "UNKNOWN" - - @classmethod - def get(cls, value): - for member in cls: - if member.value == value: - return member - return cls.UNKNOWN - - @classmethod - def __missing__(cls, value): - return cls.UNKNOWN - - # Used in agents class LLMFactory: @staticmethod @@ -62,5 +45,5 @@ class LLMFactory: # Used in metagpt def LLM() -> "BaseGPTAPI": - """ initialize different LLM instance according to the key field existence""" + """initialize different LLM instance according to the key field existence""" return LLMFactory.new_llm() diff --git a/metagpt/management/skill_manager.py b/metagpt/management/skill_manager.py index 33f283680..e4892e3d9 100644 --- 
a/metagpt/management/skill_manager.py +++ b/metagpt/management/skill_manager.py @@ -18,14 +18,8 @@ class SkillManager: """Used to manage all skills""" def __init__(self): -<<<<<<< HEAD - self._llm = LLM() self._store = ChromaStore("skill_manager") self._skills: dict[str:Skill] = {} -======= - self._store = ChromaStore('skill_manager') - self._skills: dict[str: Skill] = {} ->>>>>>> send18/dev def add_skill(self, skill: Skill): """ diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py index 9895aa7fc..3517e1376 100644 --- a/metagpt/provider/__init__.py +++ b/metagpt/provider/__init__.py @@ -4,11 +4,23 @@ @Time : 2023/5/5 22:59 @Author : alexanderwu @File : __init__.py -@Modified By: mashenquan, 2023/9/8. Add `MetaGPTLLMAPI` +@Modified By: mashenquan, 2023-12-15. Add LLMType """ - -from metagpt.provider.openai_api import OpenAIGPTAPI -from metagpt.provider.metagpt_llm_api import MetaGPTLLMAPI +from enum import Enum -__all__ = ["OpenAIGPTAPI", "MetaGPTLLMAPI"] +class LLMType(Enum): + OPENAI = "OpenAI" + METAGPT = "MetaGPT" + UNKNOWN = "UNKNOWN" + + @classmethod + def get(cls, value): + for member in cls: + if member.value == value: + return member + return cls.UNKNOWN + + @classmethod + def __missing__(cls, value): + return cls.UNKNOWN diff --git a/metagpt/provider/human_provider.py b/metagpt/provider/human_provider.py index ba9c93c88..5850dd8dc 100644 --- a/metagpt/provider/human_provider.py +++ b/metagpt/provider/human_provider.py @@ -21,11 +21,14 @@ class HumanProvider(BaseGPTAPI): exit() return rsp - async def aask(self, msg: str, + async def aask( + self, + msg: str, system_msgs: Optional[list[str]] = None, format_msgs: Optional[list[dict[str, str]]] = None, generator: bool = False, - timeout=3,) -> str: + timeout=3, + ) -> str: return self.ask(msg, timeout=timeout) def completion(self, messages: list[dict], timeout=3): diff --git a/metagpt/provider/metagpt_llm_api.py b/metagpt/provider/metagpt_llm_api.py index 925ac6623..994fc39ff 100644 --- a/metagpt/provider/metagpt_llm_api.py +++ b/metagpt/provider/metagpt_llm_api.py @@ -7,13 +7,14 @@ """ from metagpt.provider.openai_api import OpenAIGPTAPI + # from metagpt.provider.base_gpt_api import BaseGPTAPI # from metagpt.provider.openai_api import RateLimiter class MetaGPTLLMAPI(OpenAIGPTAPI): """MetaGPT LLM api""" - + def __init__(self): super(MetaGPTLLMAPI, self).__init__() @@ -24,7 +25,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # self.auto_max_tokens = False # self._cost_manager = CostManager() # RateLimiter.__init__(self, rpm=self.rpm) - # + # # def __init_openai(self, config): # openai.api_key = config.openai_api_key # if config.openai_api_base: @@ -33,10 +34,10 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # openai.api_type = config.openai_api_type # openai.api_version = config.openai_api_version # self.rpm = int(config.get("RPM", 10)) - # + # # async def _achat_completion_stream(self, messages: list[dict]) -> str: # response = await openai.ChatCompletion.acreate(**self._cons_kwargs(messages), stream=True) - # + # # # create variables to collect the stream of chunks # collected_chunks = [] # collected_messages = [] @@ -50,12 +51,12 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # if "content" in chunk_message: # print(chunk_message["content"], end="") # print() - # + # # full_reply_content = "".join([m.get("content", "") for m in collected_messages]) # usage = self._calc_usage(messages, full_reply_content) # self._update_costs(usage) # return full_reply_content - # + # # def _cons_kwargs(self, messages: list[dict], **configs) -> dict: # 
kwargs = { # "messages": messages, @@ -67,7 +68,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # } # if configs: # kwargs.update(configs) - # + # # if CONFIG.openai_api_type == "azure": # if CONFIG.deployment_name and CONFIG.deployment_id: # raise ValueError("You can only use one of the `deployment_id` or `deployment_name` model") @@ -82,27 +83,27 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # kwargs_mode = {"model": self.model} # kwargs.update(kwargs_mode) # return kwargs - # + # # async def _achat_completion(self, messages: list[dict]) -> dict: # rsp = await self.llm.ChatCompletion.acreate(**self._cons_kwargs(messages)) # self._update_costs(rsp.get("usage")) # return rsp - # + # # def _chat_completion(self, messages: list[dict]) -> dict: # rsp = self.llm.ChatCompletion.create(**self._cons_kwargs(messages)) # self._update_costs(rsp) # return rsp - # + # # def completion(self, messages: list[dict]) -> dict: # # if isinstance(messages[0], Message): # # messages = self.messages_to_dict(messages) # return self._chat_completion(messages) - # + # # async def acompletion(self, messages: list[dict]) -> dict: # # if isinstance(messages[0], Message): # # messages = self.messages_to_dict(messages) # return await self._achat_completion(messages) - # + # # @retry( # wait=wait_random_exponential(min=1, max=60), # stop=stop_after_attempt(6), @@ -116,7 +117,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # return await self._achat_completion_stream(messages) # rsp = await self._achat_completion(messages) # return self.get_choice_text(rsp) - # + # # def _func_configs(self, messages: list[dict], **kwargs) -> dict: # """ # Note: Keep kwargs consistent with the parameters in the https://platform.openai.com/docs/api-reference/chat/create @@ -127,25 +128,25 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # "tool_choice": GENERAL_TOOL_CHOICE, # } # kwargs.update(configs) - # + # # return self._cons_kwargs(messages, **kwargs) - # + # # def _chat_completion_function(self, messages: list[dict], **kwargs) -> dict: # rsp = self.llm.ChatCompletion.create(**self._func_configs(messages, **kwargs)) # self._update_costs(rsp.get("usage")) # return rsp - # + # # async def _achat_completion_function(self, messages: list[dict], **chat_configs) -> dict: # rsp = await self.llm.ChatCompletion.acreate(**self._func_configs(messages, **chat_configs)) # self._update_costs(rsp.get("usage")) # return rsp - # + # # def _process_message(self, messages: Union[str, Message, list[dict], list[Message], list[str]]) -> list[dict]: # """convert messages to list[dict].""" # if isinstance(messages, list): # messages = [Message(msg) if isinstance(msg, str) else msg for msg in messages] # return [msg if isinstance(msg, dict) else msg.to_dict() for msg in messages] - # + # # if isinstance(messages, Message): # messages = [messages.to_dict()] # elif isinstance(messages, str): @@ -155,14 +156,14 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # f"Only support messages type are: str, Message, list[dict], but got {type(messages).__name__}!" # ) # return messages - # + # # def ask_code(self, messages: Union[str, Message, list[dict]], **kwargs) -> dict: # """Use function of tools to ask a code. 
- # + # # Note: Keep kwargs consistent with the parameters in the https://platform.openai.com/docs/api-reference/chat/create - # + # # Examples: - # + # # >>> llm = OpenAIGPTAPI() # >>> llm.ask_code("Write a python hello world code.") # {'language': 'python', 'code': "print('Hello, World!')"} @@ -173,14 +174,14 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # messages = self._process_message(messages) # rsp = self._chat_completion_function(messages, **kwargs) # return self.get_choice_function_arguments(rsp) - # + # # async def aask_code(self, messages: Union[str, Message, list[dict]], **kwargs) -> dict: # """Use function of tools to ask a code. - # + # # Note: Keep kwargs consistent with the parameters in the https://platform.openai.com/docs/api-reference/chat/create - # + # # Examples: - # + # # >>> llm = OpenAIGPTAPI() # >>> rsp = await llm.ask_code("Write a python hello world code.") # >>> rsp @@ -191,7 +192,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # messages = self._process_message(messages) # rsp = await self._achat_completion_function(messages, **kwargs) # return self.get_choice_function_arguments(rsp) - # + # # def _calc_usage(self, messages: list[dict], rsp: str) -> dict: # usage = {} # if CONFIG.calc_usage: @@ -205,23 +206,23 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # logger.error("usage calculation failed!", e) # else: # return usage - # + # # async def acompletion_batch(self, batch: list[list[dict]]) -> list[dict]: # """Return full JSON""" # split_batches = self.split_batches(batch) # all_results = [] - # + # # for small_batch in split_batches: # logger.info(small_batch) # await self.wait_if_needed(len(small_batch)) - # + # # future = [self.acompletion(prompt) for prompt in small_batch] # results = await asyncio.gather(*future) # logger.info(results) # all_results.extend(results) - # + # # return all_results - # + # # async def acompletion_batch_text(self, batch: list[list[dict]]) -> list[str]: # """Only return plain text""" # raw_results = await self.acompletion_batch(batch) @@ -231,7 +232,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # results.append(result) # logger.info(f"Result of task {idx}: {result}") # return results - # + # # def _update_costs(self, usage: dict): # if CONFIG.calc_usage: # try: @@ -240,15 +241,15 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model) # except Exception as e: # logger.error("updating costs failed!", e) - # + # # def get_costs(self) -> Costs: # return self._cost_manager.get_costs() - # + # # def get_max_tokens(self, messages: list[dict]): # if not self.auto_max_tokens: # return CONFIG.max_tokens_rsp # return get_max_completion_tokens(messages, self.model, CONFIG.max_tokens_rsp) - # + # # def moderation(self, content: Union[str, list[str]]): # try: # if not content: @@ -258,11 +259,11 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # return rsp # except Exception as e: # logger.error(f"moderating failed:{e}") - # + # # def _moderation(self, content: Union[str, list[str]]): # rsp = self.llm.Moderation.create(input=content) # return rsp - # + # # async def amoderation(self, content: Union[str, list[str]]): # try: # if not content: @@ -272,7 +273,7 @@ class MetaGPTLLMAPI(OpenAIGPTAPI): # return rsp # except Exception as e: # logger.error(f"moderating failed:{e}") - # + # # async def _amoderation(self, content: Union[str, list[str]]): # rsp = await self.llm.Moderation.acreate(input=content) # return rsp diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py index 58d04cf84..206be29d0 100644 
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -9,12 +9,13 @@
 @Modified By: mashenquan, 2023/12/1. Fix bug: Unclosed connection caused by openai 0.x.
 """
-from typing import Union
-from openai import APIConnectionError, AsyncAzureOpenAI, AsyncOpenAI, RateLimitError
-from openai.types import CompletionUsage
 import asyncio
 import time
+from typing import Union
+
 import openai
+from openai import APIConnectionError, AsyncAzureOpenAI, AsyncOpenAI, RateLimitError
+from openai.types import CompletionUsage
 from tenacity import (
     after_log,
     retry,
@@ -22,9 +23,10 @@ from tenacity import (
     stop_after_attempt,
     wait_random_exponential,
 )
+
 from metagpt.config import CONFIG
-from metagpt.llm import LLMType
 from metagpt.logs import logger
+from metagpt.provider import LLMType
 from metagpt.provider.base_gpt_api import BaseGPTAPI
 from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA, GENERAL_TOOL_CHOICE
 from metagpt.schema import Message
@@ -348,4 +350,3 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
         memory = BrainMemory(llm_type=LLMType.OPENAI.value, historical_summary=text, cacheable=False)
         return await memory.summarize(llm=self, max_words=max_words, keep_language=keep_language)
-
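The switch from `from metagpt.llm import LLMType` to `from metagpt.provider import LLMType` matches the relocation of the LLMType enum into metagpt/provider/__init__.py shown earlier in this patch. A small illustrative sketch of how the relocated enum resolves provider names, assuming only the members defined in that hunk (OPENAI, METAGPT, UNKNOWN):

```python
from metagpt.provider import LLMType

# Known provider names map to their members...
assert LLMType.get("OpenAI") is LLMType.OPENAI
assert LLMType.get("MetaGPT") is LLMType.METAGPT
# ...and unrecognized names fall back to UNKNOWN instead of raising.
assert LLMType.get("not-a-provider") is LLMType.UNKNOWN
```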
diff --git a/metagpt/provider/zhipuai/async_sse_client.py b/metagpt/provider/zhipuai/async_sse_client.py
index b819fdc63..d7168202a 100644
--- a/metagpt/provider/zhipuai/async_sse_client.py
+++ b/metagpt/provider/zhipuai/async_sse_client.py
@@ -3,11 +3,10 @@
 # @Desc : async_sse_client to make keep the use of Event to access response
 # refs to `https://github.com/zhipuai/zhipuai-sdk-python/blob/main/zhipuai/utils/sse_client.py`
-from zhipuai.utils.sse_client import SSEClient, Event, _FIELD_SEPARATOR
+from zhipuai.utils.sse_client import _FIELD_SEPARATOR, Event, SSEClient
 class AsyncSSEClient(SSEClient):
-
     async def _aread(self):
         data = b""
         async for chunk in self._event_source:
@@ -37,9 +36,7 @@ class AsyncSSEClient(SSEClient):
             # Ignore unknown fields.
             if field not in event.__dict__:
-                self._logger.debug(
-                    "Saw invalid field %s while parsing " "Server Side Event", field
-                )
+                self._logger.debug("Saw invalid field %s while parsing " "Server Side Event", field)
                 continue
             if len(data) > 1:
diff --git a/metagpt/provider/zhipuai_api.py b/metagpt/provider/zhipuai_api.py
index 206f0dab9..82513f83c 100644
--- a/metagpt/provider/zhipuai_api.py
+++ b/metagpt/provider/zhipuai_api.py
@@ -2,8 +2,12 @@
 # -*- coding: utf-8 -*-
 # @Desc : zhipuai LLM from https://open.bigmodel.cn/dev/api#sdk
-from enum import Enum
 import json
+from enum import Enum
+
+import openai
+import zhipuai
+from requests import ConnectionError
 from tenacity import (
     after_log,
     retry,
@@ -11,16 +15,13 @@ from tenacity import (
     stop_after_attempt,
     wait_random_exponential,
 )
-from requests import ConnectionError
-
-import openai
-import zhipuai
 from metagpt.config import CONFIG
 from metagpt.logs import logger
 from metagpt.provider.base_gpt_api import BaseGPTAPI
-from metagpt.provider.openai_api import CostManager, log_and_reraise
+from metagpt.provider.openai_api import log_and_reraise
 from metagpt.provider.zhipuai.zhipu_model_api import ZhiPuModelAPI
+from metagpt.utils.cost_manager import CostManager
 class ZhiPuEvent(Enum):
@@ -50,15 +51,11 @@ class ZhiPuAIGPTAPI(BaseGPTAPI):
         openai.api_key = zhipuai.api_key  # due to use openai sdk, set the api_key but it will't be used.
     def _const_kwargs(self, messages: list[dict]) -> dict:
-        kwargs = {
-            "model": self.model,
-            "prompt": messages,
-            "temperature": 0.3
-        }
+        kwargs = {"model": self.model, "prompt": messages, "temperature": 0.3}
         return kwargs
     def _update_costs(self, usage: dict):
-        """ update each request's token cost """
+        """update each request's token cost"""
         if CONFIG.calc_usage:
             try:
                 prompt_tokens = int(usage.get("prompt_tokens", 0))
@@ -68,7 +65,7 @@ class ZhiPuAIGPTAPI(BaseGPTAPI):
                 logger.error("zhipuai updats costs failed!", e)
     def get_choice_text(self, resp: dict) -> str:
-        """ get the first text of choice from llm response """
+        """get the first text of choice from llm response"""
         assist_msg = resp.get("data", {}).get("choices", [{"role": "error"}])[-1]
         assert assist_msg["role"] == "assistant"
         return assist_msg.get("content")
@@ -129,10 +126,10 @@ class ZhiPuAIGPTAPI(BaseGPTAPI):
         wait=wait_random_exponential(min=1, max=60),
         after=after_log(logger, logger.level("WARNING").name),
         retry=retry_if_exception_type(ConnectionError),
-        retry_error_callback=log_and_reraise
+        retry_error_callback=log_and_reraise,
     )
     async def acompletion_text(self, messages: list[dict], stream=False) -> str:
-        """ response in async with stream or non-stream mode """
+        """response in async with stream or non-stream mode"""
         if stream:
             return await self._achat_completion_stream(messages)
         resp = await self._achat_completion(messages)
diff --git a/metagpt/roles/engineer.py b/metagpt/roles/engineer.py
index e1ab3b06b..4f7f0b796 100644
--- a/metagpt/roles/engineer.py
+++ b/metagpt/roles/engineer.py
@@ -16,19 +16,13 @@
 @Modified By: mashenquan, 2023-12-5. Enhance the workflow to navigate to WriteCode or QaEngineer based on the results of SummarizeCode.
 """
-<<<<<<< HEAD
 from __future__ import annotations
 import json
 from collections import defaultdict
-=======
-import asyncio
-from collections import OrderedDict
->>>>>>> send18/dev
 from pathlib import Path
 from typing import Set
-<<<<<<< HEAD
 from metagpt.actions import Action, WriteCode, WriteCodeReview, WriteTasks
 from metagpt.actions.fix_bug import FixBug
 from metagpt.actions.summarize_code import SummarizeCode
@@ -49,18 +43,6 @@ from metagpt.schema import (
     Message,
 )
 from metagpt.utils.common import any_to_name, any_to_str, any_to_str_set
-=======
-import aiofiles
-
-from metagpt.actions import WriteCode, WriteCodeReview, WriteDesign, WriteTasks
-from metagpt.config import CONFIG
-from metagpt.logs import logger
-from metagpt.roles import Role
-from metagpt.schema import Message
-from metagpt.utils.common import CodeParser
-from metagpt.utils.special_tokens import FILENAME_CODE_SEP, MSG_SEP
-
->>>>>>> send18/dev
 IS_PASS_PROMPT = """
 {context}
 If the above log indicates project execution success, answer "YES", otherwise answer "NO".
 """
@@ -85,7 +67,6 @@ class Engineer(Role):
         use_code_review (bool): Whether to use code review.
""" -<<<<<<< HEAD def __init__( self, name: str = "Alex", @@ -96,18 +77,6 @@ class Engineer(Role): use_code_review: bool = False, ) -> None: """Initializes the Engineer role with given attributes.""" -======= -class Engineer(Role): - def __init__( - self, - name="Alex", - profile="Engineer", - goal="Write elegant, readable, extensible, efficient code", - constraints="The code you write should conform to code standard like PEP8, be modular, easy to read and maintain", - n_borg=1, - use_code_review=False, - ): ->>>>>>> send18/dev super().__init__(name, profile, goal, constraints) self.use_code_review = use_code_review self._watch([WriteTasks, SummarizeCode, WriteCode, WriteCodeReview, FixBug]) @@ -121,7 +90,6 @@ class Engineer(Role): m = json.loads(task_msg.content) return m.get("Task list") -<<<<<<< HEAD async def _act_sp_with_cr(self, review=False) -> Set[str]: changed_files = set() src_file_repo = CONFIG.git_repo.new_file_repository(CONFIG.src_workspace) @@ -145,83 +113,8 @@ class Engineer(Role): msg = Message( content=coding_context.json(), instruct_content=coding_context, role=self.profile, cause_by=WriteCode ) -======= - @classmethod - def parse_tasks(self, task_msg: Message) -> list[str]: - if task_msg.instruct_content: - return task_msg.instruct_content.dict().get("Task list") - return CodeParser.parse_file_list(block="Task list", text=task_msg.content) - - @classmethod - def parse_code(self, code_text: str) -> str: - return CodeParser.parse_code(block="", text=code_text) - - @classmethod - def parse_workspace(cls, system_design_msg: Message) -> str: - if system_design_msg.instruct_content: - return system_design_msg.instruct_content.dict().get("Python package name").strip().strip("'").strip('"') - return CodeParser.parse_str(block="Python package name", text=system_design_msg.content) - - def get_workspace(self) -> Path: - msg = self._rc.memory.get_by_action(WriteDesign)[-1] - if not msg: - return CONFIG.workspace / "src" - workspace = self.parse_workspace(msg) - # Codes are written in workspace/{package_name}/{package_name} - return CONFIG.workspace / workspace - - async def write_file(self, filename: str, code: str): - workspace = self.get_workspace() - filename = filename.replace('"', "").replace("\n", "") - file = workspace / filename - file.parent.mkdir(parents=True, exist_ok=True) - async with aiofiles.open(file, "w") as f: - await f.write(code) - return file - - def recv(self, message: Message) -> None: - self._rc.memory.add(message) - if message in self._rc.important_memory: - self.todos = self.parse_tasks(message) - - async def _act_mp(self) -> Message: - # self.recreate_workspace() - todo_coros = [] - for todo in self.todos: - todo_coro = WriteCode().run( - context=self._rc.memory.get_by_actions([WriteTasks, WriteDesign]), filename=todo - ) - todo_coros.append(todo_coro) - - rsps = await gather_ordered_k(todo_coros, self.n_borg) - for todo, code_rsp in zip(self.todos, rsps): - _ = self.parse_code(code_rsp) - logger.info(todo) - logger.info(code_rsp) - # self.write_file(todo, code) - msg = Message(content=code_rsp, role=self.profile, cause_by=type(self._rc.todo)) self._rc.memory.add(msg) - del self.todos[0] - logger.info(f"Done {self.get_workspace()} generating.") - msg = Message(content="all done.", role=self.profile, cause_by=type(self._rc.todo)) - return msg - - async def _act_sp(self) -> Message: - code_msg_all = [] # gather all code info, will pass to qa_engineer for tests later - instruct_content = {} - for todo in self.todos: - code = await 
WriteCode().run(context=self._rc.history, filename=todo) - # logger.info(todo) - # logger.info(code_rsp) - # code = self.parse_code(code_rsp) - file_path = await self.write_file(todo, code) - msg = Message(content=code, role=self.profile, cause_by=type(self._rc.todo)) ->>>>>>> send18/dev - self._rc.memory.add(msg) - instruct_content[todo] = code - -<<<<<<< HEAD changed_files.add(coding_context.code_doc.filename) if not changed_files: logger.info("Nothing has changed.") @@ -247,22 +140,8 @@ class Engineer(Role): cause_by=WriteCodeReview if self.use_code_review else WriteCode, send_to=self, sent_from=self, -======= - # code_msg = todo + FILENAME_CODE_SEP + str(file_path) - code_msg = (todo, file_path) - code_msg_all.append(code_msg) - - logger.info(f"Done {self.get_workspace()} generating.") - msg = Message( - content=MSG_SEP.join(todo + FILENAME_CODE_SEP + str(file_path) for todo, file_path in code_msg_all), - instruct_content=instruct_content, - role=self.profile, - cause_by=type(self._rc.todo), - send_to="QaEngineer", ->>>>>>> send18/dev ) -<<<<<<< HEAD async def _act_summarize(self): code_summaries_file_repo = CONFIG.git_repo.new_file_repository(CODE_SUMMARIES_FILE_REPO) code_summaries_pdf_file_repo = CONFIG.git_repo.new_file_repository(CODE_SUMMARIES_PDF_FILE_REPO) @@ -353,49 +232,6 @@ class Engineer(Role): async def _new_coding_doc(filename, src_file_repo, task_file_repo, design_file_repo, dependency): context = await Engineer._new_coding_context( filename, src_file_repo, task_file_repo, design_file_repo, dependency -======= - async def _act_sp_precision(self) -> Message: - code_msg_all = [] # gather all code info, will pass to qa_engineer for tests later - instruct_content = {} - for todo in self.todos: - """ - # 从历史信息中挑选必须的信息,以减少prompt长度(人工经验总结) - 1. Architect全部 - 2. ProjectManager全部 - 3. 是否需要其他代码(暂时需要)? - TODO:目标是不需要。在任务拆分清楚后,根据设计思路,不需要其他代码也能够写清楚单个文件,如果不能则表示还需要在定义的更清晰,这个是代码能够写长的关键 - """ - context = [] - msg = self._rc.memory.get_by_actions([WriteDesign, WriteTasks, WriteCode]) - for m in msg: - context.append(m.content) - context_str = "\n".join(context) - # 编写code - code = await WriteCode().run(context=context_str, filename=todo) - # code review - if self.use_code_review: - try: - rewrite_code = await WriteCodeReview().run(context=context_str, code=code, filename=todo) - code = rewrite_code - except Exception as e: - logger.error("code review failed!", e) - pass - file_path = await self.write_file(todo, code) - msg = Message(content=code, role=self.profile, cause_by=WriteCode) - self._rc.memory.add(msg) - instruct_content[todo] = code - - code_msg = (todo, file_path) - code_msg_all.append(code_msg) - - logger.info(f"Done {self.get_workspace()} generating.") - msg = Message( - content=MSG_SEP.join(todo + FILENAME_CODE_SEP + str(file_path) for todo, file_path in code_msg_all), - instruct_content=instruct_content, - role=self.profile, - cause_by=type(self._rc.todo), - send_to="QaEngineer", ->>>>>>> send18/dev ) coding_doc = Document(root_path=str(src_file_repo.root_path), filename=filename, content=context.json()) return coding_doc diff --git a/metagpt/roles/qa_engineer.py b/metagpt/roles/qa_engineer.py index c8bca8c42..c1573e63b 100644 --- a/metagpt/roles/qa_engineer.py +++ b/metagpt/roles/qa_engineer.py @@ -14,10 +14,7 @@ @Modified By: mashenquan, 2023-12-5. Enhance the workflow to navigate to WriteCode or QaEngineer based on the results of SummarizeCode. 
""" -<<<<<<< HEAD -from metagpt.actions import DebugError, RunCode, WriteCode, WriteCodeReview, WriteTest - -# from metagpt.const import WORKSPACE_ROOT +from metagpt.actions import DebugError, RunCode, WriteTest from metagpt.actions.summarize_code import SummarizeCode from metagpt.config import CONFIG from metagpt.const import ( @@ -25,13 +22,6 @@ from metagpt.const import ( TEST_CODES_FILE_REPO, TEST_OUTPUTS_FILE_REPO, ) -======= -import os -from pathlib import Path - -from metagpt.actions import DebugError, RunCode, WriteCode, WriteDesign, WriteTest -from metagpt.config import CONFIG ->>>>>>> send18/dev from metagpt.logs import logger from metagpt.roles import Role from metagpt.schema import Document, Message, RunCodeContext, TestingContext @@ -55,32 +45,6 @@ class QaEngineer(Role): self.test_round = 0 self.test_round_allowed = test_round_allowed -<<<<<<< HEAD -======= - @classmethod - def parse_workspace(cls, system_design_msg: Message) -> str: - if not system_design_msg.instruct_content: - return system_design_msg.instruct_content.dict().get("Python package name") - return CodeParser.parse_str(block="Python package name", text=system_design_msg.content) - - def get_workspace(self, return_proj_dir=True) -> Path: - msg = self._rc.memory.get_by_action(WriteDesign)[-1] - if not msg: - return CONFIG.workspace / "src" - workspace = self.parse_workspace(msg) - # project directory: workspace/{package_name}, which contains package source code folder, tests folder, resources folder, etc. - if return_proj_dir: - return CONFIG.workspace / workspace - # development codes directory: workspace/{package_name}/{package_name} - return CONFIG.workspace / workspace / workspace - - def write_file(self, filename: str, code: str): - workspace = self.get_workspace() / "tests" - file = workspace / filename - file.parent.mkdir(parents=True, exist_ok=True) - file.write_text(code) - ->>>>>>> send18/dev async def _write_test(self, message: Message) -> None: src_file_repo = CONFIG.git_repo.new_file_repository(CONFIG.src_workspace) changed_files = set(src_file_repo.changed_files.keys()) diff --git a/metagpt/roles/researcher.py b/metagpt/roles/researcher.py index 576e57969..d13d43495 100644 --- a/metagpt/roles/researcher.py +++ b/metagpt/roles/researcher.py @@ -1,16 +1,10 @@ #!/usr/bin/env python """ -<<<<<<< HEAD +@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. @Modified By: mashenquan, 2023-11-1. According to Chapter 2.2.1 and 2.2.2 of RFC 116, change the data type of the `cause_by` value in the `Message` to a string to support the new message distribution feature. """ -======= -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. 
- -""" ->>>>>>> send18/dev - import asyncio from pydantic import BaseModel @@ -47,8 +41,6 @@ class Researcher(Role): if language not in ("en-us", "zh-cn"): logger.warning(f"The language `{language}` has not been tested, it may not work.") -<<<<<<< HEAD -======= async def _think(self) -> bool: if self._rc.todo is None: self._set_state(0) @@ -60,7 +52,6 @@ class Researcher(Role): self._rc.todo = None return False ->>>>>>> send18/dev async def _act(self) -> Message: logger.info(f"{self._setting}: ready to {self._rc.todo}") todo = self._rc.todo diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py index 9f2cb7753..1f28e3c57 100644 --- a/metagpt/roles/role.py +++ b/metagpt/roles/role.py @@ -4,7 +4,7 @@ @Time : 2023/5/11 14:42 @Author : alexanderwu @File : role.py -<<<<<<< HEAD +@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. @Modified By: mashenquan, 2023-11-1. According to Chapter 2.2.1 and 2.2.2 of RFC 116: 1. Merge the `recv` functionality into the `_observe` function. Future message reading operations will be consolidated within the `_observe` function. @@ -18,10 +18,6 @@ only. In the normal workflow, you should use `publish_message` or `put_message` to transmit messages. @Modified By: mashenquan, 2023-11-4. According to the routing feature plan in Chapter 2.2.3.2 of RFC 113, the routing functionality is to be consolidated into the `Environment` class. -======= -@Modified By: mashenquan, 2023-8-7, Support template-style variables, such as '{teaching_language} Teacher'. -@Modified By: mashenquan, 2023/8/22. A definition has been provided for the return value of _think: returning false indicates that further reasoning cannot continue. ->>>>>>> send18/dev """ from __future__ import annotations @@ -31,20 +27,11 @@ from typing import Iterable, Set, Type from pydantic import BaseModel, Field from metagpt.actions import Action, ActionOutput -from metagpt.config import CONFIG -<<<<<<< HEAD from metagpt.llm import LLM, HumanProvider from metagpt.logs import logger from metagpt.memory import Memory from metagpt.schema import Message, MessageQueue from metagpt.utils.common import any_to_name, any_to_str -======= -from metagpt.const import OPTIONS -from metagpt.llm import LLMFactory -from metagpt.logs import logger -from metagpt.memory import LongTermMemory, Memory -from metagpt.schema import Message, MessageTag ->>>>>>> send18/dev PREFIX_TEMPLATE = """You are a {profile}, named {name}, your goal is {goal}, and the constraint is {constraints}. 
""" @@ -87,11 +74,7 @@ class RoleReactMode(str, Enum): class RoleSetting(BaseModel): -<<<<<<< HEAD - """Role Settings""" -======= """Role properties""" ->>>>>>> send18/dev name: str profile: str @@ -108,16 +91,10 @@ class RoleSetting(BaseModel): class RoleContext(BaseModel): -<<<<<<< HEAD """Role Runtime Context""" env: "Environment" = Field(default=None) msg_buffer: MessageQueue = Field(default_factory=MessageQueue) # Message Buffer with Asynchronous Updates -======= - """Runtime role context""" - - env: "Environment" = Field(default=None) ->>>>>>> send18/dev memory: Memory = Field(default_factory=Memory) # long_term_memory: LongTermMemory = Field(default_factory=LongTermMemory) state: int = Field(default=-1) # -1 indicates initial or termination state where todo is None @@ -133,34 +110,22 @@ class RoleContext(BaseModel): arbitrary_types_allowed = True def check(self, role_id: str): - if CONFIG.long_term_memory: - self.long_term_memory.recover_memory(role_id, self) - self.memory = self.long_term_memory # use memory to act as long_term_memory for unify operation + # if hasattr(CONFIG, "long_term_memory") and CONFIG.long_term_memory: + # self.long_term_memory.recover_memory(role_id, self) + # self.memory = self.long_term_memory # use memory to act as long_term_memory for unify operation + pass @property def important_memory(self) -> list[Message]: -<<<<<<< HEAD - """Get the information corresponding to the watched actions""" -======= """Retrieve information corresponding to the attention action.""" ->>>>>>> send18/dev return self.memory.get_by_actions(self.watch) @property def history(self) -> list[Message]: return self.memory.get() - @property - def prerequisite(self): - """Retrieve information with `prerequisite` tag""" - if self.memory and hasattr(self.memory, "get_by_tags"): - vv = self.memory.get_by_tags([MessageTag.Prerequisite.value]) - return vv[-1:] if len(vv) > 1 else vv - return [] - class Role: -<<<<<<< HEAD """Role/Agent""" def __init__(self, name="", profile="", goal="", constraints="", desc="", is_human=False): @@ -168,20 +133,6 @@ class Role: self._setting = RoleSetting( name=name, profile=profile, goal=goal, constraints=constraints, desc=desc, is_human=is_human ) -======= - """Role/Proxy""" - - def __init__(self, name="", profile="", goal="", constraints="", desc="", *args, **kwargs): - # Replace template-style variables, such as '{teaching_language} Teacher'. - name = Role.format_value(name) - profile = Role.format_value(profile) - goal = Role.format_value(goal) - constraints = Role.format_value(constraints) - desc = Role.format_value(desc) - - self._llm = LLMFactory.new_llm() - self._setting = RoleSetting(name=name, profile=profile, goal=goal, constraints=constraints, desc=desc) ->>>>>>> send18/dev self._states = [] self._actions = [] self._role_id = str(self._setting) @@ -258,12 +209,8 @@ class Role: self._rc.todo = self._actions[self._rc.state] if state >= 0 else None def set_env(self, env: "Environment"): -<<<<<<< HEAD """Set the environment in which the role works. 
     def set_env(self, env: "Environment"):
-<<<<<<< HEAD
         """Set the environment in which the role works.
         The role can talk to the environment and can also receive messages by observing."""
-=======
-        """设置角色工作所处的环境,角色可以向环境说话,也可以通过观察接受环境消息"""
->>>>>>> send18/dev
         self._rc.env = env
         if env:
             env.set_subscription(self, self._subscription)
@@ -275,7 +222,6 @@ class Role:

     @property
     def name(self):
-<<<<<<< HEAD
         """Get virtual user name"""
         return self._setting.name

@@ -283,9 +229,6 @@ class Role:
     def subscription(self) -> Set:
         """The labels for messages to be consumed by the Role object."""
         return self._subscription
-=======
-        """Return role `name`, read only"""
-        return self._setting.name

     @property
     def desc(self):
@@ -306,7 +249,6 @@ class Role:
     def action_count(self):
         """Return number of action"""
         return len(self._actions)
->>>>>>> send18/dev

     def _get_prefix(self):
         """Get the role prefix"""
         if self._setting.desc:
             return self._setting.desc
         return PREFIX_TEMPLATE.format(**self._setting.dict())

-<<<<<<< HEAD
-    async def _think(self) -> None:
-        """Think about what to do and decide on the next action"""
-=======
     async def _think(self) -> bool:
         """Consider what to do and decide on the next course of action. Return false if nothing can be done."""
->>>>>>> send18/dev
         if len(self._actions) == 1:
             # If there is only one action, then only this one can be performed
             self._set_state(0)
             return True
         prompt = self._get_prefix()
         prompt += STATE_TEMPLATE.format(
-<<<<<<< HEAD
             history=self._rc.history,
             states="\n".join(self._states),
             n_states=len(self._states) - 1,
@@ -344,49 +280,27 @@ class Role:
         if next_state == -1:
             logger.info(f"End actions with {next_state=}")
         self._set_state(next_state)
-=======
-            history=self._rc.history, states="\n".join(self._states), n_states=len(self._states) - 1
-        )
-        next_state = await self._llm.aask(prompt)
-        logger.debug(f"{prompt=}")
-        if not next_state.isdigit() or int(next_state) not in range(len(self._states)):
-            logger.warning(f"Invalid answer of state, {next_state=}")
-            next_state = "0"
-        self._set_state(int(next_state))
         return True
->>>>>>> send18/dev

     async def _act(self) -> Message:
         logger.info(f"{self._setting}: ready to {self._rc.todo}")
-<<<<<<< HEAD
         response = await self._rc.todo.run(self._rc.important_memory)
-=======
-        requirement = self._rc.important_memory or self._rc.prerequisite
-        response = await self._rc.todo.run(requirement)
-        # logger.info(response)
->>>>>>> send18/dev
         if isinstance(response, ActionOutput):
             msg = Message(
                 content=response.content,
                 instruct_content=response.instruct_content,
                 role=self.profile,
-<<<<<<< HEAD
                 cause_by=self._rc.todo,
                 sent_from=self,
             )
         elif isinstance(response, Message):
             msg = response
-=======
-                cause_by=type(self._rc.todo),
-            )
->>>>>>> send18/dev
         else:
             msg = Message(content=response, role=self.profile, cause_by=self._rc.todo, sent_from=self)
         self._rc.memory.add(msg)

         return msg

-<<<<<<< HEAD
     async def _observe(self, ignore_memory=False) -> int:
         """Prepare new messages for processing from the message buffer and other sources."""
         # Read unprocessed messages from the msg buffer.
@@ -400,21 +314,6 @@ class Role:
         # Design Rules:
         # If you need to further categorize Message objects, you can do so using the Message.set_meta function.
         # msg_buffer is a receiving buffer, avoid adding message data and operations to msg_buffer.
-=======
-    async def _observe(self) -> int:
-        """从环境中观察,获得重要信息,并加入记忆"""
-        if not self._rc.env:
-            return 0
-        env_msgs = self._rc.env.memory.get()
-
-        observed = self._rc.env.memory.get_by_actions(self._rc.watch)
-
-        self._rc.news = self._rc.memory.remember(observed)  # remember recent exact or similar memories
-
-        for i in env_msgs:
-            self.recv(i)
-
->>>>>>> send18/dev
         news_text = [f"{i.role}: {i.content[:20]}..." for i in self._rc.news]
         if news_text:
             logger.debug(f"{self._setting} observed: {news_text}")
@@ -505,36 +404,10 @@ class Role:
         self.publish_message(rsp)
         return rsp

-<<<<<<< HEAD
     @property
     def is_idle(self) -> bool:
         """If true, all actions have been executed."""
         return not self._rc.news and not self._rc.todo and self._rc.msg_buffer.empty()
-=======
-    @staticmethod
-    def format_value(value):
-        """Fill parameters inside `value` with `options`."""
-        if not isinstance(value, str):
-            return value
-        if "{" not in value:
-            return value
-
-        merged_opts = OPTIONS.get() or {}
-        try:
-            return value.format(**merged_opts)
-        except KeyError as e:
-            logger.warning(f"Parameter is missing:{e}")
-
-        for k, v in merged_opts.items():
-            value = value.replace("{" + f"{k}" + "}", str(v))
-        return value
-
-    def add_action(self, act):
-        self._actions.append(act)
-
-    def add_to_do(self, act):
-        self._rc.todo = act
->>>>>>> send18/dev

     async def think(self) -> Action:
         """The exported `think` function"""
@@ -547,16 +420,7 @@ class Role:
         return ActionOutput(content=msg.content, instruct_content=msg.instruct_content)

     @property
-<<<<<<< HEAD
     def todo(self) -> str:
         if self._actions:
             return any_to_name(self._actions[0])
         return ""
-=======
-    def todo_description(self):
-        if not self._rc or not self._rc.todo:
-            return ""
-        if self._rc.todo.desc:
-            return self._rc.todo.desc
-        return f"{type(self._rc.todo).__name__}"
->>>>>>> send18/dev
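Review note: the new `todo` property leans on the string-tag helpers from `metagpt.utils.common` (see the `format_value` hunk further down in the same module). A hedged illustration; the exact dotted path printed is an assumption:

```python
from metagpt.actions import WriteTest
from metagpt.utils.common import any_to_name, any_to_str

print(any_to_str(WriteTest))   # assumed output: "metagpt.actions.write_test.WriteTest"
print(any_to_name(WriteTest))  # "WriteTest" -- the last segment of the dotted path
```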
diff --git a/metagpt/schema.py b/metagpt/schema.py
index 70e84ff15..baed5582b 100644
--- a/metagpt/schema.py
+++ b/metagpt/schema.py
@@ -22,7 +22,9 @@
 from asyncio import Queue, QueueEmpty, wait_for
 from json import JSONDecodeError
 from pathlib import Path
 from typing import Dict, List, Optional, Set, TypedDict
+
 from pydantic import BaseModel, Field
+
 from metagpt.config import CONFIG
 from metagpt.const import (
     MESSAGE_ROUTE_CAUSE_BY,
@@ -95,14 +97,14 @@ class Message(BaseModel):
     send_to: Set = Field(default_factory={MESSAGE_ROUTE_TO_ALL})

     def __init__(
-            self,
-            content,
-            instruct_content=None,
-            role="user",
-            cause_by="",
-            sent_from="",
-            send_to=MESSAGE_ROUTE_TO_ALL,
-            **kwargs,
+        self,
+        content,
+        instruct_content=None,
+        role="user",
+        cause_by="",
+        sent_from="",
+        send_to=MESSAGE_ROUTE_TO_ALL,
+        **kwargs,
     ):
         """
         Parameters not listed below will be stored as meta info, including custom parameters.
@@ -343,4 +345,3 @@ class CodeSummarizeContext(BaseModel):

 class BugFixContext(BaseModel):
     filename: str = ""
-
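Review note: with the RFC 116 routing scheme, `cause_by`/`send_to` become string tags and unlisted keyword arguments are kept as meta info. A hedged construction sketch; the normalization of `cause_by` to a string is assumed to happen inside `__init__`:

```python
from metagpt.actions.summarize_code import SummarizeCode
from metagpt.schema import Message

msg = Message(
    content="summary of the changed sources",  # hypothetical payload
    cause_by=SummarizeCode,                    # assumed to be normalized to a string route tag
    send_to="Engineer",                        # hypothetical recipient label
    review_round=2,                            # custom kwarg, stored as meta info per the docstring
)
```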
diff --git a/metagpt/team.py b/metagpt/team.py
index 91587655f..6a3fae0d9 100644
--- a/metagpt/team.py
+++ b/metagpt/team.py
@@ -45,8 +45,9 @@ class Team(BaseModel):
     @staticmethod
     def _check_balance():
         if CONFIG.cost_manager.total_cost > CONFIG.cost_manager.max_budget:
-            raise NoMoneyException(CONFIG.cost_manager.total_cost,
-                                   f'Insufficient funds: {CONFIG.cost_manager.max_budget}')
+            raise NoMoneyException(
+                CONFIG.cost_manager.total_cost, f"Insufficient funds: {CONFIG.cost_manager.max_budget}"
+            )

     def run_project(self, idea, send_to: str = ""):
         """Start a project from publishing user requirement."""
diff --git a/metagpt/tools/__init__.py b/metagpt/tools/__init__.py
index a148bb744..aab8c990c 100644
--- a/metagpt/tools/__init__.py
+++ b/metagpt/tools/__init__.py
@@ -24,6 +24,6 @@ class WebBrowserEngineType(Enum):
     CUSTOM = "custom"

     @classmethod
     def _missing_(cls, key):
-        """缺省类型转换"""
+        """Default type conversion"""
         return cls.CUSTOM
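Review note: the incoming hunk renamed `_missing_` to `__missing__`, which would silently disable the fallback: `Enum` looks up the single-underscore `_missing_` hook, while `__missing__` belongs to `dict` subclasses. The hunk above therefore keeps the name and only translates the docstring. What the hook buys, with member values implied elsewhere in this patch:

```python
from metagpt.tools import WebBrowserEngineType

assert WebBrowserEngineType("playwright") is WebBrowserEngineType.PLAYWRIGHT
# An unknown value falls back to CUSTOM via Enum._missing_ instead of raising ValueError:
assert WebBrowserEngineType("lynx") is WebBrowserEngineType.CUSTOM
```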
diff --git a/metagpt/tools/hello.py b/metagpt/tools/hello.py
index 2eb4c31f0..8a21e1b4e 100644
--- a/metagpt/tools/hello.py
+++ b/metagpt/tools/hello.py
@@ -22,6 +22,6 @@ async def post_greeting(name: str) -> str:

 if __name__ == "__main__":
-    app = connexion.AioHttpApp(__name__, specification_dir='../../.well-known/')
+    app = connexion.AioHttpApp(__name__, specification_dir="../../.well-known/")
     app.add_api("openapi.yaml", arguments={"title": "Hello World Example"})
     app.run(port=8080)
diff --git a/metagpt/tools/metagpt_text_to_image.py b/metagpt/tools/metagpt_text_to_image.py
index c5a0b872f..50c0edcba 100644
--- a/metagpt/tools/metagpt_text_to_image.py
+++ b/metagpt/tools/metagpt_text_to_image.py
@@ -8,18 +8,13 @@
 """
 import asyncio
 import base64
-import os
-import sys
-from pathlib import Path
-from typing import List, Dict
+from typing import Dict, List

 import aiohttp
 import requests
 from pydantic import BaseModel

 from metagpt.config import CONFIG, Config
-
-sys.path.append(str(Path(__file__).resolve().parent.parent.parent))  # fix-bug: No module named 'metagpt'
 from metagpt.logs import logger

@@ -38,9 +33,7 @@ class MetaGPTText2Image:
         :return: The image data is returned in Base64 encoding.
         """

-        headers = {
-            "Content-Type": "application/json"
-        }
+        headers = {"Content-Type": "application/json"}
         dims = size_type.split("x")
         data = {
             "prompt": text,
diff --git a/metagpt/tools/openai_text_to_embedding.py b/metagpt/tools/openai_text_to_embedding.py
index 86b58d71f..fb6fbc653 100644
--- a/metagpt/tools/openai_text_to_embedding.py
+++ b/metagpt/tools/openai_text_to_embedding.py
@@ -8,26 +8,23 @@
 For more details, checkout: `https://platform.openai.com/docs/api-reference/embeddings/object`
 """
 import asyncio
-import os
-from pathlib import Path
 from typing import List

 import aiohttp
 import requests
 from pydantic import BaseModel
-import sys

 from metagpt.config import CONFIG, Config
-
-sys.path.append(str(Path(__file__).resolve().parent.parent.parent))  # fix-bug: No module named 'metagpt'
 from metagpt.logs import logger


 class Embedding(BaseModel):
     """Represents an embedding vector returned by embedding endpoint."""
+
     object: str  # The object type, which is always "embedding".
     embedding: List[
-        float]  # The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.
+        float
+    ]  # The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.
     index: int  # The index of the embedding in the list of embeddings.

@@ -58,10 +55,7 @@ class OpenAIText2Embedding:
         :return: A json object of :class:`ResultEmbedding` class if successful, otherwise `{}`.
         """

-        headers = {
-            "Content-Type": "application/json",
-            "Authorization": f"Bearer {self.openai_api_key}"
-        }
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.openai_api_key}"}
         data = {"input": text, "model": model}
         try:
             async with aiohttp.ClientSession() as session:
diff --git a/metagpt/tools/sd_engine.py b/metagpt/tools/sd_engine.py
index 479f83c63..c4d9d2df4 100644
--- a/metagpt/tools/sd_engine.py
+++ b/metagpt/tools/sd_engine.py
@@ -6,21 +6,14 @@
 import asyncio
 import base64
 import io
 import json
-import os
 from os.path import join
 from typing import List

 from aiohttp import ClientSession
 from PIL import Image, PngImagePlugin

-<<<<<<< HEAD
 from metagpt.config import CONFIG
-=======
-from metagpt.config import Config
-from metagpt.logs import logger
->>>>>>> send18/dev
-
-# from metagpt.const import WORKSPACE_ROOT
+from metagpt.const import SD_OUTPUT_FILE_REPO
 from metagpt.logs import logger

 payload = {
@@ -84,14 +77,10 @@ class SDEngine:
         return self.payload

     def _save(self, imgs, save_name=""):
-<<<<<<< HEAD
-        save_dir = CONFIG.workspace_path / "resources" / "SD_Output"
-=======
-        save_dir = CONFIG.get_workspace() / "resources" / "SD_Output"
->>>>>>> send18/dev
-        if not os.path.exists(save_dir):
-            os.makedirs(save_dir, exist_ok=True)
-        batch_decode_base64_to_image(imgs, save_dir, save_name=save_name)
+        save_dir = CONFIG.workspace_path / SD_OUTPUT_FILE_REPO
+        if not save_dir.exists():
+            save_dir.mkdir(parents=True, exist_ok=True)
+        batch_decode_base64_to_image(imgs, str(save_dir), save_name=save_name)

     async def run_t2i(self, prompts: List):
         # Asynchronously run the SD API for multiple prompts
diff --git a/metagpt/tools/web_browser_engine.py b/metagpt/tools/web_browser_engine.py
index 1f1a5ec67..cda137cbd 100644
--- a/metagpt/tools/web_browser_engine.py
+++ b/metagpt/tools/web_browser_engine.py
@@ -20,16 +20,16 @@ class WebBrowserEngine:
         engine: WebBrowserEngineType | None = None,
         run_func: Callable[..., Coroutine[Any, Any, WebPage | list[WebPage]]] | None = None,
     ):
-        engine = engine or options.get("web_browser_engine")
+        engine = engine or CONFIG.web_browser_engine
         if engine is None:
             raise NotImplementedError

         if WebBrowserEngineType(engine) is WebBrowserEngineType.PLAYWRIGHT:
             module = "metagpt.tools.web_browser_engine_playwright"
-            run_func = importlib.import_module(module).PlaywrightWrapper(options=options).run
+            run_func = importlib.import_module(module).PlaywrightWrapper().run
         elif WebBrowserEngineType(engine) is WebBrowserEngineType.SELENIUM:
             module = "metagpt.tools.web_browser_engine_selenium"
-            run_func = importlib.import_module(module).SeleniumWrapper(options=options).run
+            run_func = importlib.import_module(module).SeleniumWrapper().run
         elif WebBrowserEngineType(engine) is WebBrowserEngineType.CUSTOM:
             run_func = run_func
         else:
@@ -53,8 +53,6 @@ if __name__ == "__main__":
     import fire

     async def main(url: str, *urls: str, engine_type: Literal["playwright", "selenium"] = "playwright", **kwargs):
-        return await WebBrowserEngine(options=CONFIG.options, engine=WebBrowserEngineType(engine_type), **kwargs).run(
-            url, *urls
-        )
+        return await WebBrowserEngine(engine=WebBrowserEngineType(engine_type), **kwargs).run(url, *urls)

     fire.Fire(main)
diff --git a/metagpt/tools/web_browser_engine_selenium.py b/metagpt/tools/web_browser_engine_selenium.py
index b0fcb3fe1..51d26e551 100644
--- a/metagpt/tools/web_browser_engine_selenium.py
+++ b/metagpt/tools/web_browser_engine_selenium.py
@@ -9,13 +9,13 @@
 import asyncio
 import importlib
 from concurrent import futures
 from copy import deepcopy
-from typing import Literal, Dict
+from typing import Dict, Literal

 from selenium.webdriver.common.by import By
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.support.wait import WebDriverWait

-from metagpt.config import Config
+from metagpt.config import CONFIG
 from metagpt.utils.parse_html import WebPage

@@ -41,11 +41,11 @@ class SeleniumWrapper:
         executor: futures.Executor | None = None,
     ) -> None:
         if browser_type is None:
-            browser_type = options.get("selenium_browser_type")
+            browser_type = CONFIG.selenium_browser_type
         self.browser_type = browser_type
         launch_kwargs = launch_kwargs or {}
-        if options.get("global_proxy") and "proxy-server" not in launch_kwargs:
-            launch_kwargs["proxy-server"] = options.get("global_proxy")
+        if CONFIG.global_proxy and "proxy-server" not in launch_kwargs:
+            launch_kwargs["proxy-server"] = CONFIG.global_proxy

         self.executable_path = launch_kwargs.pop("executable_path", None)
         self.launch_args = [f"--{k}={v}" for k, v in launch_kwargs.items()]
@@ -123,8 +123,6 @@ if __name__ == "__main__":
     import fire

     async def main(url: str, *urls: str, browser_type: str = "chrome", **kwargs):
-        return await SeleniumWrapper(options=Config().runtime_options,
-                                     browser_type=browser_type,
-                                     **kwargs).run(url, *urls)
+        return await SeleniumWrapper(browser_type=browser_type, **kwargs).run(url, *urls)

     fire.Fire(main)
diff --git a/metagpt/utils/common.py b/metagpt/utils/common.py
index b627316cd..57aba463c 100644
--- a/metagpt/utils/common.py
+++ b/metagpt/utils/common.py
@@ -18,10 +18,9 @@
 import os
 import platform
 import re
 from typing import List, Tuple, Union
+
+from metagpt.config import CONFIG
 from metagpt.const import MESSAGE_ROUTE_TO_ALL
-from pathlib import Path
-from typing import List, Tuple
-import yaml

 from metagpt.logs import logger

@@ -186,7 +185,7 @@ class OutputParser:

         if start_index != -1 and end_index != -1:
             # Extract the structure part
-            structure_text = text[start_index: end_index + 1]
+            structure_text = text[start_index : end_index + 1]

             try:
                 # Attempt to convert the text to a Python data type using ast.literal_eval
@@ -371,3 +370,21 @@ def any_to_name(val):
     :return: The name of the value.
     """
     return any_to_str(val).split(".")[-1]
+
+
+def format_value(value):
+    """Fill parameters inside `value` with `options`."""
+    if not isinstance(value, str):
+        return value
+    if "{" not in value:
+        return value
+
+    merged_opts = CONFIG.options or {}
+    try:
+        return value.format(**merged_opts)
+    except KeyError as e:
+        logger.warning(f"Parameter is missing:{e}")
+
+    for k, v in merged_opts.items():
+        value = value.replace("{" + f"{k}" + "}", str(v))
+    return value
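Review note: `format_value` (hoisted here from the removed `Role.format_value`) does best-effort template filling. A hedged demo, assuming `CONFIG.options` contains `{"language": "CN"}`:

```python
from metagpt.utils.common import format_value

format_value("{language} Teacher")        # -> "CN Teacher"
format_value("{language} {missing_key}")  # KeyError path: logs a warning, then replaces
                                          # known keys only -> "CN {missing_key}"
format_value(42)                          # non-str values pass through unchanged
```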
""" +from typing import NamedTuple + from pydantic import BaseModel + from metagpt.logs import logger from metagpt.utils.token_counter import TOKEN_COSTS -from typing import NamedTuple class Costs(NamedTuple): @@ -39,8 +41,9 @@ class CostManager(BaseModel): """ self.total_prompt_tokens += prompt_tokens self.total_completion_tokens += completion_tokens - cost = (prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model][ - "completion"]) / 1000 + cost = ( + prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"] + ) / 1000 self.total_cost += cost logger.info( f"Total running cost: ${self.total_cost:.3f} | Max budget: ${self.max_budget:.3f} | " diff --git a/metagpt/utils/git_repository.py b/metagpt/utils/git_repository.py index 9827b8252..1340b1768 100644 --- a/metagpt/utils/git_repository.py +++ b/metagpt/utils/git_repository.py @@ -8,13 +8,15 @@ """ from __future__ import annotations -from gitignore_parser import parse_gitignore, rule_from_pattern, handle_negation import shutil from enum import Enum from pathlib import Path from typing import Dict, List + from git.repo import Repo from git.repo.fun import is_git_dir +from gitignore_parser import parse_gitignore + from metagpt.const import DEFAULT_WORKSPACE_ROOT from metagpt.logs import logger from metagpt.utils.dependency_file import DependencyFile @@ -236,8 +238,9 @@ class GitRepository: rpath = file_path.relative_to(root_relative_path) files.append(str(rpath)) else: - subfolder_files = self.get_files(relative_path=file_path, root_relative_path=root_relative_path, - filter_ignored=False) + subfolder_files = self.get_files( + relative_path=file_path, root_relative_path=root_relative_path, filter_ignored=False + ) files.extend(subfolder_files) except Exception as e: logger.error(f"Error: {e}") diff --git a/metagpt/utils/mermaid.py b/metagpt/utils/mermaid.py index bf7e6c4a7..3fa7ab79a 100644 --- a/metagpt/utils/mermaid.py +++ b/metagpt/utils/mermaid.py @@ -7,22 +7,15 @@ @Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation. 
""" import asyncio -<<<<<<< HEAD import os from pathlib import Path -from metagpt.config import CONFIG -from metagpt.const import METAGPT_ROOT -======= -from pathlib import Path - -# from metagpt.utils.common import check_cmd_exists import aiofiles -from metagpt.config import CONFIG, Config -from metagpt.const import PROJECT_ROOT ->>>>>>> send18/dev +from metagpt.config import CONFIG +from metagpt.const import METAGPT_ROOT from metagpt.logs import logger +from metagpt.utils.common import check_cmd_exists async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int: @@ -43,7 +36,6 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, await f.write(mermaid_code) # tmp.write_text(mermaid_code, encoding="utf-8") -<<<<<<< HEAD engine = CONFIG.mermaid_engine.lower() if engine == "nodejs": if check_cmd_exists(CONFIG.mmdc) != 0: @@ -100,25 +92,6 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, logger.warning(f"Unsupported mermaid engine: {engine}") return 0 -======= - # if check_cmd_exists("mmdc") != 0: - # logger.warning("RUN `npm install -g @mermaid-js/mermaid-cli` to install mmdc") - # return -1 - - # for suffix in ["pdf", "svg", "png"]: - for suffix in ["png"]: - output_file = f"{output_file_without_suffix}.{suffix}" - # Call the `mmdc` command to convert the Mermaid code to a PNG - logger.info(f"Generating {output_file}..") - cmds = [CONFIG.mmdc, "-i", str(tmp), "-o", output_file, "-w", str(width), "-H", str(height)] - - if CONFIG.puppeteer_config: - cmds.extend(["-p", CONFIG.puppeteer_config]) - process = await asyncio.create_subprocess_exec(*cmds) - await process.wait() - return process.returncode ->>>>>>> send18/dev - if __name__ == "__main__": MMC1 = """classDiagram @@ -171,22 +144,7 @@ if __name__ == "__main__": S-->>SE: return summary SE-->>M: return summary""" -<<<<<<< HEAD -if __name__ == "__main__": loop = asyncio.new_event_loop() result = loop.run_until_complete(mermaid_to_file(MMC1, METAGPT_ROOT / f"{CONFIG.mermaid_engine}/1")) - result = loop.run_until_complete(mermaid_to_file(MMC2, METAGPT_ROOT / f"{CONFIG.mermaid_engine}/1")) + result = loop.run_until_complete(mermaid_to_file(MMC2, METAGPT_ROOT / f"{CONFIG.mermaid_engine}/2")) loop.close() -======= - conf = Config() - asyncio.run( - mermaid_to_file( - options=conf.runtime_options, mermaid_code=MMC1, output_file_without_suffix=PROJECT_ROOT / "tmp/1.png" - ) - ) - asyncio.run( - mermaid_to_file( - options=conf.runtime_options, mermaid_code=MMC2, output_file_without_suffix=PROJECT_ROOT / "tmp/2.png" - ) - ) ->>>>>>> send18/dev diff --git a/tests/conftest.py b/tests/conftest.py index 2709b38ae..375b9ff7f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,7 +10,9 @@ import asyncio import logging import re from unittest.mock import Mock + import pytest + from metagpt.config import CONFIG from metagpt.const import DEFAULT_WORKSPACE_ROOT from metagpt.logs import logger @@ -95,7 +97,7 @@ def setup_and_teardown_git_repo(request): # Register the function for destroying the environment. 
diff --git a/tests/conftest.py b/tests/conftest.py
index 2709b38ae..375b9ff7f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -10,7 +10,9 @@
 import asyncio
 import logging
 import re
 from unittest.mock import Mock
+
 import pytest
+
 from metagpt.config import CONFIG
 from metagpt.const import DEFAULT_WORKSPACE_ROOT
 from metagpt.logs import logger
@@ -95,7 +97,7 @@ def setup_and_teardown_git_repo(request):
     # Register the function for destroying the environment.
     request.addfinalizer(fin)

+
 @pytest.fixture(scope="session", autouse=True)
 def init_config():
     Config()
-
diff --git a/tests/metagpt/actions/test_ui_design.py b/tests/metagpt/actions/test_ui_design.py
index b9c91d21f..83590ec7d 100644
--- a/tests/metagpt/actions/test_ui_design.py
+++ b/tests/metagpt/actions/test_ui_design.py
@@ -101,7 +101,6 @@ body {
 """


-
 def test_ui_design_parse_css():
     ui_design_work = UIDesign(name="UI design action")
diff --git a/tests/metagpt/actions/test_write_code.py b/tests/metagpt/actions/test_write_code.py
index 0bd6633cd..73f3a6dcf 100644
--- a/tests/metagpt/actions/test_write_code.py
+++ b/tests/metagpt/actions/test_write_code.py
@@ -7,9 +7,10 @@
 @Modified By: mashenquan, 2023-12-6. According to RFC 135
 """
 import pytest
-from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
+
 from metagpt.actions.write_code import WriteCode
 from metagpt.logs import logger
+from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
 from metagpt.schema import CodingContext, Document
 from tests.metagpt.actions.mock import TASKS_2, WRITE_CODE_PROMPT_SAMPLE
diff --git a/tests/metagpt/actions/test_write_teaching_plan.py b/tests/metagpt/actions/test_write_teaching_plan.py
index 6754fe88c..3f25b2167 100644
--- a/tests/metagpt/actions/test_write_teaching_plan.py
+++ b/tests/metagpt/actions/test_write_teaching_plan.py
@@ -8,8 +8,9 @@
 import asyncio
 from typing import Optional

-from pydantic import BaseModel
+
 from langchain.llms.base import LLM
+from pydantic import BaseModel

 from metagpt.actions.write_teaching_plan import WriteTeachingPlanPart
 from metagpt.config import Config
@@ -17,7 +18,7 @@ from metagpt.schema import Message


 class MockWriteTeachingPlanPart(WriteTeachingPlanPart):
-    def __init__(self, options, name: str = '', context=None, llm: LLM = None, topic="", language="Chinese"):
+    def __init__(self, options, name: str = "", context=None, llm: LLM = None, topic="", language="Chinese"):
         super().__init__(options, name, context, llm, topic, language)

     async def _aask(self, prompt: str, system_msgs: Optional[list[str]] = None) -> str:
@@ -32,18 +33,8 @@ async def mock_write_teaching_plan_part():
         language: str

     inputs = [
-        {
-            "input": "AABBCC",
-            "name": "A",
-            "topic": WriteTeachingPlanPart.COURSE_TITLE,
-            "language": "C"
-        },
-        {
-            "input": "DDEEFFF",
-            "name": "A1",
-            "topic": "B1",
-            "language": "C1"
-        }
+        {"input": "AABBCC", "name": "A", "topic": WriteTeachingPlanPart.COURSE_TITLE, "language": "C"},
+        {"input": "DDEEFFF", "name": "A1", "topic": "B1", "language": "C1"},
     ]

     for i in inputs:
@@ -63,5 +54,5 @@ def test_suite():
         loop.run_until_complete(task)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_suite()
diff --git a/tests/metagpt/learn/test_text_to_embedding.py b/tests/metagpt/learn/test_text_to_embedding.py
index d81a8ac1c..e3d20a759 100644
--- a/tests/metagpt/learn/test_text_to_embedding.py
+++ b/tests/metagpt/learn/test_text_to_embedding.py
@@ -19,9 +19,7 @@ async def mock_text_to_embedding():
     class Input(BaseModel):
         input: str

-    inputs = [
-        {"input": "Panda emoji"}
-    ]
+    inputs = [{"input": "Panda emoji"}]

     for i in inputs:
         seed = Input(**i)
@@ -36,5 +34,5 @@ def test_suite():
         loop.run_until_complete(task)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_suite()
diff --git a/tests/metagpt/learn/test_text_to_image.py b/tests/metagpt/learn/test_text_to_image.py
index c359797de..982a39b13 100644
--- a/tests/metagpt/learn/test_text_to_image.py
+++ b/tests/metagpt/learn/test_text_to_image.py
@@ -19,9 +19,7 @@ async def mock_text_to_image():
         input: str
         size_type: str

-    inputs = [
-        {"input": "Panda emoji", "size_type": "512x512"}
-    ]
+    inputs = [{"input": "Panda emoji", "size_type": "512x512"}]

     for i in inputs:
         seed = Input(**i)
@@ -31,7 +29,7 @@ async def mock_text_to_image():
         flags = ";base64,"
         assert flags in base64_data
         ix = base64_data.find(flags) + len(flags)
-        declaration = base64_data[0: ix]
+        declaration = base64_data[0:ix]
         assert declaration
         data = base64_data[ix:]
         assert data
@@ -44,5 +42,5 @@ def test_suite():
         loop.run_until_complete(task)


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_suite()
diff --git a/tests/metagpt/learn/test_text_to_speech.py b/tests/metagpt/learn/test_text_to_speech.py
index 68de5a3b2..42b6839fa 100644
--- a/tests/metagpt/learn/test_text_to_speech.py
+++ b/tests/metagpt/learn/test_text_to_speech.py
@@ -18,9 +18,7 @@ async def mock_text_to_speech():
     class Input(BaseModel):
         input: str

-    inputs = [
-        {"input": "Panda emoji"}
-    ]
+    inputs = [{"input": "Panda emoji"}]

     for i in inputs:
         seed = Input(**i)
@@ -30,7 +28,7 @@ async def mock_text_to_speech():
         flags = ";base64,"
         assert flags in base64_data
         ix = base64_data.find(flags) + len(flags)
-        declaration = base64_data[0: ix]
+        declaration = base64_data[0:ix]
         assert declaration
         data = base64_data[ix:]
         assert data
@@ -43,5 +41,5 @@ def test_suite():
         loop.run_until_complete(task)


-if __name__ == '__main__':
-    test_suite()
\ No newline at end of file
+if __name__ == "__main__":
+    test_suite()
diff --git a/tests/metagpt/memory/test_brain_memory.py b/tests/metagpt/memory/test_brain_memory.py
index b5fc942ca..2f2a984d8 100644
--- a/tests/metagpt/memory/test_brain_memory.py
+++ b/tests/metagpt/memory/test_brain_memory.py
@@ -21,14 +21,7 @@ def test_json():
         knowledge: List[str]
         stack: List[str]

-    inputs = [
-        {
-            "history": ["a", "b"],
-            "solution": ["c"],
-            "knowledge": ["d", "e"],
-            "stack": ["f"]
-        }
-    ]
+    inputs = [{"history": ["a", "b"], "solution": ["c"], "knowledge": ["d", "e"], "stack": ["f"]}]

     for i in inputs:
         v = Input(**i)
@@ -53,5 +46,6 @@ def test_json():
         msg = Message(**v)
         assert msg

-if __name__ == '__main__':
-    test_json()
\ No newline at end of file
+
+if __name__ == "__main__":
+    test_json()
diff --git a/tests/metagpt/roles/test_teacher.py b/tests/metagpt/roles/test_teacher.py
index 8f673d6e0..82d6c7052 100644
--- a/tests/metagpt/roles/test_teacher.py
+++ b/tests/metagpt/roles/test_teacher.py
@@ -7,10 +7,9 @@
 """
 from typing import Dict, Optional
+
 from pydantic import BaseModel

-from metagpt.config import Config
-from metagpt.provider.openai_api import CostManager
 from metagpt.roles.teacher import Teacher


@@ -40,7 +39,7 @@ def test_init():
             "expect_constraints": "Do in HaHa, CN",
             "kwargs": {"language": "CN", "key1": "HaHa", "something_big": "sleep", "teaching_language": "EN"},
             "desc": "aaa{language}",
-            "expect_desc": "aaaCN"
+            "expect_desc": "aaaCN",
         },
         {
             "name": "Lily{language}",
@@ -53,17 +52,20 @@ def test_init():
             "expect_constraints": "Do in {key1}, {language}",
             "kwargs": {},
             "desc": "aaa{language}",
-            "expect_desc": "aaa{language}"
+            "expect_desc": "aaa{language}",
         },
     ]

     for i in inputs:
         seed = Inputs(**i)
-        options = Config().runtime_options
-        cost_manager = CostManager(**options)
-        teacher = Teacher(options=options, cost_manager=cost_manager, name=seed.name, profile=seed.profile,
-                          goal=seed.goal, constraints=seed.constraints,
-                          desc=seed.desc, **seed.kwargs)
+        teacher = Teacher(
+            name=seed.name,
+            profile=seed.profile,
+            goal=seed.goal,
+            constraints=seed.constraints,
+            desc=seed.desc,
+            **seed.kwargs
+        )
         assert teacher.name == seed.expect_name
         assert teacher.desc == seed.expect_desc
         assert teacher.profile == seed.expect_profile
@@ -79,16 +81,8 @@ def test_new_file_name():
         expect: str

     inputs = [
-        {
-            "lesson_title": "# @344\n12",
-            "ext": ".md",
-            "expect": "_344_12.md"
-        },
-        {
-            "lesson_title": "1#@$%!*&\\/:*?\"<>|\n\t \'1",
-            "ext": ".cc",
-            "expect": "1_1.cc"
-        }
+        {"lesson_title": "# @344\n12", "ext": ".md", "expect": "_344_12.md"},
+        {"lesson_title": "1#@$%!*&\\/:*?\"<>|\n\t '1", "ext": ".cc", "expect": "1_1.cc"},
     ]
     for i in inputs:
         seed = Inputs(**i)
@@ -96,6 +90,6 @@ def test_new_file_name():
         assert result == seed.expect


-if __name__ == '__main__':
+if __name__ == "__main__":
     test_init()
     test_new_file_name()
diff --git a/tests/metagpt/test_environment.py b/tests/metagpt/test_environment.py
index 29ca38f5a..933d74b97 100644
--- a/tests/metagpt/test_environment.py
+++ b/tests/metagpt/test_environment.py
@@ -9,6 +9,7 @@
 """

 import pytest
+
 from metagpt.actions import UserRequirement
 from metagpt.environment import Environment
 from metagpt.logs import logger
@@ -22,19 +23,16 @@ def env():


 def test_add_role(env: Environment):
-    role = ProductManager(name="Alice",
-                          profile="product manager",
-                          goal="create a new product",
-                          constraints="limited resources")
+    role = ProductManager(
+        name="Alice", profile="product manager", goal="create a new product", constraints="limited resources"
+    )
     env.add_role(role)
     assert env.get_role(role.profile) == role


 def test_get_roles(env: Environment):
-    role1 = Role(name="Alice", profile="product manager",
-                 goal="create a new product", constraints="limited resources")
-    role2 = Role(name="Bob", profile="engineer",
-                 goal="develop the new product", constraints="short deadline")
+    role1 = Role(name="Alice", profile="product manager", goal="create a new product", constraints="limited resources")
+    role2 = Role(name="Bob", profile="engineer", goal="develop the new product", constraints="short deadline")
     env.add_role(role1)
     env.add_role(role2)
     roles = env.get_roles()
@@ -43,10 +41,10 @@ def test_get_roles(env: Environment):

 @pytest.mark.asyncio
 async def test_publish_and_process_message(env: Environment):
-    product_manager = ProductManager(name="Alice", profile="Product Manager",
-                                     goal="做AI Native产品", constraints="资源有限")
-    architect = Architect(name="Bob", profile="Architect", goal="设计一个可用、高效、较低成本的系统,包括数据结构与接口",
-                          constraints="资源有限,需要节省成本")
+    product_manager = ProductManager(name="Alice", profile="Product Manager", goal="做AI Native产品", constraints="资源有限")
+    architect = Architect(
+        name="Bob", profile="Architect", goal="设计一个可用、高效、较低成本的系统,包括数据结构与接口", constraints="资源有限,需要节省成本"
+    )

     env.add_roles([product_manager, architect])
     env.publish_message(Message(role="User", content="需要一个基于LLM做总结的搜索引擎", cause_by=UserRequirement))
pytest.main([__file__, "-s"]) diff --git a/tests/metagpt/tools/test_sd_tool.py b/tests/metagpt/tools/test_sd_tool.py index 9003dbe9c..e457101a9 100644 --- a/tests/metagpt/tools/test_sd_tool.py +++ b/tests/metagpt/tools/test_sd_tool.py @@ -24,4 +24,3 @@ async def test_sd_engine_run_t2i(): await sd_engine.run_t2i(prompts=["test"]) img_path = CONFIG.workspace_path / "resources" / "SD_Output" / "output_0.png" assert os.path.exists(img_path) - diff --git a/tests/metagpt/tools/test_web_browser_engine_playwright.py b/tests/metagpt/tools/test_web_browser_engine_playwright.py index 5ebd7394e..cc6c09925 100644 --- a/tests/metagpt/tools/test_web_browser_engine_playwright.py +++ b/tests/metagpt/tools/test_web_browser_engine_playwright.py @@ -24,8 +24,9 @@ async def test_scrape_web_page(browser_type, use_proxy, kwagrs, url, urls, proxy try: if use_proxy: conf.global_proxy = proxy - browser = web_browser_engine_playwright.PlaywrightWrapper(options=conf.runtime_options, - browser_type=browser_type, **kwagrs) + browser = web_browser_engine_playwright.PlaywrightWrapper( + options=conf.runtime_options, browser_type=browser_type, **kwagrs + ) result = await browser.run(url) result = result.inner_text assert isinstance(result, str) diff --git a/tests/metagpt/utils/test_config.py b/tests/metagpt/utils/test_config.py index f38cddb0d..bd89f0ed3 100644 --- a/tests/metagpt/utils/test_config.py +++ b/tests/metagpt/utils/test_config.py @@ -33,6 +33,5 @@ def test_options(): assert config.options -if __name__ == '__main__': +if __name__ == "__main__": test_options() -