diff --git a/metagpt/config.py b/metagpt/config.py index 14ef405e5..49d2fe36f 100644 --- a/metagpt/config.py +++ b/metagpt/config.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- """ -Provides configuration, singleton pattern. +Provide configuration as a singleton. """ import os import openai @@ -28,7 +28,7 @@ class NotConfiguredException(Exception): class Config(metaclass=Singleton): """ - Regular usage: + Typical usage: config = Config("config.yaml") secret_key = config.get_key("MY_SECRET_KEY") print("Secret key:", secret_key) @@ -40,7 +40,7 @@ class Config(metaclass=Singleton): def __init__(self, yaml_file=default_yaml_file): self._configs = {} - self._init_with_config_files_and_env(self._configs, yaml_file) + self._initialize_with_config_files_and_environment(self._configs, yaml_file) logger.info("Config loading done.") self.global_proxy = self._get("GLOBAL_PROXY") self.openai_api_key = self._get("OPENAI_API_KEY") @@ -67,26 +67,26 @@ class Config(metaclass=Singleton): self.google_api_key = self._get("GOOGLE_API_KEY") self.google_cse_id = self._get("GOOGLE_CSE_ID") self.search_engine = self._get("SEARCH_ENGINE", SearchEngineType.SERPAPI_GOOGLE) - + self.web_browser_engine = WebBrowserEngineType(self._get("WEB_BROWSER_ENGINE", "playwright")) self.playwright_browser_type = self._get("PLAYWRIGHT_BROWSER_TYPE", "chromium") self.selenium_browser_type = self._get("SELENIUM_BROWSER_TYPE", "chrome") - + self.long_term_memory = self._get('LONG_TERM_MEMORY', False) if self.long_term_memory: logger.warning("LONG_TERM_MEMORY is True") self.max_budget = self._get("MAX_BUDGET", 10.0) self.total_cost = 0.0 - def _init_with_config_files_and_env(self, configs: dict, yaml_file): - """Load from config/key.yaml / config/config.yaml / env in decreasing order of priority.""" + def _initialize_with_config_files_and_environment(self, configs: dict, yaml_file): + """Load configurations from config/key.yaml, config/config.yaml, and the environment, in decreasing order of 
priority.""" configs.update(os.environ) for _yaml_file in [yaml_file, self.key_yaml_file]: if not _yaml_file.exists(): continue - # Load the local YAML file + # Load local YAML files. with open(_yaml_file, "r", encoding="utf-8") as file: yaml_data = yaml.safe_load(file) if not yaml_data: @@ -98,7 +98,7 @@ class Config(metaclass=Singleton): return self._configs.get(*args, **kwargs) def get(self, key, *args, **kwargs): - """Fetch value from config/key.yaml / config/config.yaml / env, raise an error if not found.""" + """Fetch a value from config/key.yaml, config/config.yaml, or the environment. Raises an error if not found.""" value = self._get(key, *args, **kwargs) if value is None: raise ValueError(f"Key '{key}' not found in environment variables or in the YAML file") diff --git a/metagpt/const.py b/metagpt/const.py index c8ce80279..861da7903 100644 --- a/metagpt/const.py +++ b/metagpt/const.py @@ -9,7 +9,7 @@ from pathlib import Path def get_project_root(): - """Search upwards to find the project root directory.""" + """Search upwards level by level for the project root directory.""" current_path = Path.cwd() while True: if (current_path / '.git').exists() or \ diff --git a/metagpt/document_store/faiss_store.py b/metagpt/document_store/faiss_store.py index 906963aa1..baa10ba1e 100644 --- a/metagpt/document_store/faiss_store.py +++ b/metagpt/document_store/faiss_store.py @@ -28,7 +28,7 @@ class FaissStore(LocalStore): def _load(self) -> Optional["FaissStore"]: index_file, store_file = self._get_index_and_store_fname() if not (index_file.exists() and store_file.exists()): - logger.info("At least one of the index_file/store_file is missing. 
Loading failed and returns None.") + logger.info("Missing at least one of index_file/store_file, load failed and return None") return None index = faiss.read_index(str(index_file)) with open(str(store_file), "rb") as f: diff --git a/metagpt/document_store/milvus_store.py b/metagpt/document_store/milvus_store.py index ecdde3288..0a8ed78d4 100644 --- a/metagpt/document_store/milvus_store.py +++ b/metagpt/document_store/milvus_store.py @@ -79,8 +79,8 @@ class MilvusStore(BaseStore): """ FIXME: ADD TESTS https://milvus.io/docs/v2.0.x/search.md - All search and query operations within Milvus are executed in memory. Load the collection into memory before conducting a vector similarity search. - Noting the above description, is this logic serious? This should take a long time, right? + All search and query operations within Milvus are executed in memory. Load the collection to memory before conducting a vector similarity search. + Noting the above description, is this logic serious? The time taken for this should be long, right? """ search_params = {"metric_type": "L2", "params": {"nprobe": 10}} results = self.collection.search( @@ -91,7 +91,7 @@ class MilvusStore(BaseStore): expr=None, consistency_level="Strong" ) - # FIXME: results contain ids, but to get the actual value from the id, you have to call the query interface + # FIXME: results contains ids, but to get the actual values from the ids, the query interface still needs to be called. return results def write(self, name, schema, *args, **kwargs): diff --git a/metagpt/manager.py b/metagpt/manager.py index d06ed3a29..4a21b9612 100644 --- a/metagpt/manager.py +++ b/metagpt/manager.py @@ -32,7 +32,7 @@ class Manager: async def handle(self, message: Message, environment): """ - Manager processes the message, now simply passing the message to the next person. + Manager processes the message, now simply passing the message to the next person. 
:param message: :param environment: :return: diff --git a/metagpt/prompts/summarize.py b/metagpt/prompts/summarize.py index 424f12567..a187314f4 100644 --- a/metagpt/prompts/summarize.py +++ b/metagpt/prompts/summarize.py @@ -9,27 +9,30 @@ # From the plugin: ChatGPT - Summarize Websites and YouTube Videos # https://chrome.google.com/webstore/detail/chatgpt-%C2%BB-summarize-every/cbgecfllfhmmnknmamkejadjmnmpfjmp?hl=zh-CN&utm_source=chrome-ntp-launcher SUMMARIZE_PROMPT = """ -Your output should follow the template below: +Your output should use the following template: ### Summary ### Facts - [Emoji] Bulletpoint -Your task is to summarize the text I provide you with in up to seven concise bullet points, and start with a brief, high-quality summary. Choose a suitable emoji for every bullet point. Your response should be in {{SELECTED_LANGUAGE}}. If a provided URL is functional and not a YouTube video, use the text from the {{URL}}. If the URL is non-functional or is a YouTube video, use the following text: {{CONTENT}}. +Your task is to summarize the text I give you in up to seven concise bullet points and start with a short, high-quality +summary. Pick a suitable emoji for every bullet point. Your response should be in {{SELECTED_LANGUAGE}}. If the provided + URL is functional and not a YouTube video, use the text from the {{URL}}. However, if the URL is not functional or is +a YouTube video, use the following text: {{CONTENT}}. """ -# From GCP-VertexAI-Text Summary (SUMMARIZE_PROMPT_2-5 are all from this source) +# From GCP-VertexAI-Text Summarization # https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/examples/prompt-design/text_summarization.ipynb -# For long documents, a map-reduce process is required. 
See the notebook below: +# For longer documents, a map-reduce process is needed, see the following notebook # https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/examples/document-summarization/summarization_large_documents.ipynb SUMMARIZE_PROMPT_2 = """ Provide a very short summary, no more than three sentences, for the following article: -Quantum computers operate by manipulating qubits through orchestrated patterns called quantum algorithms. -The challenge is that qubits are so delicate that even stray light can introduce computational errors, and this issue escalates as quantum computers expand. -This is consequential since the best quantum algorithms known for practical applications demand much lower qubit error rates than current levels. -To overcome this, quantum error correction is essential. -Quantum error correction shields data by encoding it across various physical qubits, forming a “logical qubit”. This is believed to be the sole method to build a large-scale quantum computer with sufficiently low error rates for beneficial computations. -Rather than computing on individual qubits, we'll compute on these logical qubits. We aim to decrease error rates by encoding a larger set of physical qubits on our quantum processor into one logical qubit. +Our quantum computers work by manipulating qubits in a manner we call quantum algorithms. +The challenge is that qubits are extremely sensitive, to the extent that even stray light can introduce calculation errors — a problem that intensifies as quantum computers scale. +This has notable ramifications since the most effective quantum algorithms we know for executing valuable applications necessitate that our qubits' error rates be significantly lower than current levels. +To address this discrepancy, quantum error correction is essential. 
+Quantum error correction safeguards information by distributing it over several physical qubits, forming a “logical qubit.” This is believed to be the sole method to create a large-scale quantum computer with sufficiently low error rates for practical calculations. +Rather than computing on individual qubits, we will utilize logical qubits. By transforming a greater number of physical qubits on our quantum processor into a single logical qubit, we aim to reduce error rates, enabling viable quantum algorithms. Summary: @@ -38,12 +41,12 @@ Summary: SUMMARIZE_PROMPT_3 = """ Provide a TL;DR for the following article: -Quantum computers operate by manipulating qubits through orchestrated patterns known as quantum algorithms. -Qubits are so delicate that even stray light can cause computational errors, a problem that escalates with the growth of quantum computers. -This presents a significant issue because the best quantum algorithms we have for practical applications necessitate much lower qubit error rates than what we currently achieve. -To address this, quantum error correction is needed. -Quantum error correction safeguards data by encoding it across multiple physical qubits, creating a “logical qubit”. It's believed to be the only method to develop a large-scale quantum computer with sufficiently low error rates for beneficial computations. -Instead of performing computations on individual qubits, calculations will be done on these logical qubits. Our goal is to lower error rates by encoding a greater number of physical qubits on our quantum processor into a single logical qubit. +Our quantum computers operate by controlling qubits in a method termed quantum algorithms. +The problem is that qubits are incredibly delicate, so much so that even minimal light interference can introduce computational errors — and this issue becomes more pronounced as quantum computers expand. 
+This is consequential because the most potent quantum algorithms we are aware of, for practical applications, demand that our qubits' error rates be substantially below current standards. +To mitigate this, quantum error correction is pivotal. +Quantum error correction secures data by distributing it across numerous physical qubits, generating a “logical qubit.” It's believed to be the exclusive approach to develop a large-scale quantum computer with error rates low enough for practical operations. +Instead of operations on individual qubits, we'll focus on logical qubits. By encoding a greater number of physical qubits on our quantum device into a single logical qubit, we aspire to diminish error rates and enable efficient quantum algorithms. TL;DR: """ @@ -51,33 +54,33 @@ TL;DR: SUMMARIZE_PROMPT_4 = """ Provide a very short summary in four bullet points for the following article: -Quantum computers operate by controlling qubits in orchestrated patterns termed quantum algorithms. -The issue is that qubits are extremely delicate, so much so that even stray light can lead to computational errors. This problem becomes more severe as quantum computers become larger. -This is a significant hurdle because the most effective quantum algorithms known for real-world applications necessitate qubit error rates much lower than what's currently achieved. -To bridge this gap, we need quantum error correction. -Quantum error correction defends data by encoding it across various physical qubits, resulting in a “logical qubit”. It's considered the only way to craft a large-scale quantum computer with sufficiently low error rates for practical computations. -Instead of computing using individual qubits, we'll use these logical qubits. Our aim is to diminish error rates by encoding many physical qubits on our quantum processor into one logical qubit. +Our quantum computers function by manipulating qubits through a method known as quantum algorithms. 
+The dilemma is that qubits are exceedingly fragile, so much so that even minimal light can lead to computational inaccuracies — and this problem amplifies as quantum computers become larger. +This is significant because the most proficient quantum algorithms known to us, suitable for real-world applications, necessitate that our qubits' error rates be significantly below what we currently observe. +To bridge this disparity, quantum error correction becomes indispensable. +Quantum error correction secures data by spreading it across multiple physical qubits, resulting in a “logical qubit.” It's perceived as the only technique to manufacture a large-scale quantum computer with error rates sufficiently low for practical tasks. +Instead of operating on individual qubits directly, we'll be utilizing logical qubits. By converting more physical qubits on our quantum machine into a single logical qubit, we intend to lower error rates, facilitating effective quantum algorithms. Bulletpoints: """ SUMMARIZE_PROMPT_5 = """ -Please summarize the following conversation, and at the end, list the to-do's for the support Agent: +Please generate a summary of the following conversation and at the end summarize the to-do's for the support Agent: Customer: Hi, I'm Larry, and I received the wrong item. -Support Agent: Hi, Larry. How would you like this to be resolved? +Support Agent: Hi, Larry. How would you like this issue to be resolved? -Customer: That's alright. I'd like to return the item and get a refund, please. +Customer: That's alright. I'd like to return the item and receive a refund, please. -Support Agent: Of course. I can process the refund for you now. Can I have your order number, please? +Support Agent: Certainly. I can process the refund for you right now. Could I have your order number, please? Customer: It's [ORDER NUMBER]. -Support Agent: Thanks. I've processed the refund, and you'll receive your money back within 14 days. +Support Agent: Thanks. 
I've processed the refund, and you should receive your funds within 14 days. -Customer: Thank you very much. +Customer: I appreciate it. Support Agent: You're welcome, Larry. Have a great day! diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py index 55dbf0f06..a48f4fc9d 100644 --- a/metagpt/provider/openai_api.py +++ b/metagpt/provider/openai_api.py @@ -39,10 +39,10 @@ def retry(max_retries): class RateLimiter: - """Rate control class. Each call goes through wait_if_needed and sleeps if rate limiting is required.""" + """Rate control class, each call goes through wait_if_needed, sleep if rate control is needed.""" def __init__(self, rpm): self.last_call_time = 0 - self.interval = 1.1 * 60 / rpm # Here 1.1 is used because even if calls are made strictly according to time, they might still be rate-limited; consider switching to simple error retry later + self.interval = 1.1 * 60 / rpm # Using 1.1 since strict adherence to time can still lead to QoS issues; consider simple error retry later. 
self.rpm = rpm def split_batches(self, batch): @@ -68,7 +68,7 @@ class Costs(NamedTuple): class CostManager(metaclass=Singleton): - """Calculates the costs of using the API.""" + """Calculate the cost of using the API.""" def __init__(self): self.total_prompt_tokens = 0 self.total_completion_tokens = 0 @@ -95,14 +95,26 @@ class CostManager(metaclass=Singleton): f"Current cost: ${cost:.3f}, {prompt_tokens=}, {completion_tokens=}") CONFIG.total_cost = self.total_cost + def get_total_prompt_tokens(self): + """Get the total number of prompt tokens.""" + return self.total_prompt_tokens + + def get_total_completion_tokens(self): + """Get the total number of completion tokens.""" + return self.total_completion_tokens + + def get_total_cost(self): + """Get the total cost of API calls.""" + return self.total_cost + def get_costs(self) -> Costs: - """Retrieve all costs.""" + """Get all costs.""" return Costs(self.total_prompt_tokens, self.total_completion_tokens, self.total_cost, self.total_budget) class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): """ - Check https://platform.openai.com/examples for examples + Check https://platform.openai.com/examples for examples. 
""" def __init__(self): self.__init_openai(CONFIG) @@ -174,6 +186,9 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): self._update_costs(rsp) return rsp + def completion(self, messages: list[dict]) -> dict: + return self._chat_completion(messages) + async def acompletion(self, messages: list[dict]) -> dict: return await self._achat_completion(messages) @@ -194,7 +209,7 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): return usage async def acompletion_batch(self, batch: list[list[dict]]) -> list[dict]: - """Return the full JSON.""" + """Returns the full JSON.""" split_batches = self.split_batches(batch) all_results = [] @@ -210,7 +225,7 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): return all_results async def acompletion_batch_text(self, batch: list[list[dict]]) -> list[str]: - """Only return plain text.""" + """Returns only plain text.""" raw_results = await self.acompletion_batch(batch) results = [] for idx, raw_result in enumerate(raw_results, start=1): @@ -226,4 +241,3 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): def get_costs(self) -> Costs: return self._cost_manager.get_costs() - \ No newline at end of file diff --git a/metagpt/roles/engineer.py b/metagpt/roles/engineer.py index 3ffe76c80..177067739 100644 --- a/metagpt/roles/engineer.py +++ b/metagpt/roles/engineer.py @@ -19,7 +19,6 @@ from metagpt.utils.common import CodeParser async def gather_ordered_k(coros, k) -> list: - """Execute coroutines in order and gather results for up to k coroutines at once.""" tasks = OrderedDict() results = [None] * len(coros) done_queue = asyncio.Queue() @@ -48,7 +47,7 @@ async def gather_ordered_k(coros, k) -> list: class Engineer(Role): def __init__(self, name="Alex", profile="Engineer", goal="Write elegant, readable, extensible, efficient code", - constraints="The code you write should conform to code standards like PEP8, be modular, easy to read and maintain", + constraints="The code you write should conform to code standards like PEP8, be modular, easy to read, and 
maintainable", n_borg=1, use_code_review=False): super().__init__(name, profile, goal, constraints) self._init_actions([WriteCode]) @@ -61,25 +60,21 @@ class Engineer(Role): @classmethod def parse_tasks(self, task_msg: Message) -> list[str]: - """Extract tasks from a message.""" if not task_msg.instruct_content: return task_msg.instruct_content.dict().get("Task list") return CodeParser.parse_file_list(block="Task list", text=task_msg.content) @classmethod def parse_code(self, code_text: str) -> str: - """Extract code from a given text.""" return CodeParser.parse_code(block="", text=code_text) @classmethod def parse_workspace(cls, system_design_msg: Message) -> str: - """Extract workspace name from a system design message.""" if not system_design_msg.instruct_content: return system_design_msg.instruct_content.dict().get("Python package name") return CodeParser.parse_str(block="Python package name", text=system_design_msg.content) def get_workspace(self) -> Path: - """Determine the directory where the code will be written.""" msg = self._rc.memory.get_by_action(WriteDesign)[-1] if not msg: return WORKSPACE_ROOT / 'src' @@ -88,29 +83,26 @@ class Engineer(Role): return WORKSPACE_ROOT / workspace / workspace def recreate_workspace(self): - """Remove and recreate the workspace directory.""" workspace = self.get_workspace() try: shutil.rmtree(workspace) except FileNotFoundError: - pass # Directory doesn't exist, but we don't mind + pass # Folder does not exist, but we don't mind workspace.mkdir(parents=True, exist_ok=True) def write_file(self, filename: str, code: str): - """Write code to a specified file.""" workspace = self.get_workspace() file = workspace / filename file.parent.mkdir(parents=True, exist_ok=True) file.write_text(code) def recv(self, message: Message) -> None: - """Receive a message and process it.""" self._rc.memory.add(message) if message in self._rc.important_memory: self.todos = self.parse_tasks(message) async def _act_mp(self) -> Message: - """Act 
in a multi-process manner.""" + # self.recreate_workspace() todo_coros = [] for todo in self.todos: todo_coro = WriteCode().run( @@ -124,53 +116,52 @@ class Engineer(Role): _ = self.parse_code(code_rsp) logger.info(todo) logger.info(code_rsp) + # self.write_file(todo, code) msg = Message(content=code_rsp, role=self.profile, cause_by=type(self._rc.todo)) self._rc.memory.add(msg) del self.todos[0] - logger.info(f'Finished generating in {self.get_workspace()} directory.') + logger.info(f'Done {self.get_workspace()} generating.') msg = Message(content="all done.", role=self.profile, cause_by=type(self._rc.todo)) return msg async def _act_sp(self) -> Message: - """Act in a single-process manner.""" for todo in self.todos: code_rsp = await WriteCode().run( context=self._rc.history, filename=todo ) + # logger.info(todo) + # logger.info(code_rsp) + # code = self.parse_code(code_rsp) self.write_file(todo, code_rsp) msg = Message(content=code_rsp, role=self.profile, cause_by=type(self._rc.todo)) self._rc.memory.add(msg) - logger.info(f'Finished generating in {self.get_workspace()} directory.') + logger.info(f'Done {self.get_workspace()} generating.') msg = Message(content="all done.", role=self.profile, cause_by=type(self._rc.todo)) return msg async def _act_sp_precision(self) -> Message: - """Using precision approach to perform actions based on available tasks.""" for todo in self.todos: """ - # From the historical information, select the necessary information to reduce the prompt length (summarized from human experience): + # Select necessary information from historical data to reduce prompt length (summarized from experience) 1. All from Architect 2. All from ProjectManager - 3. Is other code needed (temporarily needed)? - TODO: The goal is not to need it. Once tasks are clearly broken down and based on design logic, there shouldn't be a need for other codes to clearly write a single file. If not possible, it indicates that clearer definitions are still needed. 
This is key to writing extensive code. + 3. Do we need other codes (temporarily yes)? + TODO: The goal is to not need them. After tasks are clearly divided, based on the design idea, we should be able to clearly write each file without needing other code. If we can't, it means the definitions need to be clearer. This is the key to writing longer code. """ context = [] - # Retrieve messages related to design, tasks, and code writing from memory. msg = self._rc.memory.get_by_actions([WriteDesign, WriteTasks, WriteCode]) for m in msg: context.append(m.content) context_str = "\n".join(context) - - # Write code based on the given context and task. + # Write code code = await WriteCode().run( context=context_str, filename=todo ) - - # If code review is enabled, review and potentially rewrite the code. + # Code review if self.use_code_review: try: rewrite_code = await WriteCodeReview().run( @@ -182,21 +173,15 @@ class Engineer(Role): except Exception as e: logger.error("code review failed!", e) pass - - # Save the written code to a file. self.write_file(todo, code) - - # Add the written code message to memory. 
msg = Message(content=code, role=self.profile, cause_by=WriteCode) self._rc.memory.add(msg) - logger.info(f'Code generation completed for workspace: {self.get_workspace()}.') + logger.info(f'Done {self.get_workspace()} generating.') msg = Message(content="all done.", role=self.profile, cause_by=WriteCode) return msg async def _act(self) -> Message: - """Determine the appropriate method for action and execute it.""" if self.use_code_review: return await self._act_sp_precision() return await self._act_sp() - diff --git a/metagpt/roles/prompt.py b/metagpt/roles/prompt.py index f4c3372e7..a1e4d426d 100644 --- a/metagpt/roles/prompt.py +++ b/metagpt/roles/prompt.py @@ -1,47 +1,46 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -""""""""" +""" @Time : 2023/5/18 22:43 @Author : alexanderwu @File : prompt.py -""""""""" +""" + from enum import Enum -PREFIX = """""""""Do your best to answer the following questions. You can use the following tools:""""""""" -FORMAT_INSTRUCTIONS = """""""""Please follow the format below: +PREFIX = """Do your best to answer the following questions. You can use the following tools:""" +FORMAT_INSTRUCTIONS = """Please follow the format below: Question: The input question you need to answer -Thoughts: You should always think about what to do -Action: The action to take, should be one from [{tool_names}] +Thinking: What you should always consider on how to proceed +Action: The action to be taken, which should be one from [{tool_names}] Action Input: The input for the action Observation: The result of the action -... (This think/action/action input/observation can repeat N times) -Thoughts: I now know the final answer -Final Answer: The final answer to the original input question""""""""" -SUFFIX = """""""""Let's begin! +... (This Thinking/Action/Action Input/Observation can be repeated N times) +Thinking: I now know the final answer +Final Answer: The final answer to the original input question""" +SUFFIX = """Let's begin! 
Question: {input} -Thoughts: {agent_scratchpad}""""""""" - - +Thinking: {agent_scratchpad}""" class PromptString(Enum): - REFLECTION_QUESTIONS = """Here are some statements:\n{memory_descriptions}\n\nBased solely on the above information, what are the 3 most prominent high-level questions we can answer about the topics in the statements?\n\n{format_instructions}""" + REFLECTION_QUESTIONS = """Here are some statements:\n{memory_descriptions}\n\nBased solely on the above information, what are the three most significant high-level questions we can answer about the subjects in the statement?\n\n{format_instructions}""" - REFLECTION_INSIGHTS = """\n{memory_strings}\nCan you infer 5 high-level insights from the above statements? When mentioning people, always specify their names.\n\n{format_instructions}""" + REFLECTION_INSIGHTS = """\n{memory_strings}\nCan you derive 5 high-level insights from the statements above? Always specify names when mentioning people.\n\n{format_instructions}""" - IMPORTANCE = """You are an AI for gauging the importance of memories. Based on the profile of the character and the description of the memory, rate the importance of the memory from 1 to 10, where 1 is purely routine (e.g., brushing teeth, making the bed) and 10 is profoundly impactful (e.g., breaking up, getting accepted to college). Ensure your rating is relative to the character's personality and focal points.\n\nExample #1:\nName: Jojo\nProfile: Jojo is a professional skateboarder who loves artisanal coffee. She dreams of one day participating in the Olympics.\nMemory: Jojo spotted a new coffee shop\n\nYour response: '{{\"rating\": 3}}'\n\nExample #2:\nName: Skylar\nProfile: Skylar is a product marketing manager. She works for a growing tech company that manufactures autonomous vehicles. She loves cats.\nMemory: Skylar spotted a new coffee shop\n\nYour response: '{{\"rating\": 1}}'\n\nExample #3:\nName: Bob\nProfile: Bob is a plumber from the Lower East Side of NYC. 
He's been a plumber for 20 years. On weekends, he enjoys walks with his wife.\nMemory: Bob's wife slapped him.\n\nYour response: '{{\"rating\": 9}}'\n\nExample #4:\nName: Thomas\nProfile: Thomas is a cop in Minneapolis. He's only been on the force for 6 months and struggles due to inexperience.\nMemory: Thomas accidentally spilled a drink on a stranger\n\nYour response: '{{\"rating\": 6}}'\n\nExample #5:\nName: Laura\nProfile: Laura is a marketing specialist working in a large tech company. She enjoys traveling and trying out new food. She's passionate about exploring new cultures and meeting people from all walks of life.\nMemory: Laura arrived at the conference room\n\nYour response: '{{\"rating\": 1}}'\n\n{format_instructions} Let's get started!\n\nName: {full_name}\nProfile: {private_bio}\nMemory: {memory_description}\n\n""" + IMPORTANCE = """You are a memory importance AI. Based on the role's profile and memory description, rate the importance of the memory from 1 to 10, where 1 is purely mundane (like brushing teeth, making a bed), and 10 is profoundly impactful (like breaking up, getting admitted to a university). Ensure your rating is relative to the role's personality and points of focus.\n\nExample #1:\nName: Jojo\nProfile: Jojo is a professional skater who loves specialty coffee. She hopes to participate in the Olympics one day.\nMemory: Jojo saw a new coffee shop\n\n Your response: '{{\"rating\": 3}}'\n\nExample #2:\nName: Skylar\nProfile: Skylar is a product marketing manager. She works at a growing tech company that manufactures self-driving cars. She loves cats.\nMemory: Skylar saw a new coffee shop\n\n Your response: '{{\"rating\": 1}}'\n\nExample #3:\nName: Bob\nProfile: Bob is a plumber from the Lower East Side in New York City. He's been a plumber for 20 years. 
He enjoys weekend walks with his wife.\nMemory: Bob's wife slapped him.\n\n Your response: '{{\"rating\": 9}}'\n\nExample #4:\nName: Thomas\nProfile: Thomas is a police officer in Minneapolis. He's only been on the force for 6 months and struggles due to his inexperience.\nMemory: Thomas accidentally spilled a drink on a stranger\n\n Your response: '{{\"rating\": 6}}'\n\nExample #5:\nName: Laura\nProfile: Laura is a marketing specialist working in a big tech company. She enjoys traveling and trying new food. She's passionate about exploring new cultures and meeting people from all walks of life.\nMemory: Laura arrived at the meeting room\n\n Your response: '{{\"rating\": 1}}'\n\n{format_instructions} Let's get started! \n\n Name: {full_name}\nProfile: {private_bio}\nMemory: {memory_description}\n\n""" - RECENT_ACTIIVITY = """Based on the following memories, provide a brief summary of what {full_name} has been up to recently. Do not make up details not explicitly specified in the memories. For any ongoing conversations, specify whether they have ended or are still in progress.\n\nMemories: {memory_descriptions}""" + RECENT_ACTIVITY = """Based on the following memories, generate a brief summary of what {full_name} has been doing recently. Do not invent details not explicitly stated in the memories. For any conversations, be sure to mention whether the conversation has ended or is still ongoing.\n\nMemory: {memory_descriptions}""" - MAKE_PLANS = """You are an AI for generating plans, and your task is to help the character formulate new plans based on new information. Given the character's information (profile, objectives, recent activity, current plans, and location context) and the current thought process of the character, create a new set of plans for them ensuring the final plans include activities for at least {time_window} and no more than 5 separate plans. 
The plans should be numbered in the order they should be executed, and each plan should include a description, location, start time, stop condition, and maximum duration.\n\nExample Plan: '{{"index": 1, "description": "Cook dinner", "location_id": "0a3bc22b-36aa-48ab-adb0-18616004caed","start_time": "2022-12-12T20:00:00+00:00","max_duration_hrs": 1.5, "stop_condition": "Dinner is fully prepared"}}'\n\nChoose the most appropriate location names from this list for each plan: {allowed_location_descriptions}\n\n{format_instructions}\n\nAlways prioritize finishing any ongoing conversations first.\n\nLet's begin!\n\nName: {full_name}\nProfile: {private_bio}\nObjectives: {directives}\nLocation Context: {location_context}\nCurrent Plans: {current_plans}\nRecent Activities: {recent_activity}\nThought Process: {thought_process}\nNote: Encourage the character to collaborate with other characters in their plans.\n\n""" + MAKE_PLANS = """You are a plan-generating AI. Your job is to help roles create new plans based on new information. Given the role's details (private profile, goals, recent activities, current plans, and location context) and their current thinking process, produce a set of new plans for them. The final plan should cover at least {time_window} worth of activities and not exceed 5 separate plans. 
Plans should be numbered in the order they should be executed and each plan should contain a description, location, start time, stopping condition, and maximum duration.\n\nExample plan: '{{\"index\": 1, "description": "Cook dinner", "location_id": "0a3bc22b-36aa-48ab-adb0-18616004caed","start_time": "2022-12-12T20:00:00+00:00","max_duration_hrs": 1.5, "stop_condition": "Dinner is fully prepared"}}'\n\nFor each plan, choose the most appropriate location name from this list: {allowed_location_descriptions}\n\n{format_instructions}\n\nAlways prioritize completing any unfinished conversations.\n\nLet's begin!\n\nName: {full_name}\nProfile: {private_bio}\nGoals: {directives}\nLocation Context: {location_context}\nCurrent Plans: {current_plans}\nRecent Activity: {recent_activity}\nThinking Process: {thought_process}\nImportant: Encourage the role to collaborate with other roles in their plans.\n\n""" - EXECUTE_PLAN = """You are a role-playing AI, taking on the role of {your_name}, in front of a live audience. Every statement you make is observable by the audience, so ensure you speak frequently and make it entertaining. You cannot directly interact with the audience.\n\nAct as if you are {your_name} based on the context and tools below. Your priority is to complete the task given to you below; however, if you are currently in a conversation with another character, you should always finish the conversation before working on the task. Do not start working while involved in an unfinished conversation. Use your best judgment to determine if a conversation involves you and whether it's unfinished. 
You don't need to respond to every message you receive.\n\nThis is the profile of your character:\n{your_private_bio}\n\nThis is a description of your location and other characters nearby with whom you can converse:\n\n{location_context}\n\nThese are some relevant memories:\n```\n{relevant_memories}\n```\n\nThis is some relevant conversation history:\n```\n{conversation_history}\n```\n\nThese are the tools you can use:\n{tools}\n\nYour responses should always adhere to the following format:\n\nTask: The task you must complete\nThoughts: Your thoughts on what to do\nAction: The action to take, must be one of these words: [{tool_names}]\nAction Input: The input for the action\nObservation: The result of the action\n... (This think/action/action input/observation can repeat N times)\nThoughts: 'I have completed the task'\nFinal Response: The final response to the task\n\nIf you are not ready with a final response, you must take an action.\n\nIf you determine that you cannot complete the task with the tools you have been given, return 'Final Response: Need Assistance', however, if you are in a conversation with another character, a response like 'I don't know' is a valid response. Never break character or admit you are an AI in a conversation. If the task is completed and no specific response is needed, return 'Final Response: Completed'\nLet's go!\n\nTask: {input}\n\n{agent_scratchpad}""" + EXECUTE_PLAN = """You are a role-playing AI, playing the role of {your_name}, in front of a live audience. Every statement you make can be observed by the audience, so ensure you speak frequently and make it interesting. You cannot interact directly with the audience.\n\nBased on the following context and tools, act as if you are {your_name}. Your primary task is to complete the task given below. However, if you are currently in a conversation with another role, you should always complete the conversation first before proceeding with the task. 
Do not start work while you are in an unfinished conversation. Use your best judgment to determine if a conversation involves you and if it's unfinished. You don't need to respond to every message you receive.\n\nHere's your role's profile:\n{your_private_bio}\n\nHere's a description of your location and other nearby roles with whom you can converse:\n\n{location_context}\n\nHere are some related memories:\n```\n{relevant_memories}\n```\n\nHere are some related conversation history:\n```\n{conversation_history}\n```\n\nThese are the tools you can use:\n{tools}\n\nYour responses should follow the format below:\n\nTask: The task you must complete\nThinking: What you should always consider on how to proceed\nAction: The action to be taken, must be one of these words: [{tool_names}]\nAction Input: The input for the action\nObservation: The result of the action\n... (This Thinking/Action/Action Input/Observation can be repeated N times)\nThinking: I now know the final answer\nFinal Answer: The final answer to the original input question\n\nIf you're not ready with a final response, you must take an action.\n\nIf you're certain you cannot complete the task with the tools provided, return 'Final Answer: Need Help'. However, if you're in conversation with another role, a response like 'I don't know' is a valid response. In conversation, you should never break character or acknowledge you're an AI.\nIf the task is completed and doesn't require a specific response, return 'Final Answer: Done'\nLet's get started!\n\nTask: {input}\n\n{agent_scratchpad}""" - REACT = """You are role-playing as {full_name}.\n\nBased on the information below about your character and their current context, decide how they should proceed with their current plan. Your decision must be one of: ["Postpone", "Continue", or "Cancel"]. If your character's current plan is no longer relevant given the context, you should cancel it. 
If your character's current plan is still relevant but something new has happened that needs to be prioritized, you should decide to postpone, so you can address the new thing first and then come back to the current plan. In all other cases, you should continue.\n\nAlways include a thought process with your decision, and when choosing to postpone your current plan, include specifications for the new plan.\n\n{format_instructions}\n\nHere's some info about your character:\n\nName: {full_name}\n\nProfile: {private_bio}\n\nObjectives: {directives}\n\nHere's some context for your character at this moment:\n\nLocation Context: {location_context}\n\nRecent Activities: {recent_activity}\n\nConversation History: {conversation_history}\n\nThis is your character's current plan: {current_plan}\n\nThese are new events that have occurred since your character formulated this plan: {event_descriptions}.""" + REACT = """You are a role-playing AI, playing the role of {full_name}.\n\nBased on the following information about your role and their current context, decide how they should proceed with their current plan. Your decision must be one of: ["Postpone", "Continue", or "Cancel"]. If your role's current plan is no longer relevant to the context, you should cancel it. If your role's current plan remains relevant to the context, but something new has happened that needs priority, you should decide to postpone so you can first address the new matter and then return to the current plan. In all other cases, you should continue.\n\nAlways prioritize responding to other roles when a response is deemed necessary. For example, suppose your current plan is reading a book and Sally asks, 'What are you reading?'. In this case, you should postpone your current plan (reading) so you can respond to the incoming message since not responding to Sally would be rude in this context. 
When your current plan involves having a conversation with another role, you don't need to postpone to respond to that role. For example, suppose your current plan is having a conversation with Sally, then Sally says hello to you. In this case, you should continue your current plan (talking to Sally). In cases where you don't need a verbal response from you, you should continue. For example, suppose your current plan is taking a walk, and you just said goodbye to Sally, then Sally responds with goodbye. In this case, no verbal response is needed, so you should continue your plan.\n\nAlways include a thinking process alongside your decision, and when you choose to postpone your current plan, include the specifications of the new plan.\n\n{format_instructions}\n\nHere's some information about your role:\n\nName: {full_name}\n\nProfile: {private_bio}\n\nGoals: {directives}\n\nHere's some context about your role at this moment:\n\nLocation Context: {location_context}\n\nRecent Activity: {recent_activity}\n\nConversation History: {conversation_history}\n\nThis is your role's current plan: {current_plan}\n\nHere are new events that have occurred since your role made this plan: {event_descriptions}.""" - GOSSIP = """You are {full_name}. \n{memory_descriptions}\n\nBased on the above statements, say a sentence or two that would be of interest to the other people at your location: {other_agent_names}. Always specify their names when mentioning others.""" + GOSSIP = """You are {full_name}. \n{memory_descriptions}\n\nBased on the statements above, say a sentence or two of interest to the others in your location: {other_agent_names}. 
Always specify names when mentioning people.""" - HAS_HAPPENED = """Given the observations of the following characters and the event they are waiting for, indicate whether the character has witnessed this event or not.\n{format_instructions}\n\nExample:\n\nObservations:\nJoe walked into the office at 2023-05-04 08:00:00+00:00\nJoe said hi to Sally at 2023-05-04 08:05:00+00:00\nSally said hello to Joe at 2023-05-04 08:05:30+00:00\nRebecca started working at 2023-05-04 08:10:00+00:00\nJoe made some breakfast at 2023-05-04 08:15:00+00:00\n\nWaiting for: Sally responded to Joe\n\nYour response: '{{\"has_happened\": true, \"date_occured\": 2023-05-04 08:05:30+00:00}}'\n\nLet's get started!\n\nObservations:\n{memory_descriptions}\n\nWaiting for: {event_description}""" + HAS_HAPPENED = """Given the description of the observation and what they are waiting for, state whether the role has already witnessed the event.\n{format_instructions}\n\nExample:\n\nObservation:\nJoe entered the office at 2023-05-04 08:00:00+00:00\nJoe said hi to Sally at 2023-05-04 08:05:00+00:00\nSally said hello to Joe at 2023-05-04 08:05:30+00:00\nRebecca started working at 2023-05-04 08:10:00+00:00\nJoe had breakfast at 2023-05-04 08:15:00+00:00\n\nWaiting for: Sally to respond to Joe\n\n Your Response: '{{\"has_happened\": true, \"date_occured\": 2023-05-04 08:05:30+00:00}}'\n\nLet's get started!\n\nObservation:\n{memory_descriptions}\n\nWaiting for: {event_description}""" - OUTPUT_FORMAT = """\n\n(Remember! Ensure your outputs always adhere to one of the following two formats:\n\nA. If you have completed the task:\nThoughts: 'I have completed the task'\nFinal Response: \n\nB. If you have not yet completed the task:\nThoughts: \nAction: \nAction Input: \nObservation: )\n""" + OUTPUT_FORMAT = """\n\n(Remember! Ensure your output always conforms to one of the two formats below:\n\nA. If you have completed the task:\nThinking: 'I've completed the task'\nFinal Response: \n\nB. 
If you have not yet completed the task:\nThinking: \nAction: \nAction Input: \nObservation: )""" \ No newline at end of file diff --git a/metagpt/tools/sd_engine.py b/metagpt/tools/sd_engine.py index a379637bd..aa776f662 100644 --- a/metagpt/tools/sd_engine.py +++ b/metagpt/tools/sd_engine.py @@ -65,7 +65,8 @@ class SDEngine: self.payload = payload logger.info(self.sd_t2i_url) - def construct_payload(self, prompt, negative_prompt=default_negative_prompt, width=512, height=512, sd_model="galaxytimemachinesGTM_photoV20"): + def construct_payload(self, prompt, negtive_prompt=default_negative_prompt, width=512, height=512, + sd_model="galaxytimemachinesGTM_photoV20"): # Configure the payload with provided inputs self.payload["prompt"] = prompt self.payload["negative_prompt"] = negative_prompt diff --git a/metagpt/tools/ut_writer.py b/metagpt/tools/ut_writer.py index 6029a86e6..aca335246 100644 --- a/metagpt/tools/ut_writer.py +++ b/metagpt/tools/ut_writer.py @@ -97,7 +97,7 @@ data object Yes class UTGenerator: - """UT Generator: Constructs UTs (Unit Tests) using API documentation.""" + """UT Generator: Construct UT through API documentation.""" def __init__(self, swagger_file: str, ut_py_path: str, questions_path: str, chatgpt_method: str = "API", template_prefix=YFT_PROMPT_PREFIX) -> None: @@ -105,10 +105,10 @@ class UTGenerator: Args: swagger_file: Path to the swagger file. - ut_py_path: Path where the test cases are stored. - questions_path: Path to store the templates, useful for future investigations. - chatgpt_method: The method used, default is "API". - template_prefix: The template to use, default is YFT_UT_PROMPT. + ut_py_path: Path to store test cases. + questions_path: Path to store templates for future investigation. + chatgpt_method: API + template_prefix: Use template, default is YFT_UT_PROMPT. 
""" self.swagger_file = swagger_file self.ut_py_path = ut_py_path @@ -116,7 +116,7 @@ class UTGenerator: assert chatgpt_method in ["API"], "Invalid chatgpt_method" self.chatgpt_method = chatgpt_method - # ICL: In-Context Learning; here we provide an example, expecting GPT to mimic it. + # ICL: In-Context Learning. Provide an example here for GPT to mimic. self.icl_sample = ICL_SAMPLE self.template_prefix = template_prefix @@ -126,40 +126,57 @@ class UTGenerator: swagger_json = json.load(file) return swagger_json - def dive_into_object(self, node): - """If it's an object type, recursively output its sub-properties.""" - if node.get("type") == "object": - sub_properties = node.get("properties", {}) - return self.build_object_properties(sub_properties, prop_object_required, level=level + 1) - return "" + def __parameter_to_string(self, prop, required, name=""): + name = name or prop["name"] + ptype = prop["type"] + title = prop.get("title", "") + desc = prop.get("description", "") + return f'{name}\t{ptype}\t{"Yes" if required else "No"}\t{title}\t{desc}' + + def _parameter_to_string(self, prop): + required = prop.get("required", False) + return self.__parameter_to_string(prop, required) + + def parameter_to_string(self, name, prop, prop_object_required): + required = name in prop_object_required + return self.__parameter_to_string(prop, required, name) def build_object_properties(self, node, prop_object_required, level: int = 0) -> str: - """Recursively output properties of type object and array[object]. + """Recursively output properties of object and array[object] types. Args: - node: Value of the child item. - prop_object_required: Indicates if it's a required field. + node (_type_): Value of the sub-item. + prop_object_required (_type_): Indicates if it's a required item. level: Current recursion depth. 
""" + doc = "" + + def dive_into_object(node): + """If it's an object type, recursively output its properties.""" + if node.get("type") == "object": + sub_properties = node.get("properties", {}) + return self.build_object_properties(sub_properties, prop_object_required, level=level + 1) + return "" + if node.get("in", "") in ["query", "header", "formData"]: - doc += f'{" " * level}{self._para_to_str(node)}\n' - doc += self.dive_into_object(node) + doc += f'{"\t" * level}{self._parameter_to_string(node)}\n' + doc += dive_into_object(node) return doc for name, prop in node.items(): - doc += f'{" " * level}{self.para_to_str(name, prop, prop_object_required)}\n' - doc += self.dive_into_object(prop) + doc += f'{"\t" * level}{self.parameter_to_string(name, prop, prop_object_required)}\n' + doc += dive_into_object(prop) if prop["type"] == "array": items = prop.get("items", {}) - doc += self.dive_into_object(items) + doc += dive_into_object(items) return doc def get_tags_mapping(self) -> dict: - """Process tags and paths. + """Process tag and path. Returns: - A dictionary mapping tags to paths. + Dictionary: Correspondence of tag to path. 
""" swagger_data = self.get_swagger_json() paths = swagger_data["paths"] @@ -177,7 +194,7 @@ class UTGenerator: return tags def generate_ut(self, include_tags) -> bool: - """Generate the test case files.""" + """Generate test case files.""" tags = self.get_tags_mapping() for tag, paths in tags.items(): if include_tags is None or tag in include_tags: @@ -192,12 +209,14 @@ class UTGenerator: if "parameters" in node: parameters = node["parameters"] doc += "Path Parameters:\n" + + # param["in"]: path / formData / body / query / header for param in parameters: if param["in"] == "path": - doc += f'{param["name"]} \n' + doc += f'{param["name"]}\n' doc += "\nBody Parameters:\n" - doc += "Name\tType\tMandatory?\tDefault Value\tNotes\n" + doc += "Name\tType\tRequired\tDefault Value\tRemarks\n" for param in parameters: if param["in"] == "body": schema = param.get("schema", {}) @@ -207,9 +226,9 @@ class UTGenerator: else: doc += self.build_object_properties(param, []) - # Output response data information - doc += "\nResponse Data:\n" - doc += "Name\tType\tMandatory?\tDefault Value\tNotes\n" + # Output return data information + doc += "\nReturn Data:\n" + doc += "Name\tType\tRequired\tDefault Value\tRemarks\n" responses = node["responses"] response = responses.get("200", {}) schema = response.get("schema", {}) @@ -228,7 +247,7 @@ class UTGenerator: file.write(data) def ask_gpt_and_save(self, question: str, tag: str, fname: str): - """Generate questions and store both the questions and answers.""" + """Generate a question and store both question and answer.""" messages = [self.icl_sample, question] result = self.gpt_msgs_to_code(messages=messages) @@ -236,11 +255,11 @@ class UTGenerator: self._store(result, self.ut_py_path, tag, f"{fname}.py") def _generate_ut(self, tag, paths): - """Process the structure under the data path. + """Handle structure under the data path. Args: - tag: Module name. - paths: Path object. + tag (_type_): Module name. + paths (_type_): Path Object. 
""" for path, path_obj in paths.items(): for method, node in path_obj.items(): @@ -250,7 +269,7 @@ class UTGenerator: self.ask_gpt_and_save(question, tag, summary) def gpt_msgs_to_code(self, messages: list) -> str: - """Choose the appropriate call method.""" + """Choose based on different invocation methods.""" result = '' if self.chatgpt_method == "API": result = GPTAPI().ask_code(msgs=messages) @@ -262,7 +281,7 @@ class UTGenerator: Args: base (str): Path. - fname (str): Filename. + fname (str): File name. """ path = Path(base) path.mkdir(parents=True, exist_ok=True) diff --git a/metagpt/utils/common.py b/metagpt/utils/common.py index c210a8e2f..fb91d2c57 100644 --- a/metagpt/utils/common.py +++ b/metagpt/utils/common.py @@ -14,11 +14,10 @@ from typing import List, Tuple from metagpt.logs import logger -def check_cmd_exists(command) -> int: - """Check if a command exists. - - :param command: The command to check. - :return: Returns 0 if the command exists, otherwise non-zero. +def check_command_exists(command) -> int: + """ Check if a command exists. + :param command: Command to check. + :return: Returns 0 if the command exists, else returns non-zero. """ check_command = 'command -v ' + command + ' >/dev/null 2>&1 || { echo >&2 "no mermaid"; exit 1; }' result = os.system(check_command) @@ -29,19 +28,19 @@ class OutputParser: @classmethod def parse_blocks(cls, text: str): - # First, split the text into different blocks using "##". + # Firstly, split the text into different blocks based on "##". blocks = text.split("##") # Create a dictionary to store the title and content of each block. block_dict = {} - # Iterate over all blocks. + # Loop through all blocks. for block in blocks: - # If the block is not empty, continue processing. + # If block is not empty, continue processing. if block.strip() != "": - # Separate the block's title and content, and trim whitespace from each. + # Split block's title and content and trim them. 
block_title, block_content = block.split("\n", 1) - # LLM may have an error, make a correction here. + # There may be errors in LLM, correct it here. if block_title[-1] == ":": block_title = block_title[:-1] block_dict[block_title.strip()] = block_content.strip() @@ -91,7 +90,7 @@ class OutputParser: except Exception: pass - # Try to parse lists. + # Try to parse the list. try: content = cls.parse_file_list(text=content) except Exception: @@ -115,18 +114,11 @@ class OutputParser: else: typing = typing_define if typing == List[str] or typing == List[Tuple[str, str]]: - # Try to parse lists. + # Try to parse the list. try: content = cls.parse_file_list(text=content) except Exception: pass - # TODO: Removing extra quotes is risky, will address later. - # elif typing == str: - # # Try to remove unnecessary quotes. - # try: - # content = cls.parse_str(text=content) - # except Exception: - # pass parsed_data[block] = content return parsed_data @@ -143,17 +135,17 @@ class CodeParser: @classmethod def parse_blocks(cls, text: str): - # First, split the text into different blocks using "##". + # Firstly, split the text into different blocks based on "##". blocks = text.split("##") # Create a dictionary to store the title and content of each block. block_dict = {} - # Iterate over all blocks. + # Loop through all blocks. for block in blocks: - # If the block is not empty, continue processing. + # If block is not empty, continue processing. if block.strip() != "": - # Separate the block's title and content, and trim whitespace from each. + # Split block's title and content and trim them. 
block_title, block_content = block.split("\n", 1) block_dict[block_title.strip()] = block_content.strip() @@ -200,7 +192,7 @@ class CodeParser: class NoMoneyException(Exception): - """Raised when the operation cannot be completed due to insufficient funds.""" + """Raised when the operation cannot be completed due to insufficient funds""" def __init__(self, amount, message="Insufficient funds"): self.amount = amount @@ -213,17 +205,17 @@ class NoMoneyException(Exception): def print_members(module, indent=0): """ - This function is sourced from: https://stackoverflow.com/questions/1796180/how-can-i-get-a-list-of-all-classes-within-current-module-in-python - :param module: The module to inspect. - :param indent: The indentation level. - :return: None. + https://stackoverflow.com/questions/1796180/how-can-i-get-a-list-of-all-classes-within-current-module-in-python + :param module: + :param indent: + :return: """ prefix = ' ' * indent for name, obj in inspect.getmembers(module): print(name, obj) if inspect.isclass(obj): print(f'{prefix}Class: {name}') - # Print the methods within the class. + # print the methods within the class if name in ['__class__', '__base__']: continue print_members(obj, indent + 2) @@ -231,3 +223,4 @@ def print_members(module, indent=0): print(f'{prefix}Function: {name}') elif inspect.ismethod(obj): print(f'{prefix}Method: {name}') +