mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-04-26 09:16:21 +02:00
Merge branch 'main' into feature/teacher
This commit is contained in:
commit
28c3bfd036
30 changed files with 800 additions and 129 deletions
|
|
@ -5,15 +5,47 @@
|
|||
@Author : alexanderwu
|
||||
@File : debug_error.py
|
||||
"""
|
||||
import re
|
||||
|
||||
from metagpt.logs import logger
|
||||
from metagpt.actions.action import Action
|
||||
from metagpt.utils.common import CodeParser
|
||||
|
||||
|
||||
# Prompt asking the LLM to rewrite a buggy development/test file based on a
# RunCode result message. Filled with {context} before sending.
PROMPT_TEMPLATE = """
NOTICE
1. Role: You are a Development Engineer or QA engineer;
2. Task: You received this message from another Development Engineer or QA engineer who ran or tested your code.
Based on the message, first, figure out your own role, i.e. Engineer or QaEngineer,
then rewrite the development code or the test code based on your role, the error, and the summary, such that all bugs are fixed and the code performs well.
Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the test case or script and triple quotes.
The message is as follows:
{context}
---
Now you should start rewriting the code:
## file name of the code to rewrite: Write code with triple quote. Do your best to implement THIS IN ONLY ONE FILE.
"""
|
||||
class DebugError(Action):
    """Debug and rewrite one code file based on a run/test result message.

    The message (produced by RunCode) contains a summary, a status and a
    "## File To Rewrite:" section naming the single file to fix.
    """

    def __init__(self, name="DebugError", context=None, llm=None):
        super().__init__(name, context, llm)

    async def run(self, context):
        """Return ``(file_name, code)`` for the file to rewrite.

        If the result message reports PASS there is nothing to debug and an
        empty file name is returned together with an explanatory message.

        Raises:
            ValueError: if the message lacks a '## File To Rewrite' section.
        """
        if "PASS" in context:
            return "", "the original code works fine, no need to debug"

        # Raw string so \s and \. are regex escapes, not Python string escapes.
        match = re.search(r"## File To Rewrite:\s*(.+\.py)", context)
        if match is None:
            # Fail loudly instead of raising an opaque AttributeError on None.
            raise ValueError("context does not contain a '## File To Rewrite' section")
        file_name = match.group(1)

        logger.info(f"Debug and rewrite {file_name}")

        prompt = PROMPT_TEMPLATE.format(context=context)
        rsp = await self._aask(prompt)
        # The rewritten file content is returned inside a fenced code block.
        code = CodeParser.parse_code(block="", text=rsp)

        return file_name, code
|
||||
|
|
|
|||
|
|
@ -6,20 +6,118 @@
|
|||
@File : run_code.py
|
||||
"""
|
||||
import traceback
|
||||
import os
|
||||
import subprocess
|
||||
from typing import List, Tuple
|
||||
|
||||
from metagpt.logs import logger
|
||||
from metagpt.actions.action import Action
|
||||
|
||||
# Prompt asking the LLM to summarize a code-run result into structured
# sections (instruction / file to rewrite / status / send to).
PROMPT_TEMPLATE = """
Role: You are a senior development and qa engineer, your role is summarize the code running result.
If the running result does not include an error, you should explicitly approve the result.
On the other hand, if the running result indicates some error, you should point out which part, the development code or the test code, produces the error,
and give specific instructions on fixing the errors. Here is the code info:
{context}
Now you should begin your analysis
---
## instruction:
Please summarize the cause of the errors and give correction instruction
## File To Rewrite:
Determine the ONE file to rewrite in order to fix the error, for example, xyz.py, or test_xyz.py
## Status:
Determine if all of the code works fine, if so write PASS, else FAIL,
WRITE ONLY ONE WORD, PASS OR FAIL, IN THIS SECTION
## Send To:
Please write Engineer if the errors are due to problematic development codes, and QaEngineer to problematic test codes, and NoOne if there are no errors,
WRITE ONLY ONE WORD, Engineer OR QaEngineer OR NoOne, IN THIS SECTION.
---
You should fill in necessary instruction, status, send to, and finally return all content between the --- segment line.
"""

# Template describing the code that was run and its output; formatted into
# {context} of PROMPT_TEMPLATE above.
CONTEXT = """
## Development Code File Name
{code_file_name}
## Development Code
```python
{code}
```
## Test File Name
{test_file_name}
## Test Code
```python
{test_code}
```
## Running Command
{command}
## Running Output
standard output: {outs};
standard errors: {errs};
"""
|
||||
|
||||
class RunCode(Action):
    """Run code (in-process text or an external script) and ask the LLM to
    summarize the result into a structured report."""

    def __init__(self, name="RunCode", context=None, llm=None):
        super().__init__(name, context, llm)

    @classmethod
    async def run_text(cls, code) -> Tuple[str, str]:
        """Execute *code* in-process and return ``(result, error_trace)``.

        SECURITY NOTE: ``exec`` runs arbitrary code with full interpreter
        privileges — only ever call this on trusted, locally generated code.
        """
        try:
            # The executed snippet stores its output under the key 'result'.
            namespace = {}
            exec(code, namespace)
            return namespace.get("result", ""), ""
        except Exception:
            # Return the traceback instead of raising so callers can report it.
            return "", traceback.format_exc()

    @classmethod
    async def run_script(cls, working_directory, additional_python_paths=None, command=None) -> Tuple[str, str]:
        """Run *command* in *working_directory*; return decoded (stdout, stderr).

        The working directory and any extra paths are prepended to PYTHONPATH
        so the target project is importable. The process is killed after a
        10-second timeout and whatever output it produced is returned.
        """
        # Avoid mutable default arguments (shared state across calls).
        additional_python_paths = list(additional_python_paths or [])
        command = list(command or [])
        working_directory = str(working_directory)
        additional_python_paths = [str(path) for path in additional_python_paths]

        # Copy the current environment variables
        env = os.environ.copy()

        # Prepend the working directory and extra paths to PYTHONPATH.
        # os.pathsep keeps this portable (':' on POSIX, ';' on Windows).
        search_paths = [working_directory] + additional_python_paths + [env.get("PYTHONPATH", "")]
        env["PYTHONPATH"] = os.pathsep.join(search_paths)

        # Start the subprocess (list-form command, no shell).
        process = subprocess.Popen(
            command, cwd=working_directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        )

        try:
            # Wait for the process to complete, with a timeout
            stdout, stderr = process.communicate(timeout=10)
        except subprocess.TimeoutExpired:
            logger.info("The command did not complete within the given timeout.")
            process.kill()  # Kill the process if it times out
            stdout, stderr = process.communicate()
        return stdout.decode("utf-8"), stderr.decode("utf-8")

    async def run(
        self, code, mode="script", code_file_name="", test_code="", test_file_name="", command=None, **kwargs
    ) -> str:
        """Run *code* per *mode*, then return the run context plus the LLM's analysis.

        Raises:
            ValueError: if *mode* is neither 'script' nor 'text'.
        """
        command = list(command or [])  # avoid mutable default argument
        logger.info(f"Running {' '.join(command)}")
        if mode == "script":
            outs, errs = await self.run_script(command=command, **kwargs)
        elif mode == "text":
            outs, errs = await self.run_text(code=code)
        else:
            # Previously an unknown mode left outs/errs unbound (NameError).
            raise ValueError(f"unsupported mode: {mode!r}, expected 'script' or 'text'")

        logger.info(f"{outs=}")
        logger.info(f"{errs=}")

        context = CONTEXT.format(
            code=code,
            code_file_name=code_file_name,
            test_code=test_code,
            test_file_name=test_file_name,
            command=" ".join(command),
            outs=outs[:500],  # outs might be long but they are not important, truncate to avoid token overflow
            errs=errs[:10000],  # truncate errors to avoid token overflow
        )

        prompt = PROMPT_TEMPLATE.format(context=context)
        rsp = await self._aask(prompt)

        result = context + rsp

        return result
|
||||
|
|
|
|||
|
|
@ -5,22 +5,45 @@
|
|||
@Author : alexanderwu
|
||||
@File : write_test.py
|
||||
"""
|
||||
from metagpt.logs import logger
|
||||
from metagpt.actions.action import Action
|
||||
from metagpt.utils.common import CodeParser
|
||||
|
||||
# Prompt asking the LLM to generate a unittest suite for a given code file,
# with file locations embedded so the generated imports resolve correctly.
PROMPT_TEMPLATE = """
NOTICE
1. Role: You are a QA engineer; the main goal is to design, develop, and execute PEP8 compliant, well-structured, maintainable test cases and scripts for Python 3.9. Your focus should be on ensuring the product quality of the entire project through systematic testing.
2. Requirement: Based on the context, develop a comprehensive test suite that adequately covers all relevant aspects of the code file under review. Your test suite will be part of the overall project QA, so please develop complete, robust, and reusable test cases.
3. Attention1: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the test case or script.
4. Attention2: If there are any settings in your tests, ALWAYS SET A DEFAULT VALUE, ALWAYS USE STRONG TYPE AND EXPLICIT VARIABLE.
5. Attention3: YOU MUST FOLLOW "Data structures and interface definitions". DO NOT CHANGE ANY DESIGN. Make sure your tests respect the existing design and ensure its validity.
6. Think before writing: What should be tested and validated in this document? What edge cases could exist? What might fail?
7. CAREFULLY CHECK THAT YOU DON'T MISS ANY NECESSARY TEST CASES/SCRIPTS IN THIS FILE.
Attention: Use '##' to split sections, not '#', and '## <SECTION_NAME>' SHOULD WRITE BEFORE the test case or script and triple quotes.
-----
## Given the following code, please write appropriate test cases using Python's unittest framework to verify the correctness and robustness of this code:
```python
{code_to_test}
```
Note that the code to test is at {source_file_path}, we will put your test code at {workspace}/tests/{test_file_name}, and run your test code from {workspace},
you should correctly import the necessary classes based on these file locations!
## {test_file_name}: Write test code with triple quote. Do your best to implement THIS ONLY ONE FILE.
"""
|
||||
|
||||
class WriteTest(Action):
    """Generate a unittest test suite for a given code file via the LLM."""

    def __init__(self, name="WriteTest", context=None, llm=None):
        super().__init__(name, context, llm)

    async def write_code(self, prompt):
        """Ask the LLM and extract the fenced code block from its reply."""
        code_rsp = await self._aask(prompt)
        code = CodeParser.parse_code(block="", text=code_rsp)
        return code

    async def run(self, code_to_test, test_file_name, source_file_path, workspace):
        """Return generated test code for *code_to_test*.

        The file-location arguments are embedded in the prompt so the
        generated tests import the code under test correctly when run
        from *workspace*.
        """
        prompt = PROMPT_TEMPLATE.format(
            code_to_test=code_to_test,
            test_file_name=test_file_name,
            source_file_path=source_file_path,
            workspace=workspace,
        )
        code = await self.write_code(prompt)
        return code
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue