Merge branch 'da_change' into 'mgx_ops'

Add a report that replies to the human when the run ends

See merge request pub/MetaGPT!345
This commit is contained in:
林义章 2024-08-21 03:42:15 +00:00
commit e907863bcb
4 changed files with 23 additions and 27 deletions

View file

@@ -227,13 +227,8 @@ You have just finished all tasks.
Reply to the human requirements.
Do not output any other format.
Your reply is:
"""
SUMMARY_PROMPT = """
# Restrictions
{requirements_constraints}
You have just completed some tasks.
Summarize the tasks you have accomplished without including detailed information.
If there are any deliverables, list their descriptions and provide their file paths.
Summarize what you have accomplished lately. Be concise.
If you produce any deliverables, include their short descriptions and file paths. If there are any metrics or quantitative results, include them, too.
"""

View file

@@ -16,7 +16,7 @@ Note:
1. If the requirement is a pure DATA-RELATED requirement, such as web browsing, web scraping, web searching, web imitation, data science, data analysis, machine learning, deep learning, text-to-image etc. DON'T decompose it, assign a single task with the original user requirement as instruction directly to Data Analyst.
2. If the requirement is developing a software, game, app, or website, excluding the above data-related tasks, you should decompose the requirement into multiple tasks and assign them to different team members based on their expertise. The software default development process has four steps: creating a Product Requirement Document (PRD) by the Product Manager -> writing a System Design by the Architect -> creating tasks by the Project Manager -> and coding by the Engineer. You may choose to execute any of these steps. When publishing message to Product Manager, you should directly copy the full original user requirement.
2.1. If the requirement contains both DATA-RELATED part mentioned in 1 and software development part mentioned in 2, you should decompose the software development part and assign them to different team members based on their expertise, and assign the DATA-RELATED part to Data Analyst David directly.
3.1 If the task involves code review or code checking, you should assign it to Engineer.
3.1 If the task involves code review (CR) or code checking, you should assign it to Engineer.
3.2. If the requirement is to fix a bug or issue, you should assign it to Issue Solver. However, if the code is written by Engineer, Engineer must maintain the code.
4. If the requirement is a common-sense, logical, or math problem, you should respond directly without assigning any task to team members.
5. If you think the requirement is not clear or ambiguous, you should ask the user for clarification immediately. Assign tasks only after all info is clear.

View file

@@ -26,7 +26,7 @@ from metagpt.tools.tool_registry import register_tool
class DataAnalyst(RoleZero):
name: str = "David"
profile: str = "DataAnalyst"
goal: str = "Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, web deployment, terminal operation, document QA & analysis, etc."
goal: str = "Take on any data-related tasks, such as data analysis, machine learning, deep learning, web browsing, web scraping, web searching, terminal operation, document QA & analysis, etc."
instruction: str = ROLE_INSTRUCTION + EXTRA_INSTRUCTION
task_type_desc: str = TASK_TYPE_DESC

View file

@@ -190,13 +190,13 @@ class RoleZero(Role):
memory = self.parse_images(memory)
req = self.llm.format_msg(memory + [UserMessage(content=prompt)])
state_data = dict(
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "react"})
state_data = dict(
plan_status=plan_status,
current_task=current_task,
instruction=instruction,
)
self.command_rsp = await self.llm_cached_aask(req=req, system_msgs=[system_prompt], state_data=state_data)
self.command_rsp = await self._check_duplicates(req, self.command_rsp)
@@ -303,13 +303,13 @@ class RoleZero(Role):
self.llm.format_msg(memory),
system_msgs=[QUICK_RESPONSE_SYSTEM_PROMPT.format(role_info=self._get_prefix())],
)
# If the answer contains the substring '[Message] from A to B:', remove it.
pattern = r"\[Message\] from .+? to .+?:\s*"
answer = re.sub(pattern, "", answer, count=1)
if "command_name" in answer:
# an actual TASK intent misclassified as QUICK, correct it here, FIXME: a better way is to classify it correctly in the first place
answer = ""
intent_result = "TASK"
# If the answer contains the substring '[Message] from A to B:', remove it.
pattern = r"\[Message\] from .+? to .+?:\s*"
answer = re.sub(pattern, "", answer, count=1)
if "command_name" in answer:
# an actual TASK intent misclassified as QUICK, correct it here, FIXME: a better way is to classify it correctly in the first place
answer = ""
intent_result = "TASK"
elif "SEARCH" in intent_result:
query = "\n".join(str(msg) for msg in memory)
answer = await SearchEnhancedQA().run(query)
@@ -492,19 +492,20 @@ class RoleZero(Role):
async def _end(self):
self._set_state(-1)
memory = self.rc.memory.get(self.memory_k)
# Ensure reply to the human before the "end" command is executed.
# Ensure reply to the human before the "end" command is executed. Hard code k=5 for checking.
if not any(["reply_to_human" in memory.content for memory in self.get_memories(k=5)]):
logger.info("manually reply to human")
reply_to_human_prompt = REPORT_TO_HUMAN_PROMPT.format(
requirements_constraints=self.requirements_constraints,
)
reply_content = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(reply_to_human_prompt)]))
async with ThoughtReporter(enable_llm_stream=True) as reporter:
await reporter.async_report({"type": "quick"})
reply_content = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(reply_to_human_prompt)]))
await self.reply_to_human(content=reply_content)
self.rc.memory.add(AIMessage(content=reply_content, cause_by=RunCommand))
outputs = ""
# Summary of the Completed Task and Deliverables
if self.use_summary:
summary_prompt = SUMMARY_PROMPT.format(
requirements_constraints=self.requirements_constraints,
)
outputs = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(summary_prompt)]))
logger.info("end current run and summarize")
outputs = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(SUMMARY_PROMPT)]))
return outputs