diff --git a/metagpt/prompts/di/role_zero.py b/metagpt/prompts/di/role_zero.py index b4d8e4fcc..3356ab1c0 100644 --- a/metagpt/prompts/di/role_zero.py +++ b/metagpt/prompts/di/role_zero.py @@ -222,8 +222,8 @@ However, you MUST respond to the user message by yourself directly, DON'T ask yo REPORT_TO_HUMAN_PROMPT = """ ## Examlpe example 1: -User requirement:开发贪吃蛇游戏 -reply: 贪吃蛇游戏的开发已经完成。所有文件(index.html、style.css和script.js)已经创建并经过审查。 +User requirement: create a 2048 game +reply: The development of the 2048 game has been completed. All files (index.html, style.css, and script.js) have been created and reviewed. example 2: User requirement: Crawl and extract all the herb names from the website, Tell me the number of herbs. @@ -234,6 +234,7 @@ reply : The herb names have been successfully extracted. A total of 8 herb names Carefully review the history and respond to the user in the expected language to meet their requirements. If you have any deliverables that are helpful in explaining the results (such as files, metrics, quantitative results, etc.), provide brief descriptions of them. Your reply must be concise. +{language_restriction} Directly output your reply content. Do not add any output format. """ SUMMARY_PROMPT = """ diff --git a/metagpt/roles/di/role_zero.py b/metagpt/roles/di/role_zero.py index 260c937a5..37392673a 100644 --- a/metagpt/roles/di/role_zero.py +++ b/metagpt/roles/di/role_zero.py @@ -494,17 +494,15 @@ class RoleZero(Role): memory = self.rc.memory.get(self.memory_k) # Ensure reply to the human before the "end" command is executed. 
if not any(["reply_to_human" in memory.content for memory in self.get_memories(k=5)]): - reply_to_human_prompt = REPORT_TO_HUMAN_PROMPT.format( - requirements_constraints=self.requirements_constraints, - ) + pattern = r"\[Language Restrictions\](.*?)\n" + match = re.search(pattern, self.requirements_constraints, re.DOTALL) + reply_to_human_prompt = REPORT_TO_HUMAN_PROMPT.format(language_restriction=match.group(0) if match else "") reply_content = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(reply_to_human_prompt)])) await self.reply_to_human(content=reply_content) self.rc.memory.add(AIMessage(content=reply_content, cause_by=RunCommand)) outputs = "" # Summary of the Completed Task and Deliverables if self.use_summary: - summary_prompt = SUMMARY_PROMPT.format( - requirements_constraints=self.requirements_constraints, - ) + summary_prompt = SUMMARY_PROMPT.format() outputs = await self.llm.aask(self.llm.format_msg(memory + [UserMessage(summary_prompt)])) return outputs