diff --git a/README.md b/README.md index a39b509df..ead43c9e7 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,6 @@ # MetaGPT: The Multi-Agent Framework
-
@@ -128,8 +127,8 @@ ## Citation
```bibtex
@misc{hong2023metagpt,
- title={MetaGPT: Meta Programming for Multi-Agent Collaborative Framework},
- author={Sirui Hong and Xiawu Zheng and Jonathan Chen and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu},
+ title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
+ author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
diff --git a/docs/FAQ-EN.md b/docs/FAQ-EN.md
index 4c86ed150..f9df50caf 100644
--- a/docs/FAQ-EN.md
+++ b/docs/FAQ-EN.md
@@ -33,7 +33,7 @@
1. Choose a task from the Roadmap (or you can propose one). By submitting a PR, you can become a contributor and join the dev team.
-1. Current contributors come from backgrounds including: ByteDance AI Lab/DingDong/Didi/Xiaohongshu, Tencent/Baidu/MSRA/TikTok/BloomGPT Infra/Bilibili/CUHK/HKUST/CMU/UCB
+1. Current contributors come from backgrounds including ByteDance AI Lab/DingDong/Didi/Xiaohongshu, Tencent/Baidu/MSRA/TikTok/BloomGPT Infra/Bilibili/CUHK/HKUST/CMU/UCB
@@ -41,7 +41,7 @@
MetaGPT Community - The position of Chief Evangelist rotates on a monthly basis. The primary responsibilities include:
-1. Maintaining community FAQ documents, announcements, Github resources/READMEs.
+1. Maintaining community FAQ documents, announcements, and Github resources/READMEs.
1. Responding to, answering, and distributing community questions within an average of 30 minutes, including on platforms like Github Issues, Discord and WeChat.
1. Upholding a community atmosphere that is enthusiastic, genuine, and friendly.
1. Encouraging everyone to become contributors and participate in projects that are closely related to achieving AGI (Artificial General Intelligence).
@@ -125,10 +125,10 @@
1. The UI role takes over from the product manager role, extending the output from the 【UI Design draft】 provided by the product manager role. The UI role has implemented the UIDesign Action. Within the run of UIDesign, it processes the respective context, and based on the set template, outputs the UI. The output from the UI role includes:
- 1. UI Design Description:Describes the content to be designed and the design objectives.
- 1. Selected Elements:Describes the elements in the design that need to be illustrated.
- 1. HTML Layout:Outputs the HTML code for the page.
- 1. CSS Styles (styles.css):Outputs the CSS code for the page.
+ 1. UI Design Description: Describes the content to be designed and the design objectives.
+ 1. Selected Elements: Describes the elements in the design that need to be illustrated.
+ 1. HTML Layout: Outputs the HTML code for the page.
+ 1. CSS Styles (styles.css): Outputs the CSS code for the page.
1. Currently, the SD skill is a tool invoked by UIDesign. It instantiates the SDEngine, with specific code found in metagpt/tools/sd_engine.
diff --git a/docs/README_CN.md b/docs/README_CN.md
index 50cf207b4..409bdc7af 100644
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -19,7 +19,6 @@ # MetaGPT: 多智能体框架
-
diff --git a/docs/README_JA.md b/docs/README_JA.md
index 0a65e83e0..10cb7ee82 100644
--- a/docs/README_JA.md
+++ b/docs/README_JA.md
@@ -19,7 +19,6 @@ # MetaGPT: マルチエージェントフレームワーク
-
@@ -275,12 +274,12 @@ ### 使用方法
### コードウォークスルー
```python
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
from metagpt.roles import ProjectManager, ProductManager, Architect, Engineer
async def startup(idea: str, investment: float = 3.0, n_round: int = 5):
"""スタートアップを実行する。ボスになる。"""
- company = SoftwareCompany()
+ company = Team()
company.hire([ProductManager(), Architect(), ProjectManager(), Engineer()])
company.invest(investment)
company.start_project(idea)
@@ -304,8 +303,8 @@ ## 引用
```bibtex
@misc{hong2023metagpt,
- title={MetaGPT: Meta Programming for Multi-Agent Collaborative Framework},
- author={Sirui Hong and Xiawu Zheng and Jonathan Chen and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu},
+ title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
+ author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
diff --git a/examples/build_customized_agent.py b/examples/build_customized_agent.py
index 87d7a9c76..be34e5e5e 100644
--- a/examples/build_customized_agent.py
+++ b/examples/build_customized_agent.py
@@ -9,6 +9,7 @@ import asyncio
import fire
+from metagpt.llm import LLM
from metagpt.actions import Action
from metagpt.roles import Role
from metagpt.schema import Message
@@ -19,19 +20,10 @@ class SimpleWriteCode(Action):
PROMPT_TEMPLATE = """
Write a python function that can {instruction} and provide two runnnable test cases.
Return ```python your_code_here ``` with NO other texts,
- example:
- ```python
- # function
- def add(a, b):
- return a + b
- # test cases
- print(add(1, 2))
- print(add(3, 4))
- ```
your code:
"""
- def __init__(self, name="SimpleWriteCode", context=None, llm=None):
+ def __init__(self, name: str = "SimpleWriteCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
async def run(self, instruction: str):
@@ -51,8 +43,9 @@ class SimpleWriteCode(Action):
code_text = match.group(1) if match else rsp
return code_text
+
class SimpleRunCode(Action):
- def __init__(self, name="SimpleRunCode", context=None, llm=None):
+ def __init__(self, name: str = "SimpleRunCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
async def run(self, code_text: str):
@@ -61,6 +54,7 @@ class SimpleRunCode(Action):
logger.info(f"{code_result=}")
return code_result
+
class SimpleCoder(Role):
def __init__(
self,
@@ -73,16 +67,16 @@ class SimpleCoder(Role):
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
- todo = self._rc.todo
+ todo = self._rc.todo # todo will be SimpleWriteCode()
- msg = self._rc.memory.get()[-1] # retrieve the latest memory
- instruction = msg.content
+ msg = self.get_memories(k=1)[0] # find the most recent messages
- code_text = await SimpleWriteCode().run(instruction)
- msg = Message(content=code_text, role=self.profile, cause_by=todo)
+ code_text = await todo.run(msg.content)
+ msg = Message(content=code_text, role=self.profile, cause_by=type(todo))
return msg
+
class RunnableCoder(Role):
def __init__(
self,
@@ -92,43 +86,23 @@ class RunnableCoder(Role):
):
super().__init__(name, profile, **kwargs)
self._init_actions([SimpleWriteCode, SimpleRunCode])
-
- async def _think(self) -> None:
- if self._rc.todo is None:
- self._set_state(0)
- return
-
- if self._rc.state + 1 < len(self._states):
- self._set_state(self._rc.state + 1)
- else:
- self._rc.todo = None
+ self._set_react_mode(react_mode="by_order")
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ # By choosing the Action by order under the hood
+ # todo will be first SimpleWriteCode() then SimpleRunCode()
todo = self._rc.todo
- msg = self._rc.memory.get()[-1]
- if isinstance(todo, SimpleWriteCode):
- instruction = msg.content
- result = await SimpleWriteCode().run(instruction)
+ msg = self.get_memories(k=1)[0] # find the most recent message
+ result = await todo.run(msg.content)
- elif isinstance(todo, SimpleRunCode):
- code_text = msg.content
- result = await SimpleRunCode().run(code_text)
-
- msg = Message(content=result, role=self.profile, cause_by=todo)
+ msg = Message(content=result, role=self.profile, cause_by=type(todo))
self._rc.memory.add(msg)
return msg
- async def _react(self) -> Message:
- while True:
- await self._think()
- if self._rc.todo is None:
- break
- await self._act()
- return Message(content="All job done", role=self.profile)
-def main(msg="write a function that calculates the sum of a list"):
+def main(msg="write a function that calculates the product of a list and run it"):
# role = SimpleCoder()
role = RunnableCoder()
logger.info(msg)
diff --git a/examples/build_customized_multi_agents.py b/examples/build_customized_multi_agents.py
new file mode 100644
index 000000000..0df927e32
--- /dev/null
+++ b/examples/build_customized_multi_agents.py
@@ -0,0 +1,158 @@
+'''
+Filename: MetaGPT/examples/build_customized_multi_agents.py
+Created Date: Wednesday, November 15th 2023, 7:12:39 pm
+Author: garylin2099
+'''
+import re
+import asyncio
+import fire
+
+from metagpt.llm import LLM
+from metagpt.actions import Action, BossRequirement
+from metagpt.roles import Role
+from metagpt.team import Team
+from metagpt.schema import Message
+from metagpt.logs import logger
+
+def parse_code(rsp):
+ pattern = r'```python(.*)```'
+ match = re.search(pattern, rsp, re.DOTALL)
+ code_text = match.group(1) if match else rsp
+ return code_text
+
+class SimpleWriteCode(Action):
+
+ PROMPT_TEMPLATE = """
+ Write a python function that can {instruction}.
+ Return ```python your_code_here ``` with NO other texts,
+ your code:
+ """
+
+ def __init__(self, name: str = "SimpleWriteCode", context=None, llm: LLM = None):
+ super().__init__(name, context, llm)
+
+ async def run(self, instruction: str):
+
+ prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
+
+ rsp = await self._aask(prompt)
+
+ code_text = parse_code(rsp)
+
+ return code_text
+
+
+class SimpleCoder(Role):
+ def __init__(
+ self,
+ name: str = "Alice",
+ profile: str = "SimpleCoder",
+ **kwargs,
+ ):
+ super().__init__(name, profile, **kwargs)
+ self._watch([BossRequirement])
+ self._init_actions([SimpleWriteCode])
+
+
+class SimpleWriteTest(Action):
+
+ PROMPT_TEMPLATE = """
+ Context: {context}
+ Write {k} unit tests using pytest for the given function, assuming you have imported it.
+ Return ```python your_code_here ``` with NO other texts,
+ your code:
+ """
+
+ def __init__(self, name: str = "SimpleWriteTest", context=None, llm: LLM = None):
+ super().__init__(name, context, llm)
+
+ async def run(self, context: str, k: int = 3):
+
+ prompt = self.PROMPT_TEMPLATE.format(context=context, k=k)
+
+ rsp = await self._aask(prompt)
+
+ code_text = parse_code(rsp)
+
+ return code_text
+
+
+class SimpleTester(Role):
+ def __init__(
+ self,
+ name: str = "Bob",
+ profile: str = "SimpleTester",
+ **kwargs,
+ ):
+ super().__init__(name, profile, **kwargs)
+ self._init_actions([SimpleWriteTest])
+ # self._watch([SimpleWriteCode])
+ self._watch([SimpleWriteCode, SimpleWriteReview]) # feel free to try this too
+
+ async def _act(self) -> Message:
+ logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ todo = self._rc.todo
+
+ # context = self.get_memories(k=1)[0].content # use the most recent memory as context
+ context = self.get_memories() # use all memories as context
+
+ code_text = await todo.run(context, k=5) # specify arguments
+ msg = Message(content=code_text, role=self.profile, cause_by=type(todo))
+
+ return msg
+
+
+class SimpleWriteReview(Action):
+
+ PROMPT_TEMPLATE = """
+ Context: {context}
+ Review the test cases and provide one critical comment:
+ """
+
+ def __init__(self, name: str = "SimpleWriteReview", context=None, llm: LLM = None):
+ super().__init__(name, context, llm)
+
+ async def run(self, context: str):
+
+ prompt = self.PROMPT_TEMPLATE.format(context=context)
+
+ rsp = await self._aask(prompt)
+
+ return rsp
+
+
+class SimpleReviewer(Role):
+ def __init__(
+ self,
+ name: str = "Charlie",
+ profile: str = "SimpleReviewer",
+ **kwargs,
+ ):
+ super().__init__(name, profile, **kwargs)
+ self._init_actions([SimpleWriteReview])
+ self._watch([SimpleWriteTest])
+
+
+async def main(
+ idea: str = "write a function that calculates the product of a list",
+ investment: float = 3.0,
+ n_round: int = 5,
+ add_human: bool = False,
+):
+ logger.info(idea)
+
+ team = Team()
+ team.hire(
+ [
+ SimpleCoder(),
+ SimpleTester(),
+ SimpleReviewer(is_human=add_human),
+ ]
+ )
+
+ team.invest(investment=investment)
+ team.start_project(idea)
+ await team.run(n_round=n_round)
+
+if __name__ == '__main__':
+ fire.Fire(main)
diff --git a/examples/debate.py b/examples/debate.py
index 05db28070..a37e60848 100644
--- a/examples/debate.py
+++ b/examples/debate.py
@@ -7,14 +7,14 @@ import asyncio
import platform
import fire
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
from metagpt.actions import Action, BossRequirement
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.logs import logger
-class ShoutOut(Action):
- """Action: Shout out loudly in a debate (quarrel)"""
+class SpeakAloud(Action):
+ """Action: Speak out aloud in a debate (quarrel)"""
PROMPT_TEMPLATE = """
## BACKGROUND
@@ -27,7 +27,7 @@ class ShoutOut(Action):
craft a strong and emotional response in 80 words, in {name}'s rhetoric and viewpoints, your will argue:
"""
- def __init__(self, name="ShoutOut", context=None, llm=None):
+ def __init__(self, name="SpeakAloud", context=None, llm=None):
super().__init__(name, context, llm)
async def run(self, context: str, name: str, opponent_name: str):
@@ -39,96 +39,57 @@ class ShoutOut(Action):
return rsp
-class Trump(Role):
+class Debator(Role):
def __init__(
self,
- name: str = "Trump",
- profile: str = "Republican",
+ name: str,
+ profile: str,
+ opponent_name: str,
**kwargs,
):
super().__init__(name, profile, **kwargs)
- self._init_actions([ShoutOut])
- self._watch([ShoutOut])
- self.name = "Trump"
- self.opponent_name = "Biden"
+ self._init_actions([SpeakAloud])
+ self._watch([BossRequirement, SpeakAloud])
+ self.name = name
+ self.opponent_name = opponent_name
async def _observe(self) -> int:
await super()._observe()
# accept messages sent (from opponent) to self, disregard own messages from the last round
- self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
+ self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
return len(self._rc.news)
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ todo = self._rc.todo # An instance of SpeakAloud
- msg_history = self._rc.memory.get_by_actions([ShoutOut])
- context = []
- for m in msg_history:
- context.append(str(m))
- context = "\n".join(context)
+ memories = self.get_memories()
+ context = "\n".join(f"{msg.sent_from}: {msg.content}" for msg in memories)
+ # print(context)
- rsp = await ShoutOut().run(context=context, name=self.name, opponent_name=self.opponent_name)
+ rsp = await todo.run(context=context, name=self.name, opponent_name=self.opponent_name)
msg = Message(
content=rsp,
role=self.profile,
- cause_by=ShoutOut,
+ cause_by=type(todo),
sent_from=self.name,
send_to=self.opponent_name,
)
- return msg
-
-class Biden(Role):
- def __init__(
- self,
- name: str = "Biden",
- profile: str = "Democrat",
- **kwargs,
- ):
- super().__init__(name, profile, **kwargs)
- self._init_actions([ShoutOut])
- self._watch([BossRequirement, ShoutOut])
- self.name = "Biden"
- self.opponent_name = "Trump"
-
- async def _observe(self) -> int:
- await super()._observe()
- # accept the very first human instruction (the debate topic) or messages sent (from opponent) to self,
- # disregard own messages from the last round
- self._rc.news = [msg for msg in self._rc.news if msg.cause_by == BossRequirement or msg.send_to == self.name]
- return len(self._rc.news)
-
- async def _act(self) -> Message:
- logger.info(f"{self._setting}: ready to {self._rc.todo}")
-
- msg_history = self._rc.memory.get_by_actions([BossRequirement, ShoutOut])
- context = []
- for m in msg_history:
- context.append(str(m))
- context = "\n".join(context)
-
- rsp = await ShoutOut().run(context=context, name=self.name, opponent_name=self.opponent_name)
-
- msg = Message(
- content=rsp,
- role=self.profile,
- cause_by=ShoutOut,
- sent_from=self.name,
- send_to=self.opponent_name,
- )
+ self._rc.memory.add(msg)
return msg
-async def startup(idea: str, investment: float = 3.0, n_round: int = 5,
- code_review: bool = False, run_tests: bool = False):
- """We reuse the startup paradigm for roles to interact with each other.
- Now we run a startup of presidents and watch they quarrel. :) """
- company = SoftwareCompany()
- company.hire([Biden(), Trump()])
- company.invest(investment)
- company.start_project(idea)
- await company.run(n_round=n_round)
+async def debate(idea: str, investment: float = 3.0, n_round: int = 5):
+ """Run a team of presidents and watch them quarrel. :) """
+ Biden = Debator(name="Biden", profile="Democrat", opponent_name="Trump")
+ Trump = Debator(name="Trump", profile="Republican", opponent_name="Biden")
+ team = Team()
+ team.hire([Biden, Trump])
+ team.invest(investment)
+ team.start_project(idea, send_to="Biden") # send debate topic to Biden and let him speak first
+ await team.run(n_round=n_round)
def main(idea: str, investment: float = 3.0, n_round: int = 10):
@@ -141,7 +102,7 @@ def main(idea: str, investment: float = 3.0, n_round: int = 10):
"""
if platform.system() == "Windows":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
- asyncio.run(startup(idea, investment, n_round))
+ asyncio.run(debate(idea, investment, n_round))
if __name__ == '__main__':
diff --git a/metagpt/const.py b/metagpt/const.py
index 7f3f87dfa..407ce803a 100644
--- a/metagpt/const.py
+++ b/metagpt/const.py
@@ -6,7 +6,7 @@
@File : const.py
"""
from pathlib import Path
-
+from loguru import logger
def get_project_root():
"""Search upwards to find the project root directory."""
@@ -17,10 +17,15 @@ def get_project_root():
or (current_path / ".project_root").exists()
or (current_path / ".gitignore").exists()
):
+ # use metagpt with git clone will land here
+ logger.info(f"PROJECT_ROOT set to {str(current_path)}")
return current_path
parent_path = current_path.parent
if parent_path == current_path:
- raise Exception("Project root not found.")
+ # use metagpt with pip install will land here
+ cwd = Path.cwd()
+ logger.info(f"PROJECT_ROOT set to current working directory: {str(cwd)}")
+ return cwd
current_path = parent_path
diff --git a/metagpt/llm.py b/metagpt/llm.py
index e6f815950..9324da126 100644
--- a/metagpt/llm.py
+++ b/metagpt/llm.py
@@ -8,6 +8,7 @@
from metagpt.provider.anthropic_api import Claude2 as Claude
from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
+from metagpt.provider.human_provider import HumanProvider
DEFAULT_LLM = LLM()
CLAUDE_LLM = Claude()
diff --git a/metagpt/provider/human_provider.py b/metagpt/provider/human_provider.py
new file mode 100644
index 000000000..1d12f972f
--- /dev/null
+++ b/metagpt/provider/human_provider.py
@@ -0,0 +1,35 @@
+'''
+Filename: MetaGPT/metagpt/provider/human_provider.py
+Created Date: Wednesday, November 8th 2023, 11:55:46 pm
+Author: garylin2099
+'''
+from typing import Optional
+from metagpt.provider.base_gpt_api import BaseGPTAPI
+from metagpt.logs import logger
+
+class HumanProvider(BaseGPTAPI):
+ """Humans provide themselves as a 'model', which actually takes in human input as its response.
+ This enables replacing LLM anywhere in the framework with a human, thus introducing human interaction
+ """
+
+ def ask(self, msg: str) -> str:
+ logger.info("It's your turn, please type in your response. You may also refer to the context below")
+ rsp = input(msg)
+ if rsp in ["exit", "quit"]:
+ exit()
+ return rsp
+
+ async def aask(self, msg: str, system_msgs: Optional[list[str]] = None) -> str:
+ return self.ask(msg)
+
+ def completion(self, messages: list[dict]):
+ """dummy implementation of abstract method in base"""
+ return []
+
+ async def acompletion(self, messages: list[dict]):
+ """dummy implementation of abstract method in base"""
+ return []
+
+ async def acompletion_text(self, messages: list[dict], stream=False) -> str:
+ """dummy implementation of abstract method in base"""
+ return []
diff --git a/metagpt/roles/engineer.py b/metagpt/roles/engineer.py
index 6d65575a8..1f6685b38 100644
--- a/metagpt/roles/engineer.py
+++ b/metagpt/roles/engineer.py
@@ -207,6 +207,7 @@ class Engineer(Role):
async def _act(self) -> Message:
"""Determines the mode of action based on whether code review is used."""
+ logger.info(f"{self._setting}: ready to WriteCode")
if self.use_code_review:
return await self._act_sp_precision()
return await self._act_sp()
diff --git a/metagpt/roles/invoice_ocr_assistant.py b/metagpt/roles/invoice_ocr_assistant.py
index c307b20c0..15f831c97 100644
--- a/metagpt/roles/invoice_ocr_assistant.py
+++ b/metagpt/roles/invoice_ocr_assistant.py
@@ -42,17 +42,7 @@ class InvoiceOCRAssistant(Role):
self.filename = ""
self.origin_query = ""
self.orc_data = None
-
- async def _think(self) -> None:
- """Determine the next action to be taken by the role."""
- if self._rc.todo is None:
- self._set_state(0)
- return
-
- if self._rc.state + 1 < len(self._states):
- self._set_state(self._rc.state + 1)
- else:
- self._rc.todo = None
+ self._set_react_mode(react_mode="by_order")
async def _act(self) -> Message:
"""Perform an action as determined by the role.
@@ -94,17 +84,3 @@ class InvoiceOCRAssistant(Role):
msg = Message(content=content, instruct_content=resp)
self._rc.memory.add(msg)
return msg
-
- async def _react(self) -> Message:
- """Execute the invoice ocr assistant's think and actions.
-
- Returns:
- A message containing the final result of the assistant's actions.
- """
- while True:
- await self._think()
- if self._rc.todo is None:
- break
- msg = await self._act()
- return msg
-
diff --git a/metagpt/roles/researcher.py b/metagpt/roles/researcher.py
index acb46c718..c5512121a 100644
--- a/metagpt/roles/researcher.py
+++ b/metagpt/roles/researcher.py
@@ -31,20 +31,11 @@ class Researcher(Role):
):
super().__init__(name, profile, goal, constraints, **kwargs)
self._init_actions([CollectLinks(name), WebBrowseAndSummarize(name), ConductResearch(name)])
+ self._set_react_mode(react_mode="by_order")
self.language = language
if language not in ("en-us", "zh-cn"):
logger.warning(f"The language `{language}` has not been tested, it may not work.")
- async def _think(self) -> None:
- if self._rc.todo is None:
- self._set_state(0)
- return
-
- if self._rc.state + 1 < len(self._states):
- self._set_state(self._rc.state + 1)
- else:
- self._rc.todo = None
-
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
todo = self._rc.todo
@@ -73,12 +64,8 @@ class Researcher(Role):
self._rc.memory.add(ret)
return ret
- async def _react(self) -> Message:
- while True:
- await self._think()
- if self._rc.todo is None:
- break
- msg = await self._act()
+ async def react(self) -> Message:
+ msg = await super().react()
report = msg.instruct_content
self.write_report(report.topic, report.content)
return msg
diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py
index 44bb3e976..b96c361c0 100644
--- a/metagpt/roles/role.py
+++ b/metagpt/roles/role.py
@@ -7,14 +7,15 @@
"""
from __future__ import annotations
-from typing import Iterable, Type
+from typing import Iterable, Type, Union
+from enum import Enum
from pydantic import BaseModel, Field
# from metagpt.environment import Environment
from metagpt.config import CONFIG
from metagpt.actions import Action, ActionOutput
-from metagpt.llm import LLM
+from metagpt.llm import LLM, HumanProvider
from metagpt.logs import logger
from metagpt.memory import Memory, LongTermMemory
from metagpt.schema import Message
@@ -27,12 +28,14 @@ Please note that only the text between the first and second "===" is information
{history}
===
-You can now choose one of the following stages to decide the stage you need to go in the next step:
+Your previous stage: {previous_state}
+
+Now choose one of the following stages you need to go to in the next step:
{states}
Just answer a number between 0-{n_states}, choose the most suitable stage according to the understanding of the conversation.
Please note that the answer only needs a number, no need to add any other text.
-If there is no conversation record, choose 0.
+If you think you have completed your goal and don't need to go to any of the stages, return -1.
Do not answer anything else, and do not add any other information in your answer.
"""
@@ -46,6 +49,14 @@ ROLE_TEMPLATE = """Your response should be based on the previous conversation hi
{name}: {result}
"""
+class RoleReactMode(str, Enum):
+ REACT = "react"
+ BY_ORDER = "by_order"
+ PLAN_AND_ACT = "plan_and_act"
+
+ @classmethod
+ def values(cls):
+ return [item.value for item in cls]
class RoleSetting(BaseModel):
"""Role Settings"""
@@ -54,6 +65,7 @@ class RoleSetting(BaseModel):
goal: str
constraints: str
desc: str
+ is_human: bool
def __str__(self):
return f"{self.name}({self.profile})"
@@ -67,10 +79,12 @@ class RoleContext(BaseModel):
env: 'Environment' = Field(default=None)
memory: Memory = Field(default_factory=Memory)
long_term_memory: LongTermMemory = Field(default_factory=LongTermMemory)
- state: int = Field(default=0)
+ state: int = Field(default=-1) # -1 indicates initial or termination state where todo is None
todo: Action = Field(default=None)
watch: set[Type[Action]] = Field(default_factory=set)
news: list[Type[Message]] = Field(default=[])
+ react_mode: RoleReactMode = RoleReactMode.REACT # see `Role._set_react_mode` for definitions of the following two attributes
+ max_react_loop: int = 1
class Config:
arbitrary_types_allowed = True
@@ -93,9 +107,10 @@ class RoleContext(BaseModel):
class Role:
"""Role/Agent"""
- def __init__(self, name="", profile="", goal="", constraints="", desc=""):
- self._llm = LLM()
- self._setting = RoleSetting(name=name, profile=profile, goal=goal, constraints=constraints, desc=desc)
+ def __init__(self, name="", profile="", goal="", constraints="", desc="", is_human=False):
+ self._llm = LLM() if not is_human else HumanProvider()
+ self._setting = RoleSetting(name=name, profile=profile, goal=goal,
+ constraints=constraints, desc=desc, is_human=is_human)
self._states = []
self._actions = []
self._role_id = str(self._setting)
@@ -109,24 +124,48 @@ class Role:
self._reset()
for idx, action in enumerate(actions):
if not isinstance(action, Action):
- i = action("")
+ i = action("", llm=self._llm)
else:
+ if self._setting.is_human and not isinstance(action.llm, HumanProvider):
+ logger.warning(f"is_human attribute does not take effect, "
+ f"as Role's {str(action)} was initialized using LLM, try passing in Action classes instead of initialized instances")
i = action
i.set_prefix(self._get_prefix(), self.profile)
self._actions.append(i)
self._states.append(f"{idx}. {action}")
+ def _set_react_mode(self, react_mode: str, max_react_loop: int = 1):
+ """Set strategy of the Role reacting to observed Message. Variation lies in how
+ this Role elects action to perform during the _think stage, especially if it is capable of multiple Actions.
+
+ Args:
+ react_mode (str): Mode for choosing action during the _think stage, can be one of:
+ "react": standard think-act loop in the ReAct paper, alternating thinking and acting to solve the task, i.e. _think -> _act -> _think -> _act -> ...
+ Use llm to select actions in _think dynamically;
+ "by_order": switch action each time by order defined in _init_actions, i.e. _act (Action1) -> _act (Action2) -> ...;
+ "plan_and_act": first plan, then execute an action sequence, i.e. _think (of a plan) -> _act -> _act -> ...
+ Use llm to come up with the plan dynamically.
+ Defaults to "react".
+ max_react_loop (int): Maximum react cycles to execute, used to prevent the agent from reacting forever.
+ Take effect only when react_mode is react, in which we use llm to choose actions, including termination.
+ Defaults to 1, i.e. _think -> _act (-> return result and end)
+ """
+ assert react_mode in RoleReactMode.values(), f"react_mode must be one of {RoleReactMode.values()}"
+ self._rc.react_mode = react_mode
+ if react_mode == RoleReactMode.REACT:
+ self._rc.max_react_loop = max_react_loop
+
def _watch(self, actions: Iterable[Type[Action]]):
"""Listen to the corresponding behaviors"""
self._rc.watch.update(actions)
# check RoleContext after adding watch actions
self._rc.check(self._role_id)
- def _set_state(self, state):
+ def _set_state(self, state: int):
"""Update the current state."""
self._rc.state = state
logger.debug(self._actions)
- self._rc.todo = self._actions[self._rc.state]
+ self._rc.todo = self._actions[self._rc.state] if state >= 0 else None
def set_env(self, env: 'Environment'):
"""Set the environment in which the role works. The role can talk to the environment and can also receive messages by observing."""
@@ -151,13 +190,19 @@ class Role:
return
prompt = self._get_prefix()
prompt += STATE_TEMPLATE.format(history=self._rc.history, states="\n".join(self._states),
- n_states=len(self._states) - 1)
+ n_states=len(self._states) - 1, previous_state=self._rc.state)
+ # print(prompt)
next_state = await self._llm.aask(prompt)
logger.debug(f"{prompt=}")
- if not next_state.isdigit() or int(next_state) not in range(len(self._states)):
- logger.warning(f'Invalid answer of state, {next_state=}')
- next_state = "0"
- self._set_state(int(next_state))
+ if (not next_state.isdigit() and next_state != "-1") \
+ or int(next_state) not in range(-1, len(self._states)):
+ logger.warning(f'Invalid answer of state, {next_state=}, will be set to -1')
+ next_state = -1
+ else:
+ next_state = int(next_state)
+ if next_state == -1:
+ logger.info(f"End actions with {next_state=}")
+ self._set_state(next_state)
async def _act(self) -> Message:
# prompt = self.get_prefix()
@@ -203,10 +248,45 @@ class Role:
self._rc.env.publish_message(msg)
async def _react(self) -> Message:
- """Think first, then act"""
- await self._think()
- logger.debug(f"{self._setting}: {self._rc.state=}, will do {self._rc.todo}")
- return await self._act()
+ """Think first, then act, until the Role _think it is time to stop and requires no more todo.
+ This is the standard think-act loop in the ReAct paper, which alternates thinking and acting in task solving, i.e. _think -> _act -> _think -> _act -> ...
+ Use llm to select actions in _think dynamically
+ """
+ actions_taken = 0
+ rsp = Message("No actions taken yet") # will be overwritten after Role _act
+ while actions_taken < self._rc.max_react_loop:
+ # think
+ await self._think()
+ if self._rc.todo is None:
+ break
+ # act
+ logger.debug(f"{self._setting}: {self._rc.state=}, will do {self._rc.todo}")
+ rsp = await self._act()
+ actions_taken += 1
+ return rsp # return output from the last action
+
+ async def _act_by_order(self) -> Message:
+ """switch action each time by order defined in _init_actions, i.e. _act (Action1) -> _act (Action2) -> ..."""
+ for i in range(len(self._states)):
+ self._set_state(i)
+ rsp = await self._act()
+ return rsp # return output from the last action
+
+ async def _plan_and_act(self) -> Message:
+ """first plan, then execute an action sequence, i.e. _think (of a plan) -> _act -> _act -> ... Use llm to come up with the plan dynamically."""
+ # TODO: to be implemented
+ return Message("")
+
+ async def react(self) -> Message:
+ """Entry to one of three strategies by which Role reacts to the observed Message"""
+ if self._rc.react_mode == RoleReactMode.REACT:
+ rsp = await self._react()
+ elif self._rc.react_mode == RoleReactMode.BY_ORDER:
+ rsp = await self._act_by_order()
+ elif self._rc.react_mode == RoleReactMode.PLAN_AND_ACT:
+ rsp = await self._plan_and_act()
+ self._set_state(state=-1) # current reaction is complete, reset state to -1 and todo back to None
+ return rsp
def recv(self, message: Message) -> None:
"""add message to history."""
@@ -223,6 +303,10 @@ class Role:
return await self._react()
+ def get_memories(self, k=0) -> list[Message]:
+ """A wrapper to return the most recent k memories of this role, return all when k=0"""
+ return self._rc.memory.get(k=k)
+
async def run(self, message=None):
"""Observe, and think and act based on the results of the observation"""
if message:
@@ -237,7 +321,7 @@ class Role:
logger.debug(f"{self._setting}: no news. waiting.")
return
- rsp = await self._react()
+ rsp = await self.react()
# Publish the reply to the environment, waiting for the next subscriber to process
self._publish_message(rsp)
return rsp
diff --git a/metagpt/software_company.py b/metagpt/software_company.py
index b2bd18c58..d44a0068a 100644
--- a/metagpt/software_company.py
+++ b/metagpt/software_company.py
@@ -5,58 +5,9 @@
@Author : alexanderwu
@File : software_company.py
"""
-from pydantic import BaseModel, Field
+from metagpt.team import Team as SoftwareCompany
-from metagpt.actions import BossRequirement
-from metagpt.config import CONFIG
-from metagpt.environment import Environment
-from metagpt.logs import logger
-from metagpt.roles import Role
-from metagpt.schema import Message
-from metagpt.utils.common import NoMoneyException
-
-
-class SoftwareCompany(BaseModel):
- """
- Software Company: Possesses a team, SOP (Standard Operating Procedures), and a platform for instant messaging,
- dedicated to writing executable code.
- """
- environment: Environment = Field(default_factory=Environment)
- investment: float = Field(default=10.0)
- idea: str = Field(default="")
-
- class Config:
- arbitrary_types_allowed = True
-
- def hire(self, roles: list[Role]):
- """Hire roles to cooperate"""
- self.environment.add_roles(roles)
-
- def invest(self, investment: float):
- """Invest company. raise NoMoneyException when exceed max_budget."""
- self.investment = investment
- CONFIG.max_budget = investment
- logger.info(f'Investment: ${investment}.')
-
- def _check_balance(self):
- if CONFIG.total_cost > CONFIG.max_budget:
- raise NoMoneyException(CONFIG.total_cost, f'Insufficient funds: {CONFIG.max_budget}')
-
- def start_project(self, idea):
- """Start a project from publishing boss requirement."""
- self.idea = idea
- self.environment.publish_message(Message(role="BOSS", content=idea, cause_by=BossRequirement))
-
- def _save(self):
- logger.info(self.json())
-
- async def run(self, n_round=3):
- """Run company until target round or no money"""
- while n_round > 0:
- # self._save()
- n_round -= 1
- logger.debug(f"{n_round=}")
- self._check_balance()
- await self.environment.run()
- return self.environment.history
-
\ No newline at end of file
+import warnings
+warnings.warn("metagpt.software_company is deprecated and will be removed in the future. "
+ "Please use metagpt.team instead. SoftwareCompany class is now named as Team.",
+ DeprecationWarning, 2)
diff --git a/metagpt/team.py b/metagpt/team.py
new file mode 100644
index 000000000..67d3ecec8
--- /dev/null
+++ b/metagpt/team.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+@Time : 2023/5/12 00:30
+@Author : alexanderwu
+@File    : team.py
+"""
+from pydantic import BaseModel, Field
+
+from metagpt.actions import BossRequirement
+from metagpt.config import CONFIG
+from metagpt.environment import Environment
+from metagpt.logs import logger
+from metagpt.roles import Role
+from metagpt.schema import Message
+from metagpt.utils.common import NoMoneyException
+
+
+class Team(BaseModel):
+ """
+ Team: Possesses one or more roles (agents), SOP (Standard Operating Procedures), and a platform for instant messaging,
+    dedicated to performing any multi-agent activity, such as collaboratively writing executable code.
+ """
+ environment: Environment = Field(default_factory=Environment)
+ investment: float = Field(default=10.0)
+ idea: str = Field(default="")
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def hire(self, roles: list[Role]):
+ """Hire roles to cooperate"""
+ self.environment.add_roles(roles)
+
+ def invest(self, investment: float):
+        """Invest in the company. Raise NoMoneyException when total cost exceeds max_budget."""
+ self.investment = investment
+ CONFIG.max_budget = investment
+ logger.info(f'Investment: ${investment}.')
+
+ def _check_balance(self):
+ if CONFIG.total_cost > CONFIG.max_budget:
+ raise NoMoneyException(CONFIG.total_cost, f'Insufficient funds: {CONFIG.max_budget}')
+
+ def start_project(self, idea, send_to: str = ""):
+ """Start a project from publishing boss requirement."""
+ self.idea = idea
+ self.environment.publish_message(Message(role="Human", content=idea, cause_by=BossRequirement, send_to=send_to))
+
+ def _save(self):
+ logger.info(self.json())
+
+ async def run(self, n_round=3):
+ """Run company until target round or no money"""
+ while n_round > 0:
+ # self._save()
+ n_round -= 1
+ logger.debug(f"{n_round=}")
+ self._check_balance()
+ await self.environment.run()
+ return self.environment.history
+
\ No newline at end of file
diff --git a/metagpt/utils/mermaid.py b/metagpt/utils/mermaid.py
index 5e5b275b0..204c22c67 100644
--- a/metagpt/utils/mermaid.py
+++ b/metagpt/utils/mermaid.py
@@ -34,7 +34,10 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
engine = CONFIG.mermaid_engine.lower()
if engine == "nodejs":
if check_cmd_exists(CONFIG.mmdc) != 0:
- logger.warning("RUN `npm install -g @mermaid-js/mermaid-cli` to install mmdc")
+ logger.warning(
+                "RUN `npm install -g @mermaid-js/mermaid-cli` to install mmdc, "
+ "or consider changing MERMAID_ENGINE to `playwright`, `pyppeteer`, or `ink`."
+ )
return -1
for suffix in ["pdf", "svg", "png"]:
diff --git a/setup.py b/setup.py
index f9ae768e6..239156ae3 100644
--- a/setup.py
+++ b/setup.py
@@ -30,16 +30,16 @@ with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
setup(
name="metagpt",
- version="0.1",
+ version="0.3.0",
description="The Multi-Role Meta Programming Framework",
long_description=long_description,
long_description_content_type="text/markdown",
- url="https://gitlab.deepwisdomai.com/pub/metagpt",
+ url="https://github.com/geekan/MetaGPT",
author="Alexander Wu",
author_email="alexanderwu@fuzhi.ai",
license="Apache 2.0",
keywords="metagpt multi-role multi-agent programming gpt llm",
- packages=find_packages(exclude=["contrib", "docs", "examples"]),
+ packages=find_packages(exclude=["contrib", "docs", "examples", "tests*"]),
python_requires=">=3.9",
install_requires=requirements,
extras_require={
diff --git a/startup.py b/startup.py
index e2a903c9b..e9fbf94d3 100644
--- a/startup.py
+++ b/startup.py
@@ -11,7 +11,7 @@ from metagpt.roles import (
ProjectManager,
QaEngineer,
)
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
async def startup(
@@ -23,7 +23,7 @@ async def startup(
implement: bool = True,
):
"""Run a startup. Be a boss."""
- company = SoftwareCompany()
+ company = Team()
company.hire(
[
ProductManager(),
diff --git a/tests/metagpt/roles/test_ui.py b/tests/metagpt/roles/test_ui.py
index 285bff323..d58d31bd9 100644
--- a/tests/metagpt/roles/test_ui.py
+++ b/tests/metagpt/roles/test_ui.py
@@ -2,7 +2,7 @@
# @Date : 2023/7/22 02:40
# @Author : stellahong (stellahong@fuzhi.ai)
#
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
from metagpt.roles import ProductManager
from tests.metagpt.roles.ui_role import UI
@@ -15,7 +15,7 @@ def test_add_ui():
async def test_ui_role(idea: str, investment: float = 3.0, n_round: int = 5):
"""Run a startup. Be a boss."""
- company = SoftwareCompany()
+ company = Team()
company.hire([ProductManager(), UI()])
company.invest(investment)
company.start_project(idea)
diff --git a/tests/metagpt/test_software_company.py b/tests/metagpt/test_software_company.py
index 00538442c..4fc651f52 100644
--- a/tests/metagpt/test_software_company.py
+++ b/tests/metagpt/test_software_company.py
@@ -8,12 +8,12 @@
import pytest
from metagpt.logs import logger
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
@pytest.mark.asyncio
-async def test_software_company():
- company = SoftwareCompany()
+async def test_team():
+ company = Team()
company.start_project("做一个基础搜索引擎,可以支持知识库")
history = await company.run(n_round=5)
logger.info(history)
diff --git a/tests/metagpt/tools/test_moderation.py b/tests/metagpt/tools/test_moderation.py
index 225acff75..5ec3bd4de 100644
--- a/tests/metagpt/tools/test_moderation.py
+++ b/tests/metagpt/tools/test_moderation.py
@@ -3,7 +3,7 @@
"""
@Time : 2023/9/26 14:46
@Author : zhanglei
-@File : test_translate.py
+@File : test_moderation.py
"""
import pytest