diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yam_
similarity index 100%
rename from .pre-commit-config.yaml
rename to .pre-commit-config.yam_
diff --git a/README.md b/README.md
index 70460ceb4..e80082a3a 100644
--- a/README.md
+++ b/README.md
@@ -12,14 +12,13 @@ # MetaGPT: The Multi-Agent Framework
-
+
-
@@ -33,132 +32,38 @@ # MetaGPT: The Multi-Agent Framework
Software Company Multi-Role Schematic (Gradually Implementing)
-## MetaGPT's Abilities -https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419 +## Install - - -## Examples (fully generated by GPT-4) - -For example, if you type `python startup.py "Design a RecSys like Toutiao"`, you would get many outputs, one of them is data & api design - - - -It costs approximately **$0.2** (in GPT-4 API fees) to generate one example with analysis and design, and around **$2.0** for a full project. - - - - -## Installation - -### Installation Video Guide - -- [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY) - -### Traditional Installation +### Pip installation ```bash -# Step 1: Ensure that NPM is installed on your system. Then install mermaid-js. (If you don't have npm in your computer, please go to the Node.js official website to install Node.js https://nodejs.org/ and then you will have npm tool in your computer.) +# Step 1: Ensure that Python 3.9+ is installed on your system. You can check this by using: +# You can use conda to initialize a new python env +# conda create -n metagpt python=3.9 +# conda activate metagpt +python3 --version + +# Step 2: Clone the repository to your local machine for latest version, and install it. +git clone https://github.com/geekan/MetaGPT.git +cd MetaGPT +pip3 install -e. # or pip3 install metagpt # for stable version + +# Step 3: run the startup.py +# setup your OPENAI_API_KEY in key.yaml copy from config.yaml +python3 startup.py "Write a cli snake game" + +# Step 4 [Optional]: If you want to save the artifacts like diagrams such as quadrant chart, system designs, sequence flow in the workspace, you can execute the step before Step 3. By default, the framework is compatible, and the entire process can be run completely without executing this step. +# If executing, ensure that NPM is installed on your system. Then install mermaid-js. 
(If you don't have npm in your computer, please go to the Node.js official website to install Node.js https://nodejs.org/ and then you will have npm tool in your computer.) npm --version sudo npm install -g @mermaid-js/mermaid-cli - -# Step 2: Ensure that Python 3.9+ is installed on your system. You can check this by using: -python --version - -# Step 3: Clone the repository to your local machine, and install it. -git clone https://github.com/geekan/metagpt -cd metagpt -pip install -e. ``` -**Note:** +detail installation please refer to [cli_install](https://docs.deepwisdom.ai/guide/get_started/installation.html#install-stable-version) -- If already have Chrome, Chromium, or MS Edge installed, you can skip downloading Chromium by setting the environment variable - `PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` to `true`. - -- Some people are [having issues](https://github.com/mermaidjs/mermaid.cli/issues/15) installing this tool globally. Installing it locally is an alternative solution, - - ```bash - npm install @mermaid-js/mermaid-cli - ``` - -- don't forget to the configuration for mmdc in config.yml - - ```yml - PUPPETEER_CONFIG: "./config/puppeteer-config.json" - MMDC: "./node_modules/.bin/mmdc" - ``` - -- if `pip install -e.` fails with error `[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`, try instead running `pip install -e. --user` - -- To convert Mermaid charts to SVG, PNG, and PDF formats. In addition to the Node.js version of Mermaid-CLI, you now have the option to use Python version Playwright, pyppeteer or mermaid.ink for this task. - - - Playwright - - **Install Playwright** - - ```bash - pip install playwright - ``` - - - **Install the Required Browsers** - - to support PDF conversion, please install Chrominum. 
- - ```bash - playwright install --with-deps chromium - ``` - - - **modify `config.yaml`** - - uncomment MERMAID_ENGINE from config.yaml and change it to `playwright` - - ```yaml - MERMAID_ENGINE: playwright - ``` - - - pyppeteer - - **Install pyppeteer** - - ```bash - pip install pyppeteer - ``` - - - **Use your own Browsers** - - pyppeteer allows you use installed browsers, please set the following envirment - - ```bash - export PUPPETEER_EXECUTABLE_PATH = /path/to/your/chromium or edge or chrome - ``` - - please do not use this command to install browser, it is too old - - ```bash - pyppeteer-install - ``` - - - **modify `config.yaml`** - - uncomment MERMAID_ENGINE from config.yaml and change it to `pyppeteer` - - ```yaml - MERMAID_ENGINE: pyppeteer - ``` - - - mermaid.ink - - **modify `config.yaml`** - - uncomment MERMAID_ENGINE from config.yaml and change it to `ink` - - ```yaml - MERMAID_ENGINE: ink - ``` - - Note: this method does not support pdf export. - -### Installation by Docker +### Docker installation +> Note: In the Windows, you need to replace "/opt/metagpt" with a directory that Docker has permission to create, such as "D:\Users\x\metagpt" ```bash # Step 1: Download metagpt official image and prepare config.yaml @@ -174,141 +79,41 @@ # Step 2: Run metagpt demo with container -v /opt/metagpt/workspace:/app/metagpt/workspace \ metagpt/metagpt:latest \ python startup.py "Write a cli snake game" - -# You can also start a container and execute commands in it -docker run --name metagpt -d \ - --privileged \ - -v /opt/metagpt/config/key.yaml:/app/metagpt/config/key.yaml \ - -v /opt/metagpt/workspace:/app/metagpt/workspace \ - metagpt/metagpt:latest - -docker exec -it metagpt /bin/bash -$ python startup.py "Write a cli snake game" ``` -The command `docker run ...` do the following things: +detail installation please refer to [docker_install](https://docs.deepwisdom.ai/guide/get_started/installation.html#install-with-docker) -- Run in privileged mode to 
have permission to run the browser -- Map host configure file `/opt/metagpt/config/key.yaml` to container `/app/metagpt/config/key.yaml` -- Map host directory `/opt/metagpt/workspace` to container `/app/metagpt/workspace` -- Execute the demo command `python startup.py "Write a cli snake game"` +### QuickStart & Demo Video +- Try it on [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT) +- [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY) +- [Official Demo Video](https://github.com/geekan/MetaGPT/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d) -### Build image by yourself +https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419 -```bash -# You can also build metagpt image by yourself. -git clone https://github.com/geekan/MetaGPT.git -cd MetaGPT && docker build -t metagpt:custom . -``` +## Tutorial -## Configuration +- 🗒 [Online Document](https://docs.deepwisdom.ai/) +- 💻 [Usage](https://docs.deepwisdom.ai/guide/get_started/quickstart.html) +- 🔎 [What can MetaGPT do?](https://docs.deepwisdom.ai/guide/get_started/introduction.html) +- 🛠 How to build your own agents? 
+ - [MetaGPT Usage & Development Guide | Agent 101](https://docs.deepwisdom.ai/guide/tutorials/agent_101.html) + - [MetaGPT Usage & Development Guide | MultiAgent 101](https://docs.deepwisdom.ai/guide/tutorials/multi_agent_101.html) +- 🧑💻 Contribution + - [Develop Roadmap](docs/ROADMAP.md) +- 🔖 Use Cases + - [Debate](https://docs.deepwisdom.ai/guide/use_cases/multi_agent/debate.html) + - [Researcher](https://docs.deepwisdom.ai/guide/use_cases/agent/researcher.html) + - [Recepit Assistant](https://docs.deepwisdom.ai/guide/use_cases/agent/receipt_assistant.html) +- ❓ [FAQs](https://docs.deepwisdom.ai/guide/faq.html) -- Configure your `OPENAI_API_KEY` in any of `config/key.yaml / config/config.yaml / env` -- Priority order: `config/key.yaml > config/config.yaml > env` +## Support -```bash -# Copy the configuration file and make the necessary modifications. -cp config/config.yaml config/key.yaml -``` +### Discard Join US +📢 Join Our [Discord Channel](https://discord.gg/ZRHeExS6xv)! -| Variable Name | config/key.yaml | env | -| ------------------------------------------ | ----------------------------------------- | ----------------------------------------------- | -| OPENAI_API_KEY # Replace with your own key | OPENAI_API_KEY: "sk-..." | export OPENAI_API_KEY="sk-..." | -| OPENAI_API_BASE # Optional | OPENAI_API_BASE: "https://
-
@@ -33,57 +32,35 @@ # MetaGPT: 多智能体框架
软件公司多角色示意图(正在逐步实现)
-## MetaGPT 的能力 - -https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419 - - -## 示例(均由 GPT-4 生成) - -例如,键入`python startup.py "写个类似今日头条的推荐系统"`并回车,你会获得一系列输出,其一是数据结构与API设计 - - - -这需要大约**0.2美元**(GPT-4 API的费用)来生成一个带有分析和设计的示例,大约2.0美元用于一个完整的项目 - ## 安装 - -### 传统安装 +### Pip安装 ```bash -# 第 1 步:确保您的系统上安装了 NPM。并使用npm安装mermaid-js +# 第 1 步:确保您的系统上安装了 Python 3.9+。您可以使用以下命令进行检查: +# 可以使用conda来初始化新的python环境 +# conda create -n metagpt python=3.9 +# conda activate metagpt +python3 --version + +# 第 2 步:克隆最新仓库到您的本地机器,并进行安装。 +git clone https://github.com/geekan/MetaGPT.git +cd MetaGPT +pip3 install -e. # 或者 pip3 install metagpt # 安装稳定版本 + +# 第 3 步:执行startup.py +# 拷贝config.yaml为key.yaml,并设置你自己的OPENAI_API_KEY +python3 startup.py "Write a cli snake game" + +# 第 4 步【可选的】:如果你想在执行过程中保存像象限图、系统设计、序列流程等图表这些产物,可以在第3步前执行该步骤。默认的,框架做了兼容,在不执行该步的情况下,也可以完整跑完整个流程。 +# 如果执行,确保您的系统上安装了 NPM。并使用npm安装mermaid-js npm --version sudo npm install -g @mermaid-js/mermaid-cli - -# 第 2 步:确保您的系统上安装了 Python 3.9+。您可以使用以下命令进行检查: -python --version - -# 第 3 步:克隆仓库到您的本地机器,并进行安装。 -git clone https://github.com/geekan/metagpt -cd metagpt -pip install -e. ``` -**注意:** - -- 如果已经安装了Chrome、Chromium或MS Edge,可以通过将环境变量`PUPPETEER_SKIP_CHROMIUM_DOWNLOAD`设置为`true`来跳过下载Chromium。 - -- 一些人在全局安装此工具时遇到问题。在本地安装是替代解决方案, - - ```bash - npm install @mermaid-js/mermaid-cli - ``` - -- 不要忘记在config.yml中为mmdc配置配置, - - ```yml - PUPPETEER_CONFIG: "./config/puppeteer-config.json" - MMDC: "./node_modules/.bin/mmdc" - ``` - -- 如果`pip install -e.`失败并显示错误`[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`,请尝试使用`pip install -e. 
--user`运行。 +详细的安装请安装 [cli_install](https://docs.deepwisdom.ai/guide/get_started/installation.html#install-stable-version) ### Docker安装 +> 注意:在Windows中,你需要将 "/opt/metagpt" 替换为Docker具有创建权限的目录,比如"D:\Users\x\metagpt" ```bash # 步骤1: 下载metagpt官方镜像并准备好config.yaml @@ -99,121 +76,41 @@ # 步骤2: 使用容器运行metagpt演示 -v /opt/metagpt/workspace:/app/metagpt/workspace \ metagpt/metagpt:latest \ python startup.py "Write a cli snake game" - -# 您也可以启动一个容器并在其中执行命令 -docker run --name metagpt -d \ - --privileged \ - -v /opt/metagpt/config/key.yaml:/app/metagpt/config/key.yaml \ - -v /opt/metagpt/workspace:/app/metagpt/workspace \ - metagpt/metagpt:latest - -docker exec -it metagpt /bin/bash -$ python startup.py "Write a cli snake game" ``` -`docker run ...`做了以下事情: +详细的安装请安装 [docker_install](https://docs.deepwisdom.ai/zhcn/guide/get_started/installation.html#%E4%BD%BF%E7%94%A8docker%E5%AE%89%E8%A3%85) -- 以特权模式运行,有权限运行浏览器 -- 将主机文件 `/opt/metagpt/config/key.yaml` 映射到容器文件 `/app/metagpt/config/key.yaml` -- 将主机目录 `/opt/metagpt/workspace` 映射到容器目录 `/app/metagpt/workspace` -- 执行示例命令 `python startup.py "Write a cli snake game"` +### 快速开始的演示视频 +- 在 [MetaGPT Huggingface Space](https://huggingface.co/spaces/deepwisdom/MetaGPT) 上进行体验 +- [Matthew Berman: How To Install MetaGPT - Build A Startup With One Prompt!!](https://youtu.be/uT75J_KG_aY) +- [官方演示视频](https://github.com/geekan/MetaGPT/assets/2707039/5e8c1062-8c35-440f-bb20-2b0320f8d27d) -### 自己构建镜像 +https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419 -```bash -# 您也可以自己构建metagpt镜像 -git clone https://github.com/geekan/MetaGPT.git -cd MetaGPT && docker build -t metagpt:custom . -``` +## 教程 +- 🗒 [在线文档](https://docs.deepwisdom.ai/zhcn/) +- 💻 [如何使用](https://docs.deepwisdom.ai/zhcn/guide/get_started/quickstart.html) +- 🔎 [MetaGPT的能力及应用场景](https://docs.deepwisdom.ai/zhcn/guide/get_started/introduction.html) +- 🛠 如何构建你自己的智能体? 
+ - [MetaGPT的使用和开发教程 | 智能体入门](https://docs.deepwisdom.ai/zhcn/guide/tutorials/agent_101.html) + - [MetaGPT的使用和开发教程 | 多智能体入门](https://docs.deepwisdom.ai/zhcn/guide/tutorials/multi_agent_101.html) +- 🧑💻 贡献 + - [开发路线图](ROADMAP.md) +- 🔖 示例 + - [辩论](https://docs.deepwisdom.ai/zhcn/guide/use_cases/multi_agent/debate.html) + - [调研员](https://docs.deepwisdom.ai/zhcn/guide/use_cases/agent/researcher.html) + - [票据助手](https://docs.deepwisdom.ai/zhcn/guide/use_cases/agent/receipt_assistant.html) +- ❓ [常见问题解答](https://docs.deepwisdom.ai/zhcn/guide/faq.html) -## 配置 +## 支持 -- 在 `config/key.yaml / config/config.yaml / env` 中配置您的 `OPENAI_API_KEY` -- 优先级顺序:`config/key.yaml > config/config.yaml > env` +### 加入我们 -```bash -# 复制配置文件并进行必要的修改 -cp config/config.yaml config/key.yaml -``` +📢 加入我们的[Discord频道](https://discord.gg/ZRHeExS6xv)! -| 变量名 | config/key.yaml | env | -| ----------------------------------- | ----------------------------------------- | ----------------------------------------------- | -| OPENAI_API_KEY # 用您自己的密钥替换 | OPENAI_API_KEY: "sk-..." | export OPENAI_API_KEY="sk-..." | -| OPENAI_API_BASE # 可选 | OPENAI_API_BASE: "https://
-
@@ -60,17 +59,22 @@ ### インストールビデオガイド
### 伝統的なインストール
```bash
-# ステップ 1: NPM がシステムにインストールされていることを確認してください。次に mermaid-js をインストールします。(お使いのコンピューターに npm がない場合は、Node.js 公式サイトで Node.js https://nodejs.org/ をインストールしてください。)
-npm --version
-sudo npm install -g @mermaid-js/mermaid-cli
-
-# ステップ 2: Python 3.9+ がシステムにインストールされていることを確認してください。これを確認するには:
+# ステップ 1: Python 3.9+ がシステムにインストールされていることを確認してください。これを確認するには:
python --version
-# ステップ 3: リポジトリをローカルマシンにクローンし、インストールする。
-git clone https://github.com/geekan/metagpt
-cd metagpt
+# ステップ 2: リポジトリをローカルマシンにクローンし、インストールする。
+git clone https://github.com/geekan/MetaGPT.git
+cd MetaGPT
pip install -e.
+
+# ステップ 3: startup.py を実行する
+# config.yaml を key.yaml にコピーし、独自の OPENAI_API_KEY を設定します
+python3 startup.py "Write a cli snake game"
+
+# ステップ 4 [オプション]: 実行中に PRD ファイルなどのアーティファクトを保存する場合は、ステップ 3 の前にこのステップを実行できます。デフォルトでは、フレームワークには互換性があり、この手順を実行しなくてもプロセス全体を完了できます。
+# NPM がシステムにインストールされていることを確認してください。次に mermaid-js をインストールします。(お使いのコンピューターに npm がない場合は、Node.js 公式サイトで Node.js https://nodejs.org/ をインストールしてください。)
+npm --version
+sudo npm install -g @mermaid-js/mermaid-cli
```
**注:**
@@ -159,6 +163,7 @@ # ステップ 3: リポジトリをローカルマシンにクローンし、
注: この方法は pdf エクスポートに対応していません。
### Docker によるインストール
+> Windowsでは、"/opt/metagpt"をDockerが作成する権限を持つディレクトリに置き換える必要があります。例えば、"D:\Users\x\metagpt"などです。
```bash
# ステップ 1: metagpt 公式イメージをダウンロードし、config.yaml を準備する
@@ -270,12 +275,12 @@ ### 使用方法
### コードウォークスルー
```python
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
from metagpt.roles import ProjectManager, ProductManager, Architect, Engineer
async def startup(idea: str, investment: float = 3.0, n_round: int = 5):
"""スタートアップを実行する。ボスになる。"""
- company = SoftwareCompany()
+ company = Team()
company.hire([ProductManager(), Architect(), ProjectManager(), Engineer()])
company.invest(investment)
company.start_project(idea)
@@ -295,12 +300,12 @@ ## クイックスタート
## 引用
-現時点では、[Arxiv 論文](https://arxiv.org/abs/2308.00352)を引用してください:
+現時点では、[arXiv 論文](https://arxiv.org/abs/2308.00352)を引用してください:
```bibtex
@misc{hong2023metagpt,
- title={MetaGPT: Meta Programming for Multi-Agent Collaborative Framework},
- author={Sirui Hong and Xiawu Zheng and Jonathan Chen and Yuheng Cheng and Jinlin Wang and Ceyao Zhang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu},
+ title={MetaGPT: Meta Programming for A Multi-Agent Collaborative Framework},
+ author={Sirui Hong and Mingchen Zhuge and Jonathan Chen and Xiawu Zheng and Yuheng Cheng and Ceyao Zhang and Jinlin Wang and Zili Wang and Steven Ka Shing Yau and Zijuan Lin and Liyang Zhou and Chenyu Ran and Lingfeng Xiao and Chenglin Wu and Jürgen Schmidhuber},
year={2023},
eprint={2308.00352},
archivePrefix={arXiv},
diff --git a/examples/build_customized_agent.py b/examples/build_customized_agent.py
index 87d7a9c76..be34e5e5e 100644
--- a/examples/build_customized_agent.py
+++ b/examples/build_customized_agent.py
@@ -9,6 +9,7 @@ import asyncio
import fire
+from metagpt.llm import LLM
from metagpt.actions import Action
from metagpt.roles import Role
from metagpt.schema import Message
@@ -19,19 +20,10 @@ class SimpleWriteCode(Action):
PROMPT_TEMPLATE = """
Write a python function that can {instruction} and provide two runnnable test cases.
Return ```python your_code_here ``` with NO other texts,
- example:
- ```python
- # function
- def add(a, b):
- return a + b
- # test cases
- print(add(1, 2))
- print(add(3, 4))
- ```
your code:
"""
- def __init__(self, name="SimpleWriteCode", context=None, llm=None):
+ def __init__(self, name: str = "SimpleWriteCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
async def run(self, instruction: str):
@@ -51,8 +43,9 @@ class SimpleWriteCode(Action):
code_text = match.group(1) if match else rsp
return code_text
+
class SimpleRunCode(Action):
- def __init__(self, name="SimpleRunCode", context=None, llm=None):
+ def __init__(self, name: str = "SimpleRunCode", context=None, llm: LLM = None):
super().__init__(name, context, llm)
async def run(self, code_text: str):
@@ -61,6 +54,7 @@ class SimpleRunCode(Action):
logger.info(f"{code_result=}")
return code_result
+
class SimpleCoder(Role):
def __init__(
self,
@@ -73,16 +67,16 @@ class SimpleCoder(Role):
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
- todo = self._rc.todo
+ todo = self._rc.todo # todo will be SimpleWriteCode()
- msg = self._rc.memory.get()[-1] # retrieve the latest memory
- instruction = msg.content
+ msg = self.get_memories(k=1)[0] # find the most recent message
- code_text = await SimpleWriteCode().run(instruction)
- msg = Message(content=code_text, role=self.profile, cause_by=todo)
+ code_text = await todo.run(msg.content)
+ msg = Message(content=code_text, role=self.profile, cause_by=type(todo))
return msg
+
class RunnableCoder(Role):
def __init__(
self,
@@ -92,43 +86,23 @@ class RunnableCoder(Role):
):
super().__init__(name, profile, **kwargs)
self._init_actions([SimpleWriteCode, SimpleRunCode])
-
- async def _think(self) -> None:
- if self._rc.todo is None:
- self._set_state(0)
- return
-
- if self._rc.state + 1 < len(self._states):
- self._set_state(self._rc.state + 1)
- else:
- self._rc.todo = None
+ self._set_react_mode(react_mode="by_order")
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ # By choosing the Action by order under the hood
+ # todo will be first SimpleWriteCode() then SimpleRunCode()
todo = self._rc.todo
- msg = self._rc.memory.get()[-1]
- if isinstance(todo, SimpleWriteCode):
- instruction = msg.content
- result = await SimpleWriteCode().run(instruction)
+ msg = self.get_memories(k=1)[0] # find the most recent k messages
+ result = await todo.run(msg.content)
- elif isinstance(todo, SimpleRunCode):
- code_text = msg.content
- result = await SimpleRunCode().run(code_text)
-
- msg = Message(content=result, role=self.profile, cause_by=todo)
+ msg = Message(content=result, role=self.profile, cause_by=type(todo))
self._rc.memory.add(msg)
return msg
- async def _react(self) -> Message:
- while True:
- await self._think()
- if self._rc.todo is None:
- break
- await self._act()
- return Message(content="All job done", role=self.profile)
-def main(msg="write a function that calculates the sum of a list"):
+def main(msg="write a function that calculates the product of a list and run it"):
# role = SimpleCoder()
role = RunnableCoder()
logger.info(msg)
diff --git a/examples/debate.py b/examples/debate.py
index 05db28070..a37e60848 100644
--- a/examples/debate.py
+++ b/examples/debate.py
@@ -7,14 +7,14 @@ import asyncio
import platform
import fire
-from metagpt.software_company import SoftwareCompany
+from metagpt.team import Team
from metagpt.actions import Action, BossRequirement
from metagpt.roles import Role
from metagpt.schema import Message
from metagpt.logs import logger
-class ShoutOut(Action):
- """Action: Shout out loudly in a debate (quarrel)"""
+class SpeakAloud(Action):
+ """Action: Speak out aloud in a debate (quarrel)"""
PROMPT_TEMPLATE = """
## BACKGROUND
@@ -27,7 +27,7 @@ class ShoutOut(Action):
craft a strong and emotional response in 80 words, in {name}'s rhetoric and viewpoints, your will argue:
"""
- def __init__(self, name="ShoutOut", context=None, llm=None):
+ def __init__(self, name="SpeakAloud", context=None, llm=None):
super().__init__(name, context, llm)
async def run(self, context: str, name: str, opponent_name: str):
@@ -39,96 +39,57 @@ class ShoutOut(Action):
return rsp
-class Trump(Role):
+class Debator(Role):
def __init__(
self,
- name: str = "Trump",
- profile: str = "Republican",
+ name: str,
+ profile: str,
+ opponent_name: str,
**kwargs,
):
super().__init__(name, profile, **kwargs)
- self._init_actions([ShoutOut])
- self._watch([ShoutOut])
- self.name = "Trump"
- self.opponent_name = "Biden"
+ self._init_actions([SpeakAloud])
+ self._watch([BossRequirement, SpeakAloud])
+ self.name = name
+ self.opponent_name = opponent_name
async def _observe(self) -> int:
await super()._observe()
# accept messages sent (from opponent) to self, disregard own messages from the last round
- self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
+ self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
return len(self._rc.news)
async def _act(self) -> Message:
logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ todo = self._rc.todo # An instance of SpeakAloud
- msg_history = self._rc.memory.get_by_actions([ShoutOut])
- context = []
- for m in msg_history:
- context.append(str(m))
- context = "\n".join(context)
+ memories = self.get_memories()
+ context = "\n".join(f"{msg.sent_from}: {msg.content}" for msg in memories)
+ # print(context)
- rsp = await ShoutOut().run(context=context, name=self.name, opponent_name=self.opponent_name)
+ rsp = await todo.run(context=context, name=self.name, opponent_name=self.opponent_name)
msg = Message(
content=rsp,
role=self.profile,
- cause_by=ShoutOut,
+ cause_by=type(todo),
sent_from=self.name,
send_to=self.opponent_name,
)
- return msg
-
-class Biden(Role):
- def __init__(
- self,
- name: str = "Biden",
- profile: str = "Democrat",
- **kwargs,
- ):
- super().__init__(name, profile, **kwargs)
- self._init_actions([ShoutOut])
- self._watch([BossRequirement, ShoutOut])
- self.name = "Biden"
- self.opponent_name = "Trump"
-
- async def _observe(self) -> int:
- await super()._observe()
- # accept the very first human instruction (the debate topic) or messages sent (from opponent) to self,
- # disregard own messages from the last round
- self._rc.news = [msg for msg in self._rc.news if msg.cause_by == BossRequirement or msg.send_to == self.name]
- return len(self._rc.news)
-
- async def _act(self) -> Message:
- logger.info(f"{self._setting}: ready to {self._rc.todo}")
-
- msg_history = self._rc.memory.get_by_actions([BossRequirement, ShoutOut])
- context = []
- for m in msg_history:
- context.append(str(m))
- context = "\n".join(context)
-
- rsp = await ShoutOut().run(context=context, name=self.name, opponent_name=self.opponent_name)
-
- msg = Message(
- content=rsp,
- role=self.profile,
- cause_by=ShoutOut,
- sent_from=self.name,
- send_to=self.opponent_name,
- )
+ self._rc.memory.add(msg)
return msg
-async def startup(idea: str, investment: float = 3.0, n_round: int = 5,
- code_review: bool = False, run_tests: bool = False):
- """We reuse the startup paradigm for roles to interact with each other.
- Now we run a startup of presidents and watch they quarrel. :) """
- company = SoftwareCompany()
- company.hire([Biden(), Trump()])
- company.invest(investment)
- company.start_project(idea)
- await company.run(n_round=n_round)
+async def debate(idea: str, investment: float = 3.0, n_round: int = 5):
+ """Run a team of presidents and watch them quarrel. :) """
+ Biden = Debator(name="Biden", profile="Democrat", opponent_name="Trump")
+ Trump = Debator(name="Trump", profile="Republican", opponent_name="Biden")
+ team = Team()
+ team.hire([Biden, Trump])
+ team.invest(investment)
+ team.start_project(idea, send_to="Biden") # send debate topic to Biden and let him speak first
+ await team.run(n_round=n_round)
def main(idea: str, investment: float = 3.0, n_round: int = 10):
@@ -141,7 +102,7 @@ def main(idea: str, investment: float = 3.0, n_round: int = 10):
"""
if platform.system() == "Windows":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
- asyncio.run(startup(idea, investment, n_round))
+ asyncio.run(debate(idea, investment, n_round))
if __name__ == '__main__':
diff --git a/metagpt/config.py b/metagpt/config.py
index 27455d38d..3f9e742bd 100644
--- a/metagpt/config.py
+++ b/metagpt/config.py
@@ -45,10 +45,11 @@ class Config(metaclass=Singleton):
self.global_proxy = self._get("GLOBAL_PROXY")
self.openai_api_key = self._get("OPENAI_API_KEY")
self.anthropic_api_key = self._get("Anthropic_API_KEY")
- if (not self.openai_api_key or "YOUR_API_KEY" == self.openai_api_key) and (
- not self.anthropic_api_key or "YOUR_API_KEY" == self.anthropic_api_key
- ):
- raise NotConfiguredException("Set OPENAI_API_KEY or Anthropic_API_KEY first")
+ self.zhipuai_api_key = self._get("ZHIPUAI_API_KEY")
+ if (not self.openai_api_key or "YOUR_API_KEY" == self.openai_api_key) and \
+ (not self.anthropic_api_key or "YOUR_API_KEY" == self.anthropic_api_key) and \
+ (not self.zhipuai_api_key or "YOUR_API_KEY" == self.zhipuai_api_key):
+ raise NotConfiguredException("Set OPENAI_API_KEY or Anthropic_API_KEY or ZHIPUAI_API_KEY first")
self.openai_api_base = self._get("OPENAI_API_BASE")
openai_proxy = self._get("OPENAI_PROXY") or self.global_proxy
if openai_proxy:
diff --git a/metagpt/const.py b/metagpt/const.py
index 7f3f87dfa..407ce803a 100644
--- a/metagpt/const.py
+++ b/metagpt/const.py
@@ -6,7 +6,7 @@
@File : const.py
"""
from pathlib import Path
-
+from loguru import logger
def get_project_root():
"""Search upwards to find the project root directory."""
@@ -17,10 +17,15 @@ def get_project_root():
or (current_path / ".project_root").exists()
or (current_path / ".gitignore").exists()
):
+ # using metagpt via git clone will land here
+ logger.info(f"PROJECT_ROOT set to {str(current_path)}")
return current_path
parent_path = current_path.parent
if parent_path == current_path:
- raise Exception("Project root not found.")
+ # using metagpt installed via pip will land here
+ cwd = Path.cwd()
+ logger.info(f"PROJECT_ROOT set to current working directory: {str(cwd)}")
+ return cwd
current_path = parent_path
diff --git a/metagpt/llm.py b/metagpt/llm.py
index e6f815950..4edcd7a83 100644
--- a/metagpt/llm.py
+++ b/metagpt/llm.py
@@ -6,14 +6,27 @@
@File : llm.py
"""
+from metagpt.logs import logger
+from metagpt.config import CONFIG
from metagpt.provider.anthropic_api import Claude2 as Claude
-from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
+from metagpt.provider.openai_api import OpenAIGPTAPI
+from metagpt.provider.zhipuai_api import ZhiPuAIGPTAPI
+from metagpt.provider.spark_api import SparkAPI
+from metagpt.provider.human_provider import HumanProvider
-DEFAULT_LLM = LLM()
-CLAUDE_LLM = Claude()
-async def ai_func(prompt):
- """使用LLM进行QA
- QA with LLMs
- """
- return await DEFAULT_LLM.aask(prompt)
+def LLM() -> "BaseGPTAPI":
+ """Initialize a different LLM instance according to which API key is configured."""
+ # TODO a little trick, could use a registry to initialize the LLM instance instead
+ if CONFIG.openai_api_key:
+ llm = OpenAIGPTAPI()
+ elif CONFIG.claude_api_key:
+ llm = Claude()
+ elif CONFIG.spark_api_key:
+ llm = SparkAPI()
+ elif CONFIG.zhipuai_api_key:
+ llm = ZhiPuAIGPTAPI()
+ else:
+ raise RuntimeError("You should config a LLM configuration first")
+
+ return llm
diff --git a/metagpt/provider/base_chatbot.py b/metagpt/provider/base_chatbot.py
index abdf423f4..72e6c94f9 100644
--- a/metagpt/provider/base_chatbot.py
+++ b/metagpt/provider/base_chatbot.py
@@ -13,6 +13,7 @@ from dataclasses import dataclass
class BaseChatbot(ABC):
"""Abstract GPT class"""
mode: str = "API"
+ use_system_prompt: bool = True
@abstractmethod
def ask(self, msg: str) -> str:
diff --git a/metagpt/provider/base_gpt_api.py b/metagpt/provider/base_gpt_api.py
index de61167b9..b6b034329 100644
--- a/metagpt/provider/base_gpt_api.py
+++ b/metagpt/provider/base_gpt_api.py
@@ -5,6 +5,7 @@
@Author : alexanderwu
@File : base_gpt_api.py
"""
+import json
from abc import abstractmethod
from typing import Optional
@@ -14,7 +15,8 @@ from metagpt.provider.base_chatbot import BaseChatbot
class BaseGPTAPI(BaseChatbot):
"""GPT API abstract class, requiring all inheritors to provide a series of standard capabilities"""
- system_prompt = 'You are a helpful assistant.'
+
+ system_prompt = "You are a helpful assistant."
def _user_msg(self, msg: str) -> dict[str, str]:
return {"role": "user", "content": msg}
@@ -32,15 +34,17 @@ class BaseGPTAPI(BaseChatbot):
return self._system_msg(self.system_prompt)
def ask(self, msg: str) -> str:
- message = [self._default_system_msg(), self._user_msg(msg)]
+ message = [self._default_system_msg(), self._user_msg(msg)] if self.use_system_prompt else [self._user_msg(msg)]
rsp = self.completion(message)
return self.get_choice_text(rsp)
async def aask(self, msg: str, system_msgs: Optional[list[str]] = None) -> str:
if system_msgs:
- message = self._system_msgs(system_msgs) + [self._user_msg(msg)]
+ message = self._system_msgs(system_msgs) + [self._user_msg(msg)] if self.use_system_prompt \
+ else [self._user_msg(msg)]
else:
- message = [self._default_system_msg(), self._user_msg(msg)]
+ message = [self._default_system_msg(), self._user_msg(msg)] if self.use_system_prompt \
+ else [self._user_msg(msg)]
rsp = await self.acompletion_text(message, stream=True)
logger.debug(message)
# logger.debug(rsp)
@@ -108,11 +112,50 @@ class BaseGPTAPI(BaseChatbot):
"""Required to provide the first text of choice"""
return rsp.get("choices")[0]["message"]["content"]
+ def get_choice_function(self, rsp: dict) -> dict:
+ """Required to provide the first function of choice
+ :param dict rsp: OpenAI chat.completion response JSON, Note "message" must include "tool_calls",
+ and "tool_calls" must include "function", for example:
+ {...
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": null,
+ "tool_calls": [
+ {
+ "id": "call_Y5r6Ddr2Qc2ZrqgfwzPX5l72",
+ "type": "function",
+ "function": {
+ "name": "execute",
+ "arguments": "{\n \"language\": \"python\",\n \"code\": \"print('Hello, World!')\"\n}"
+ }
+ }
+ ]
+ },
+ "finish_reason": "stop"
+ }
+ ],
+ ...}
+ :return dict: return first function of choice, for example,
+ {'name': 'execute', 'arguments': '{\n "language": "python",\n "code": "print(\'Hello, World!\')"\n}'}
+ """
+ return rsp.get("choices")[0]["message"]["tool_calls"][0]["function"].to_dict()
+
+ def get_choice_function_arguments(self, rsp: dict) -> dict:
+ """Required to provide the first function arguments of choice.
+
+ :param dict rsp: same as in self.get_choice_function(rsp)
+ :return dict: return the first function arguments of choice, for example,
+ {'language': 'python', 'code': "print('Hello, World!')"}
+ """
+ return json.loads(self.get_choice_function(rsp)["arguments"])
+
def messages_to_prompt(self, messages: list[dict]):
"""[{"role": "user", "content": msg}] to user: