diff --git a/README.md b/README.md index f8db80a80..9a496cff4 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,24 @@ # Step 3: Clone the repository to your local machine, and install it. python setup.py install ``` +**Note:** + +- If you already have Chrome, Chromium, or MS Edge installed, you can skip downloading Chromium by setting the environment variable +`PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` to `true`. + +- Some people are [having issues](https://github.com/mermaidjs/mermaid.cli/issues/15) installing this tool globally. Installing it locally is an alternative solution: + + ```bash + npm install @mermaid-js/mermaid-cli + ``` + +- Don't forget to add the configuration for mmdc in config.yml + + ```yml + PUPPETEER_CONFIG: "./config/puppeteer-config.json" + MMDC: "./node_modules/.bin/mmdc" + ``` + +### Installation by Docker ```bash @@ -120,7 +138,12 @@ # Use code review will cost more money, but will opt for better code quality. ``` After running the script, you can find your new project in the `workspace/` directory. +### Preference of Platform or Tool +You can tell which platform or tool you want to use when stating your requirements. +```shell +python startup.py "Write a cli snake game based on pygame" +``` ### Usage ``` diff --git a/puppeteer-config.json b/config/puppeteer-config.json similarity index 100% rename from puppeteer-config.json rename to config/puppeteer-config.json diff --git a/docs/README_CN.md b/docs/README_CN.md index 72188a415..6458861c9 100644 --- a/docs/README_CN.md +++ b/docs/README_CN.md @@ -1,4 +1,4 @@ -# MetaGPT:多智能体元编程框架 +# MetaGPT: 多智能体框架
@@ -37,6 +37,8 @@ ## 示例(均由 GPT-4 生成)
## 安装
+### 传统安装
+
```bash
# 第 1 步:确保您的系统上安装了 NPM。并使用npm安装mermaid-js
npm --version
@@ -51,13 +53,56 @@ # 第 3 步:克隆仓库到您的本地机器,并进行安装。
python setup.py install
```
+### Docker安装
+
+```bash
+# 步骤1: 下载metagpt官方镜像并准备好config.yaml
+docker pull metagpt/metagpt:v0.3
+mkdir -p /opt/metagpt/{config,workspace}
+docker run --rm metagpt/metagpt:v0.3 cat /app/metagpt/config/config.yaml > /opt/metagpt/config/config.yaml
+vim /opt/metagpt/config/config.yaml # 修改config
+
+# 步骤2: 使用容器运行metagpt演示
+docker run --rm \
+ --privileged \
+ -v /opt/metagpt/config:/app/metagpt/config \
+ -v /opt/metagpt/workspace:/app/metagpt/workspace \
+ metagpt/metagpt:v0.3 \
+ python startup.py "Write a cli snake game"
+
+# 您也可以启动一个容器并在其中执行命令
+docker run --name metagpt -d \
+ --privileged \
+ -v /opt/metagpt/config:/app/metagpt/config \
+ -v /opt/metagpt/workspace:/app/metagpt/workspace \
+ metagpt/metagpt:v0.3
+
+docker exec -it metagpt /bin/bash
+$ python startup.py "Write a cli snake game"
+```
+
+`docker run ...`做了以下事情:
+
+- 以特权模式运行,有权限运行浏览器
+- 将主机目录 `/opt/metagpt/config` 映射到容器目录`/app/metagpt/config`
+- 将主机目录 `/opt/metagpt/workspace` 映射到容器目录 `/app/metagpt/workspace`
+- 执行演示命令 `python startup.py "Write a cli snake game"`
+
+### 自己构建镜像
+
+```bash
+# 您也可以自己构建metagpt镜像
+git clone https://github.com/geekan/MetaGPT.git
+cd MetaGPT && docker build -t metagpt:v0.3 .
+```
+
## 配置
- 在 `config/key.yaml / config/config.yaml / env` 中配置您的 `OPENAI_API_KEY`
- 优先级顺序:`config/key.yaml > config/config.yaml > env`
```bash
-# 复制配置文件并进行必要的修改。
+# 复制配置文件并进行必要的修改
cp config/config.yaml config/key.yaml
```
@@ -71,10 +116,47 @@ ## 示例:启动一个创业公司
```shell
python startup.py "写一个命令行贪吃蛇"
# 开启code review模式会会花费更多的money, 但是会提升代码质量和成功率
-python startup.py "写一个命令行贪吃蛇" --code_review True
+python startup.py "写一个命令行贪吃蛇" --code_review True
```
运行脚本后,您可以在 `workspace/` 目录中找到您的新项目。
+### 平台或工具的倾向性
+可以在阐述需求时说明想要使用的平台或工具。
+例如:
+
+```shell
+python startup.py "写一个基于pygame的命令行贪吃蛇"
+```
+
+### 使用
+
+```
+名称
+ startup.py - 我们是一家AI软件创业公司。通过投资我们,您将赋能一个充满无限可能的未来。
+
+概要
+ startup.py IDEA
-
+如果群已满,请添加负责人微信,会邀请进群
+
+
\ No newline at end of file
diff --git a/docs/README_JA.md b/docs/README_JA.md
index 8742d98d6..a5e5f6552 100644
--- a/docs/README_JA.md
+++ b/docs/README_JA.md
@@ -53,6 +53,24 @@ # ステップ 3: リポジトリをローカルマシンにクローンし、
python setup.py install
```
+**注:**
+
+- すでに Chrome、Chromium、MS Edge がインストールされている場合は、環境変数 `PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` を `true` に設定することで、
+Chromium のダウンロードをスキップすることができます。
+
+- このツールをグローバルにインストールする際に[問題を抱えている](https://github.com/mermaidjs/mermaid.cli/issues/15)人もいます。ローカルにインストールするのが代替の解決策です。
+
+ ```bash
+ npm install @mermaid-js/mermaid-cli
+ ```
+
+- config.yml に mmdc のコンフィギュレーションを記述するのを忘れないこと
+
+ ```yml
+ PUPPETEER_CONFIG: "./config/puppeteer-config.json"
+ MMDC: "./node_modules/.bin/mmdc"
+ ```
+
### Docker によるインストール
```bash
@@ -120,6 +138,12 @@ # コードレビューを利用すれば、コストはかかるが、より良
```
スクリプトを実行すると、`workspace/` ディレクトリに新しいプロジェクトが見つかります。
+### プラットフォームまたはツールの指定
+
+要件を述べるときに、どのプラットフォームまたはツールを使用するかを指定できます。
+```shell
+python startup.py "Write a cli snake game based on pygame"
+```
### 使用方法
diff --git a/docs/resources/MetaGPT-WeChat-Group4.jpeg b/docs/resources/MetaGPT-WeChat-Group4.jpeg
new file mode 100644
index 000000000..f665f8b1d
Binary files /dev/null and b/docs/resources/MetaGPT-WeChat-Group4.jpeg differ
diff --git a/docs/resources/MetaGPT-WeChat-Personal.jpeg b/docs/resources/MetaGPT-WeChat-Personal.jpeg
index aaf6c775f..f6b48577d 100644
Binary files a/docs/resources/MetaGPT-WeChat-Personal.jpeg and b/docs/resources/MetaGPT-WeChat-Personal.jpeg differ
diff --git a/examples/search_with_specific_engine.py b/examples/search_with_specific_engine.py
index d63981c88..7cc431cd4 100644
--- a/examples/search_with_specific_engine.py
+++ b/examples/search_with_specific_engine.py
@@ -6,11 +6,11 @@ from metagpt.tools import SearchEngineType
async def main():
# Serper API
- await Searcher(engine=SearchEngineType.SERPER_GOOGLE).run("What are some good sun protection products?")
- # Serper API
- # await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run("What are the best ski brands for skiers?")
+    # await Searcher(engine=SearchEngineType.SERPER_GOOGLE).run(["What are some good sun protection products?", "What are some of the best beaches?"])
+ # SerpAPI
+    # await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run("What are the best ski brands for skiers?")
# Google API
- # await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run("What are the most interesting human facts?")
+ await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run("What are the most interesting human facts?")
if __name__ == '__main__':
asyncio.run(main())
diff --git a/metagpt/config.py b/metagpt/config.py
index e479ab018..3753bb3b0 100644
--- a/metagpt/config.py
+++ b/metagpt/config.py
@@ -30,7 +30,7 @@ class Config(metaclass=Singleton):
"""
Regular usage method:
config = Config("config.yaml")
- secret_key = config.get("MY_SECRET_KEY")
+ secret_key = config.get_key("MY_SECRET_KEY")
print("Secret key:", secret_key)
"""
@@ -79,6 +79,9 @@ class Config(metaclass=Singleton):
self.total_cost = 0.0
self.puppeteer_config = self._get("PUPPETEER_CONFIG","")
self.mmdc = self._get("MMDC","mmdc")
+ self.update_costs = self._get("UPDATE_COSTS",True)
+ self.calc_usage = self._get("CALC_USAGE",True)
+
def _init_with_config_files_and_env(self, configs: dict, yaml_file):
"""Load from config/key.yaml, config/config.yaml, and env in decreasing order of priority"""
diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py
index bfdd7f18a..fa2971ae7 100644
--- a/metagpt/roles/role.py
+++ b/metagpt/roles/role.py
@@ -77,7 +77,7 @@ class RoleContext(BaseModel):
def check(self, role_id: str):
if hasattr(CONFIG, "long_term_memory") and CONFIG.long_term_memory:
self.long_term_memory.recover_memory(role_id, self)
- self.memory = self.long_term_memory # use memory to act as long_term_memory for unified operation
+        self.memory = self.long_term_memory  # use memory to act as long_term_memory for unified operation
@property
def important_memory(self) -> list[Message]:
@@ -158,85 +158,85 @@ class Role:
next_state = "0"
self._set_state(int(next_state))
-async def _act(self) -> Message:
- # prompt = self.get_prefix()
- # prompt += ROLE_TEMPLATE.format(name=self.profile, state=self.states[self.state], result=response,
- # history=self.history)
+ async def _act(self) -> Message:
+ # prompt = self.get_prefix()
+ # prompt += ROLE_TEMPLATE.format(name=self.profile, state=self.states[self.state], result=response,
+ # history=self.history)
- logger.info(f"{self._setting}: ready to {self._rc.todo}")
- response = await self._rc.todo.run(self._rc.important_memory)
- # logger.info(response)
- if isinstance(response, ActionOutput):
- msg = Message(content=response.content, instruct_content=response.instruct_content,
- role=self.profile, cause_by=type(self._rc.todo))
- else:
- msg = Message(content=response, role=self.profile, cause_by=type(self._rc.todo))
- self._rc.memory.add(msg)
- # logger.debug(f"{response}")
+ logger.info(f"{self._setting}: ready to {self._rc.todo}")
+ response = await self._rc.todo.run(self._rc.important_memory)
+ # logger.info(response)
+ if isinstance(response, ActionOutput):
+ msg = Message(content=response.content, instruct_content=response.instruct_content,
+ role=self.profile, cause_by=type(self._rc.todo))
+ else:
+ msg = Message(content=response, role=self.profile, cause_by=type(self._rc.todo))
+ self._rc.memory.add(msg)
+ # logger.debug(f"{response}")
- return msg
+ return msg
-async def _observe(self) -> int:
- """Observe from the environment, obtain important information, and add it to memory"""
- if not self._rc.env:
- return 0
- env_msgs = self._rc.env.memory.get()
+ async def _observe(self) -> int:
+ """Observe from the environment, obtain important information, and add it to memory"""
+ if not self._rc.env:
+ return 0
+ env_msgs = self._rc.env.memory.get()
- observed = self._rc.env.memory.get_by_actions(self._rc.watch)
+ observed = self._rc.env.memory.get_by_actions(self._rc.watch)
- news = self._rc.memory.remember(observed) # remember recent exact or similar memories
+ news = self._rc.memory.remember(observed) # remember recent exact or similar memories
- for i in env_msgs:
- self.recv(i)
+ for i in env_msgs:
+ self.recv(i)
- news_text = [f"{i.role}: {i.content[:20]}..." for i in news]
- if news_text:
- logger.debug(f'{self._setting} observed: {news_text}')
- return len(news)
+ news_text = [f"{i.role}: {i.content[:20]}..." for i in news]
+ if news_text:
+ logger.debug(f'{self._setting} observed: {news_text}')
+ return len(news)
-def _publish_message(self, msg):
- """If the role belongs to env, then the role's messages will be broadcast to env"""
- if not self._rc.env:
- # If env does not exist, do not publish the message
- return
- self._rc.env.publish_message(msg)
+ def _publish_message(self, msg):
+ """If the role belongs to env, then the role's messages will be broadcast to env"""
+ if not self._rc.env:
+ # If env does not exist, do not publish the message
+ return
+ self._rc.env.publish_message(msg)
-async def _react(self) -> Message:
- """Think first, then act"""
- await self._think()
- logger.debug(f"{self._setting}: {self._rc.state=}, will do {self._rc.todo}")
- return await self._act()
+ async def _react(self) -> Message:
+ """Think first, then act"""
+ await self._think()
+ logger.debug(f"{self._setting}: {self._rc.state=}, will do {self._rc.todo}")
+ return await self._act()
-def recv(self, message: Message) -> None:
- """add message to history."""
- # self._history += f"\n{message}"
- # self._context = self._history
- if message in self._rc.memory.get():
- return
- self._rc.memory.add(message)
+ def recv(self, message: Message) -> None:
+ """add message to history."""
+ # self._history += f"\n{message}"
+ # self._context = self._history
+ if message in self._rc.memory.get():
+ return
+ self._rc.memory.add(message)
-async def handle(self, message: Message) -> Message:
- """Receive information and reply with actions"""
- # logger.debug(f"{self.name=}, {self.profile=}, {message.role=}")
- self.recv(message)
+ async def handle(self, message: Message) -> Message:
+ """Receive information and reply with actions"""
+ # logger.debug(f"{self.name=}, {self.profile=}, {message.role=}")
+ self.recv(message)
- return await self._react()
+ return await self._react()
-async def run(self, message=None):
- """Observe, and think and act based on the results of the observation"""
- if message:
- if isinstance(message, str):
- message = Message(message)
- if isinstance(message, Message):
- self.recv(message)
- if isinstance(message, list):
- self.recv(Message("\n".join(message)))
- elif not await self._observe():
- # If there is no new information, suspend and wait
- logger.debug(f"{self._setting}: no news. waiting.")
- return
+ async def run(self, message=None):
+ """Observe, and think and act based on the results of the observation"""
+ if message:
+ if isinstance(message, str):
+ message = Message(message)
+ if isinstance(message, Message):
+ self.recv(message)
+ if isinstance(message, list):
+ self.recv(Message("\n".join(message)))
+ elif not await self._observe():
+ # If there is no new information, suspend and wait
+ logger.debug(f"{self._setting}: no news. waiting.")
+ return
- rsp = await self._react()
- # Publish the reply to the environment, waiting for the next subscriber to process
- self._publish_message(rsp)
- return rsp
+ rsp = await self._react()
+ # Publish the reply to the environment, waiting for the next subscriber to process
+ self._publish_message(rsp)
+ return rsp