diff --git a/.devcontainer/README.md b/.devcontainer/README.md
index 5af5bfc90..dd088aab1 100644
--- a/.devcontainer/README.md
+++ b/.devcontainer/README.md
@@ -1,53 +1,39 @@
# Dev container
-This project includes a [dev container](https://containers.dev/), which lets you use a container as a full-featured dev
-environment.
+This project includes a [dev container](https://containers.dev/), which lets you use a container as a full-featured dev environment.
-You can use the dev container configuration in this folder to build and start running MetaGPT locally! For more, refer
-to the main README under the home directory.
-You can use it in [GitHub Codespaces](https://github.com/features/codespaces) or
-the [VS Code Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers).
+You can use the dev container configuration in this folder to build and start running MetaGPT locally! For more, refer to the main README under the home directory.
+You can use it in [GitHub Codespaces](https://github.com/features/codespaces) or the [VS Code Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers).
## GitHub Codespaces
-
You may use the button above to open this repo in a Codespace
-For more info, check out
-the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
-
+For more info, check out the [GitHub documentation](https://docs.github.com/en/free-pro-team@latest/github/developing-online-with-codespaces/creating-a-codespace#creating-a-codespace).
+
## VS Code Dev Containers
-
-Note: If you click this link you will open the main repo and not your local cloned repo, you can use this link and
-replace with your username and cloned repo name:
+Note: If you click this link you will open the main repo and not your local cloned repo, you can use this link and replace with your username and cloned repo name:
https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/geekan/MetaGPT
-If you already have VS Code and Docker installed, you can use the button above to get started. This will cause VS Code
-to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin
-up a dev container for use.
+
+If you already have VS Code and Docker installed, you can use the button above to get started. This will cause VS Code to automatically install the Dev Containers extension if needed, clone the source code into a container volume, and spin up a dev container for use.
You can also follow these steps to open this repo in a container using the VS Code Dev Containers extension:
-1. If this is your first time using a development container, please ensure your system meets the pre-reqs (i.e. have
- Docker installed) in the [getting started steps](https://aka.ms/vscode-remote/containers/getting-started).
+1. If this is your first time using a development container, please ensure your system meets the pre-reqs (i.e. have Docker installed) in the [getting started steps](https://aka.ms/vscode-remote/containers/getting-started).
2. Open a locally cloned copy of the code:
- - Fork and Clone this repository to your local filesystem.
- - Press F1 and select the **Dev Containers: Open Folder in Container...** command.
- - Select the cloned copy of this folder, wait for the container to start, and try things out!
+ - Fork and Clone this repository to your local filesystem.
+ - Press F1 and select the **Dev Containers: Open Folder in Container...** command.
+ - Select the cloned copy of this folder, wait for the container to start, and try things out!
You can learn more in the [Dev Containers documentation](https://code.visualstudio.com/docs/devcontainers/containers).
## Tips and tricks
-* If you are working with the same repository folder in a container and Windows, you'll want consistent line endings (
- otherwise you may see hundreds of changes in the SCM view). The `.gitattributes` file in the root of this repo will
- disable line ending conversion and should prevent this.
- See [tips and tricks](https://code.visualstudio.com/docs/devcontainers/tips-and-tricks#_resolving-git-line-ending-issues-in-containers-resulting-in-many-modified-files)
- for more info.
-* If you'd like to review the contents of the image used in this dev container, you can check it out in
- the [devcontainers/images](https://github.com/devcontainers/images/tree/main/src/python) repo.
+* If you are working with the same repository folder in a container and Windows, you'll want consistent line endings (otherwise you may see hundreds of changes in the SCM view). The `.gitattributes` file in the root of this repo will disable line ending conversion and should prevent this. See [tips and tricks](https://code.visualstudio.com/docs/devcontainers/tips-and-tricks#_resolving-git-line-ending-issues-in-containers-resulting-in-many-modified-files) for more info.
+* If you'd like to review the contents of the image used in this dev container, you can check it out in the [devcontainers/images](https://github.com/devcontainers/images/tree/main/src/python) repo.
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index 6ad3b598d..a774d0ed1 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -1,25 +1,27 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/python
{
- "name": "Python 3",
- // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
- "image": "mcr.microsoft.com/devcontainers/python:0-3.11",
- // Features to add to the dev container. More info: https://containers.dev/features.
- // "features": {},
+ "name": "Python 3",
+ // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
+ "image": "mcr.microsoft.com/devcontainers/python:0-3.11",
- // Configure tool-specific properties.
- "customizations": {
- // Configure properties specific to VS Code.
- "vscode": {
- "settings": {},
- "extensions": [
- "streetsidesoftware.code-spell-checker"
- ]
- }
- },
- // Use 'postCreateCommand' to run commands after the container is created.
- "postCreateCommand": "./.devcontainer/postCreateCommand.sh"
+ // Features to add to the dev container. More info: https://containers.dev/features.
+ // "features": {},
- // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
- // "remoteUser": "root"
+ // Configure tool-specific properties.
+ "customizations": {
+ // Configure properties specific to VS Code.
+ "vscode": {
+ "settings": {},
+ "extensions": [
+ "streetsidesoftware.code-spell-checker"
+ ]
+ }
+ },
+
+ // Use 'postCreateCommand' to run commands after the container is created.
+ "postCreateCommand": "./.devcontainer/postCreateCommand.sh"
+
+ // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
+ // "remoteUser": "root"
}
diff --git a/.devcontainer/docker-compose.yaml b/.devcontainer/docker-compose.yaml
index 2f0116bf8..a9988b1f3 100644
--- a/.devcontainer/docker-compose.yaml
+++ b/.devcontainer/docker-compose.yaml
@@ -5,10 +5,10 @@ services:
dockerfile: Dockerfile
context: ..
volumes:
- # Update this to wherever you want VS Code to mount the folder of your project
+ # Update this to wherever you want VS Code to mount the folder of your project
- ..:/workspaces:cached
networks:
- - metagpt-network
+ - metagpt-network
# environment:
# MONGO_ROOT_USERNAME: root
# MONGO_ROOT_PASSWORD: example123
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index db11ddbb5..b1892a709 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,11 +8,11 @@ repos:
rev: 5.11.5
hooks:
- id: isort
- args: [ '--profile', 'black' ]
+ args: ['--profile', 'black']
exclude: >-
- (?x)^(
- .*__init__\.py$
- )
+ (?x)^(
+ .*__init__\.py$
+ )
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
@@ -24,4 +24,4 @@ repos:
rev: 23.3.0
hooks:
- id: black
- args: [ '--line-length', '120' ]
\ No newline at end of file
+ args: ['--line-length', '120']
\ No newline at end of file
diff --git a/README.md b/README.md
index f6dd33791..4bc480a01 100644
--- a/README.md
+++ b/README.md
@@ -25,11 +25,9 @@ # MetaGPT: The Multi-Agent Framework
-1. MetaGPT takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements /
- data structures / APIs / documents, etc.**
-2. Internally, MetaGPT includes **product managers / architects / project managers / engineers.** It provides the entire
- process of a **software company along with carefully orchestrated SOPs.**
- 1. `Code = SOP(Team)` is the core philosophy. We materialize SOP and apply it to teams composed of LLMs.
+1. MetaGPT takes a **one line requirement** as input and outputs **user stories / competitive analysis / requirements / data structures / APIs / documents, etc.**
+2. Internally, MetaGPT includes **product managers / architects / project managers / engineers.** It provides the entire process of a **software company along with carefully orchestrated SOPs.**
+ 1. `Code = SOP(Team)` is the core philosophy. We materialize SOP and apply it to teams composed of LLMs.

@@ -37,17 +35,21 @@ # MetaGPT: The Multi-Agent Framework
## MetaGPT's Abilities
+
https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419
+
+
## Examples (fully generated by GPT-4)
-For example, if you type `python startup.py "Design a RecSys like Toutiao"`, you would get many outputs, one of them is
-data & api design
+For example, if you type `python startup.py "Design a RecSys like Toutiao"`, you would get many outputs, one of them is data & api design

-It costs approximately **$0.2** (in GPT-4 API fees) to generate one example with analysis and design, and around **$2.0
-** for a full project.
+It costs approximately **$0.2** (in GPT-4 API fees) to generate one example with analysis and design, and around **$2.0** for a full project.
+
+
+
## Installation
@@ -73,12 +75,10 @@ # Step 3: Clone the repository to your local machine, and install it.
**Note:**
-- If already have Chrome, Chromium, or MS Edge installed, you can skip downloading Chromium by setting the environment
- variable
+- If you already have Chrome, Chromium, or MS Edge installed, you can skip downloading Chromium by setting the environment variable
`PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` to `true`.
-- Some people are [having issues](https://github.com/mermaidjs/mermaid.cli/issues/15) installing this tool globally.
- Installing it locally is an alternative solution,
+- Some people are [having issues](https://github.com/mermaidjs/mermaid.cli/issues/15) installing this tool globally. Installing it locally is an alternative solution,
```bash
npm install @mermaid-js/mermaid-cli
@@ -91,75 +91,72 @@ # Step 3: Clone the repository to your local machine, and install it.
MMDC: "./node_modules/.bin/mmdc"
```
-- if `pip install -e.` fails with
- error `[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`,
- try instead running `pip install -e. --user`
+- if `pip install -e.` fails with error `[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`, try instead running `pip install -e. --user`
-- To convert Mermaid charts to SVG, PNG, and PDF formats. In addition to the Node.js version of Mermaid-CLI, you now
- have the option to use Python version Playwright, pyppeteer or mermaid.ink for this task.
+- To convert Mermaid charts to SVG, PNG, and PDF formats. In addition to the Node.js version of Mermaid-CLI, you now have the option to use Python version Playwright, pyppeteer or mermaid.ink for this task.
- - Playwright
- - **Install Playwright**
+ - Playwright
+ - **Install Playwright**
- ```bash
- pip install playwright
- ```
+ ```bash
+ pip install playwright
+ ```
- - **Install the Required Browsers**
+ - **Install the Required Browsers**
- to support PDF conversion, please install Chrominum.
+                to support PDF conversion, please install Chromium.
- ```bash
- playwright install --with-deps chromium
- ```
+ ```bash
+ playwright install --with-deps chromium
+ ```
- - **modify `config.yaml`**
+ - **modify `config.yaml`**
- uncomment MERMAID_ENGINE from config.yaml and change it to `playwright`
+ uncomment MERMAID_ENGINE from config.yaml and change it to `playwright`
- ```yaml
- MERMAID_ENGINE: playwright
- ```
+ ```yaml
+ MERMAID_ENGINE: playwright
+ ```
- - pyppeteer
- - **Install pyppeteer**
+ - pyppeteer
+ - **Install pyppeteer**
- ```bash
- pip install pyppeteer
- ```
+ ```bash
+ pip install pyppeteer
+ ```
- - **Use your own Browsers**
+ - **Use your own Browsers**
- pyppeteer alow you use installed browsers, please set the following envirment
+                pyppeteer allows you to use installed browsers, please set the following environment variable
+
+ ```bash
+                export PUPPETEER_EXECUTABLE_PATH=/path/to/your/chromium or edge or chrome
+ ```
- ```bash
- export PUPPETEER_EXECUTABLE_PATH = /path/to/your/chromium or edge or chrome
- ```
+ please do not use this command to install browser, it is too old
- please do not use this command to install browser, it is too old
+ ```bash
+ pyppeteer-install
+ ```
- ```bash
- pyppeteer-install
- ```
+ - **modify `config.yaml`**
- - **modify `config.yaml`**
+ uncomment MERMAID_ENGINE from config.yaml and change it to `pyppeteer`
- uncomment MERMAID_ENGINE from config.yaml and change it to `pyppeteer`
+ ```yaml
+ MERMAID_ENGINE: pyppeteer
+ ```
- ```yaml
- MERMAID_ENGINE: pyppeteer
- ```
+ - mermaid.ink
+ - **modify `config.yaml`**
- - mermaid.ink
- - **modify `config.yaml`**
+ uncomment MERMAID_ENGINE from config.yaml and change it to `ink`
- uncomment MERMAID_ENGINE from config.yaml and change it to `ink`
+ ```yaml
+ MERMAID_ENGINE: ink
+ ```
- ```yaml
- MERMAID_ENGINE: ink
- ```
-
- Note: this method does not support pdf export.
+ Note: this method does not support pdf export.
### Installation by Docker
@@ -215,7 +212,7 @@ # Copy the configuration file and make the necessary modifications.
```
| Variable Name | config/key.yaml | env |
-|--------------------------------------------|-------------------------------------------|-------------------------------------------------|
+| ------------------------------------------ | ----------------------------------------- | ----------------------------------------------- |
| OPENAI_API_KEY # Replace with your own key | OPENAI_API_KEY: "sk-..." | export OPENAI_API_KEY="sk-..." |
| OPENAI_API_BASE # Optional | OPENAI_API_BASE: "https:///v1" | export OPENAI_API_BASE="https:///v1" |
@@ -289,13 +286,11 @@ ### Code walkthrough
## QuickStart
-It is difficult to install and configure the local environment for some users. The following tutorials will allow you to
-quickly experience the charm of MetaGPT.
+It is difficult to install and configure the local environment for some users. The following tutorials will allow you to quickly experience the charm of MetaGPT.
- [MetaGPT quickstart](https://deepwisdom.feishu.cn/wiki/CyY9wdJc4iNqArku3Lncl4v8n2b)
Try it on Huggingface Space
-
- https://huggingface.co/spaces/deepwisdom/MetaGPT
## Citation
@@ -315,12 +310,10 @@ ## Citation
## Contact Information
-If you have any questions or feedback about this project, please feel free to contact us. We highly appreciate your
-suggestions!
+If you have any questions or feedback about this project, please feel free to contact us. We highly appreciate your suggestions!
- **Email:** alexanderwu@fuzhi.ai
-- **GitHub Issues:** For more technical inquiries, you can also create a new issue in
- our [GitHub repository](https://github.com/geekan/metagpt/issues).
+- **GitHub Issues:** For more technical inquiries, you can also create a new issue in our [GitHub repository](https://github.com/geekan/metagpt/issues).
We will respond to all questions within 2-3 business days.
diff --git a/config/config.yaml b/config/config.yaml
index 583f25433..444f55efd 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -15,19 +15,6 @@ RPM: 10
#### if Anthropic
#Anthropic_API_KEY: "YOUR_API_KEY"
-#### if Xinghuo
-#XINGHUO_APPID : "YOUR_APPID"
-#XINGHUO_API_SECRET : "YOUR_APISecret"
-#XINGHUO_API_KEY : "YOUR_APIKey"
-#DOMAIN : "generalv2"
-#SPARK_URL : "ws://spark-api.xf-yun.com/v2.1/chat"
-
-XINGHUO_APPID: "ae5e30f4"
-XINGHUO_API_SECRET: "MDhlOWE2NmFhOWMxZWRkOTdlYjY2Njk1"
-XINGHUO_API_KEY: "97b635fe5927d34a857333e11d15f29f"
-DOMAIN: "generalv2"
-SPARK_URL: "ws://spark-api.xf-yun.com/v2.1/chat"
-
#### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb
#### You can use ENGINE or DEPLOYMENT mode
#OPENAI_API_TYPE: "azure"
diff --git a/config/puppeteer-config.json b/config/puppeteer-config.json
index da5d808d9..7b2851c29 100644
--- a/config/puppeteer-config.json
+++ b/config/puppeteer-config.json
@@ -1,6 +1,6 @@
{
- "executablePath": "/usr/bin/chromium",
- "args": [
- "--no-sandbox"
- ]
+ "executablePath": "/usr/bin/chromium",
+ "args": [
+ "--no-sandbox"
+ ]
}
\ No newline at end of file
diff --git a/docs/FAQ-EN.md b/docs/FAQ-EN.md
index fdd5d846b..4c86ed150 100644
--- a/docs/FAQ-EN.md
+++ b/docs/FAQ-EN.md
@@ -1,5 +1,4 @@
-Our vision is to [extend human life](https://github.com/geekan/HowToLiveLonger)
-and [reduce working hours](https://github.com/geekan/MetaGPT/).
+Our vision is to [extend human life](https://github.com/geekan/HowToLiveLonger) and [reduce working hours](https://github.com/geekan/MetaGPT/).
1. ### Convenient Link for Sharing this Document:
@@ -11,36 +10,30 @@
-1. Code:https://github.com/geekan/MetaGPT
+1. Code:https://github.com/geekan/MetaGPT
-1. Roadmap:https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md
+1. Roadmap:https://github.com/geekan/MetaGPT/blob/main/docs/ROADMAP.md
-1. EN
+1. EN
- 1. Demo Video: [MetaGPT: Multi-Agent AI Programming Framework](https://www.youtube.com/watch?v=8RNzxZBTW8M)
- 2.
- Tutorial: [MetaGPT: Deploy POWERFUL Autonomous Ai Agents BETTER Than SUPERAGI!](https://www.youtube.com/watch?v=q16Gi9pTG_M&t=659s)
+ 1. Demo Video: [MetaGPT: Multi-Agent AI Programming Framework](https://www.youtube.com/watch?v=8RNzxZBTW8M)
+ 2. Tutorial: [MetaGPT: Deploy POWERFUL Autonomous Ai Agents BETTER Than SUPERAGI!](https://www.youtube.com/watch?v=q16Gi9pTG_M&t=659s)
3. Author's thoughts video(EN): [MetaGPT Matthew Berman](https://youtu.be/uT75J_KG_aY?si=EgbfQNAwD8F5Y1Ak)
-1. CN
-
- 1. Demo
- Video: [MetaGPT:一行代码搭建你的虚拟公司_哔哩哔哩_bilibili](https://www.bilibili.com/video/BV1NP411C7GW/?spm_id_from=333.999.0.0&vd_source=735773c218b47da1b4bd1b98a33c5c77)
- 1.
- Tutorial: [一个提示词写游戏 Flappy bird, 比AutoGPT强10倍的MetaGPT,最接近AGI的AI项目](https://youtu.be/Bp95b8yIH5c)
- 2. Author's thoughts video(
- CN): [MetaGPT作者深度解析直播回放_哔哩哔哩_bilibili](https://www.bilibili.com/video/BV1Ru411V7XL/?spm_id_from=333.337.search-card.all.click)
+1. CN
+ 1. Demo Video: [MetaGPT:一行代码搭建你的虚拟公司_哔哩哔哩_bilibili](https://www.bilibili.com/video/BV1NP411C7GW/?spm_id_from=333.999.0.0&vd_source=735773c218b47da1b4bd1b98a33c5c77)
+ 1. Tutorial: [一个提示词写游戏 Flappy bird, 比AutoGPT强10倍的MetaGPT,最接近AGI的AI项目](https://youtu.be/Bp95b8yIH5c)
+ 2. Author's thoughts video(CN): [MetaGPT作者深度解析直播回放_哔哩哔哩_bilibili](https://www.bilibili.com/video/BV1Ru411V7XL/?spm_id_from=333.337.search-card.all.click)
+
3. ### How to become a contributor?
-1. Choose a task from the Roadmap (or you can propose one). By submitting a PR, you can become a contributor and join
- the dev team.
-1. Current contributors come from backgrounds including: ByteDance AI Lab/DingDong/Didi/Xiaohongshu,
- Tencent/Baidu/MSRA/TikTok/BloomGPT Infra/Bilibili/CUHK/HKUST/CMU/UCB
+1. Choose a task from the Roadmap (or you can propose one). By submitting a PR, you can become a contributor and join the dev team.
+1. Current contributors come from backgrounds including: ByteDance AI Lab/DingDong/Didi/Xiaohongshu, Tencent/Baidu/MSRA/TikTok/BloomGPT Infra/Bilibili/CUHK/HKUST/CMU/UCB
@@ -48,13 +41,11 @@
MetaGPT Community - The position of Chief Evangelist rotates on a monthly basis. The primary responsibilities include:
-1. Maintaining community FAQ documents, announcements, Github resources/READMEs.
-1. Responding to, answering, and distributing community questions within an average of 30 minutes, including on
- platforms like Github Issues, Discord and WeChat.
-1. Upholding a community atmosphere that is enthusiastic, genuine, and friendly.
-1. Encouraging everyone to become contributors and participate in projects that are closely related to achieving AGI (
- Artificial General Intelligence).
-1. (Optional) Organizing small-scale events, such as hackathons.
+1. Maintaining community FAQ documents, announcements, Github resources/READMEs.
+1. Responding to, answering, and distributing community questions within an average of 30 minutes, including on platforms like Github Issues, Discord and WeChat.
+1. Upholding a community atmosphere that is enthusiastic, genuine, and friendly.
+1. Encouraging everyone to become contributors and participate in projects that are closely related to achieving AGI (Artificial General Intelligence).
+1. (Optional) Organizing small-scale events, such as hackathons.
@@ -62,165 +53,131 @@
-1. Experience with the generated repo code:
+1. Experience with the generated repo code:
- 1. https://github.com/geekan/MetaGPT/releases/tag/v0.1.0
+ 1. https://github.com/geekan/MetaGPT/releases/tag/v0.1.0
-1. Code truncation/ Parsing failure:
+1. Code truncation/ Parsing failure:
- 1. Check if it's due to exceeding length. Consider using the gpt-3.5-turbo-16k or other long token versions.
+ 1. Check if it's due to exceeding length. Consider using the gpt-3.5-turbo-16k or other long token versions.
-1. Success rate:
+1. Success rate:
- 1. There hasn't been a quantitative analysis yet, but the success rate of code generated by GPT-4 is significantly
- higher than that of gpt-3.5-turbo.
+ 1. There hasn't been a quantitative analysis yet, but the success rate of code generated by GPT-4 is significantly higher than that of gpt-3.5-turbo.
-1. Support for incremental, differential updates (if you wish to continue a half-done task):
+1. Support for incremental, differential updates (if you wish to continue a half-done task):
- 1. Several prerequisite tasks are listed on the ROADMAP.
+ 1. Several prerequisite tasks are listed on the ROADMAP.
-1. Can existing code be loaded?
+1. Can existing code be loaded?
- 1. It's not on the ROADMAP yet, but there are plans in place. It just requires some time.
+ 1. It's not on the ROADMAP yet, but there are plans in place. It just requires some time.
-1. Support for multiple programming languages and natural languages?
+1. Support for multiple programming languages and natural languages?
- 1. It's listed on ROADMAP.
+ 1. It's listed on ROADMAP.
-1. Want to join the contributor team? How to proceed?
+1. Want to join the contributor team? How to proceed?
- 1. Merging a PR will get you into the contributor's team. The main ongoing tasks are all listed on the ROADMAP.
+ 1. Merging a PR will get you into the contributor's team. The main ongoing tasks are all listed on the ROADMAP.
-1. PRD stuck / unable to access/ connection interrupted
+1. PRD stuck / unable to access/ connection interrupted
- 1. The official OPENAI_API_BASE address is `https://api.openai.com/v1`
- 1. If the official OPENAI_API_BASE address is inaccessible in your environment (this can be verified with curl),
- it's recommended to configure using the reverse proxy OPENAI_API_BASE provided by libraries such as
- openai-forward. For instance, `OPENAI_API_BASE: "``https://api.openai-forward.com/v1``"`
- 1. If the official OPENAI_API_BASE address is inaccessible in your environment (again, verifiable via curl), another
- option is to configure the OPENAI_PROXY parameter. This way, you can access the official OPENAI_API_BASE via a
- local proxy. If you don't need to access via a proxy, please do not enable this configuration; if accessing
- through a proxy is required, modify it to the correct proxy address. Note that when OPENAI_PROXY is enabled,
- don't set OPENAI_API_BASE.
- 1. Note: OpenAI's default API design ends with a v1. An example of the correct configuration
- is: `OPENAI_API_BASE: "``https://api.openai.com/v1``"`
+ 1. The official OPENAI_API_BASE address is `https://api.openai.com/v1`
+ 1. If the official OPENAI_API_BASE address is inaccessible in your environment (this can be verified with curl), it's recommended to configure using the reverse proxy OPENAI_API_BASE provided by libraries such as openai-forward. For instance, `OPENAI_API_BASE: "``https://api.openai-forward.com/v1``"`
+ 1. If the official OPENAI_API_BASE address is inaccessible in your environment (again, verifiable via curl), another option is to configure the OPENAI_PROXY parameter. This way, you can access the official OPENAI_API_BASE via a local proxy. If you don't need to access via a proxy, please do not enable this configuration; if accessing through a proxy is required, modify it to the correct proxy address. Note that when OPENAI_PROXY is enabled, don't set OPENAI_API_BASE.
+ 1. Note: OpenAI's default API design ends with a v1. An example of the correct configuration is: `OPENAI_API_BASE: "``https://api.openai.com/v1``"`
-1. Absolutely! How can I assist you today?
+1. Absolutely! How can I assist you today?
- 1. Did you use Chi or a similar service? These services are prone to errors, and it seems that the error rate is
- higher when consuming 3.5k-4k tokens in GPT-4
+ 1. Did you use Chi or a similar service? These services are prone to errors, and it seems that the error rate is higher when consuming 3.5k-4k tokens in GPT-4
-1. What does Max token mean?
+1. What does Max token mean?
- 1. It's a configuration for OpenAI's maximum response length. If the response exceeds the max token, it will be
- truncated.
+ 1. It's a configuration for OpenAI's maximum response length. If the response exceeds the max token, it will be truncated.
-1. How to change the investment amount?
+1. How to change the investment amount?
- 1. You can view all commands by typing `python startup.py --help`
+ 1. You can view all commands by typing `python startup.py --help`
-1. Which version of Python is more stable?
+1. Which version of Python is more stable?
- 1. python3.9 / python3.10
+ 1. python3.9 / python3.10
-1. Can't use GPT-4, getting the error "The model gpt-4 does not exist."
+1. Can't use GPT-4, getting the error "The model gpt-4 does not exist."
- 1. OpenAI's official requirement: You can use GPT-4 only after spending $1 on OpenAI.
- 1. Tip: Run some data with gpt-3.5-turbo (consume the free quota and $1), and then you should be able to use gpt-4.
+ 1. OpenAI's official requirement: You can use GPT-4 only after spending $1 on OpenAI.
+ 1. Tip: Run some data with gpt-3.5-turbo (consume the free quota and $1), and then you should be able to use gpt-4.
-1. Can games whose code has never been seen before be written?
+1. Can games whose code has never been seen before be written?
- 1. Refer to the README. The recommendation system of Toutiao is one of the most complex systems in the world
- currently. Although it's not on GitHub, many discussions about it exist online. If it can visualize these, it
- suggests it can also summarize these discussions and convert them into code. The prompt would be something like "
- write a recommendation system similar to Toutiao". Note: this was approached in earlier versions of the software.
- The SOP of those versions was different; the current one adopts Elon Musk's five-step work method, emphasizing
- trimming down requirements as much as possible.
+ 1. Refer to the README. The recommendation system of Toutiao is one of the most complex systems in the world currently. Although it's not on GitHub, many discussions about it exist online. If it can visualize these, it suggests it can also summarize these discussions and convert them into code. The prompt would be something like "write a recommendation system similar to Toutiao". Note: this was approached in earlier versions of the software. The SOP of those versions was different; the current one adopts Elon Musk's five-step work method, emphasizing trimming down requirements as much as possible.
-1. Under what circumstances would there typically be errors?
+1. Under what circumstances would there typically be errors?
- 1. More than 500 lines of code: some function implementations may be left blank.
- 1. When using a database, it often gets the implementation wrong — since the SQL database initialization process is
- usually not in the code.
- 1. With more lines of code, there's a higher chance of false impressions, leading to calls to non-existent APIs.
+ 1. More than 500 lines of code: some function implementations may be left blank.
+ 1. When using a database, it often gets the implementation wrong — since the SQL database initialization process is usually not in the code.
+ 1. With more lines of code, there's a higher chance of false impressions, leading to calls to non-existent APIs.
-1. Instructions for using SD Skills/UI Role:
+1. Instructions for using SD Skills/UI Role:
- 1. Currently, there is a test script located in /tests/metagpt/roles. The file ui_role provides the corresponding
- code implementation. For testing, you can refer to the test_ui in the same directory.
+ 1. Currently, there is a test script located in /tests/metagpt/roles. The file ui_role provides the corresponding code implementation. For testing, you can refer to the test_ui in the same directory.
- 1. The UI role takes over from the product manager role, extending the output from the 【UI Design draft】 provided by
- the product manager role. The UI role has implemented the UIDesign Action. Within the run of UIDesign, it
- processes the respective context, and based on the set template, outputs the UI. The output from the UI role
- includes:
+ 1. The UI role takes over from the product manager role, extending the output from the 【UI Design draft】 provided by the product manager role. The UI role has implemented the UIDesign Action. Within the run of UIDesign, it processes the respective context, and based on the set template, outputs the UI. The output from the UI role includes:
- 1. UI Design Description:Describes the content to be designed and the design objectives.
- 1. Selected Elements:Describes the elements in the design that need to be illustrated.
- 1. HTML Layout:Outputs the HTML code for the page.
- 1. CSS Styles (styles.css):Outputs the CSS code for the page.
+ 1. UI Design Description:Describes the content to be designed and the design objectives.
+ 1. Selected Elements:Describes the elements in the design that need to be illustrated.
+ 1. HTML Layout:Outputs the HTML code for the page.
+ 1. CSS Styles (styles.css):Outputs the CSS code for the page.
- 1. Currently, the SD skill is a tool invoked by UIDesign. It instantiates the SDEngine, with specific code found in
- metagpt/tools/sd_engine.
+ 1. Currently, the SD skill is a tool invoked by UIDesign. It instantiates the SDEngine, with specific code found in metagpt/tools/sd_engine.
- 1. Configuration instructions for SD Skills: The SD interface is currently deployed based on
- *https://github.com/AUTOMATIC1111/stable-diffusion-webui* **For environmental configurations and model downloads,
- please refer to the aforementioned GitHub repository. To initiate the SD service that supports API calls, run the
- command specified in cmd with the parameter nowebui, i.e.,
+ 1. Configuration instructions for SD Skills: The SD interface is currently deployed based on *https://github.com/AUTOMATIC1111/stable-diffusion-webui* **For environmental configurations and model downloads, please refer to the aforementioned GitHub repository. To initiate the SD service that supports API calls, run the command specified in cmd with the parameter nowebui, i.e.,
1. > python webui.py --enable-insecure-extension-access --port xxx --no-gradio-queue --nowebui
- 1. Once it runs without errors, the interface will be accessible after approximately 1 minute when the model
- finishes loading.
- 1. Configure SD_URL and SD_T2I_API in the config.yaml/key.yaml files.
- 1. 
- 1. SD_URL is the deployed server/machine IP, and Port is the specified port above, defaulting to 7860.
+ 1. Once it runs without errors, the interface will be accessible after approximately 1 minute when the model finishes loading.
+ 1. Configure SD_URL and SD_T2I_API in the config.yaml/key.yaml files.
+ 1. 
+ 1. SD_URL is the deployed server/machine IP, and Port is the specified port above, defaulting to 7860.
1. > SD_URL: IP:Port
-1. An error occurred during installation: "Another program is using this file...egg".
+1. An error occurred during installation: "Another program is using this file...egg".
- 1. Delete the file and try again.
- 1. Or manually execute`pip install -r requirements.txt`
+ 1. Delete the file and try again.
+    1. Or manually execute `pip install -r requirements.txt`
-1. The origin of the name MetaGPT?
+1. The origin of the name MetaGPT?
- 1. The name was derived after iterating with GPT-4 over a dozen rounds. GPT-4 scored and suggested it.
+ 1. The name was derived after iterating with GPT-4 over a dozen rounds. GPT-4 scored and suggested it.
-1. Is there a more step-by-step installation tutorial?
+1. Is there a more step-by-step installation tutorial?
- 1.
- Youtube(CN):[一个提示词写游戏 Flappy bird, 比AutoGPT强10倍的MetaGPT,最接近AGI的AI项目=一个软件公司产品经理+程序员](https://youtu.be/Bp95b8yIH5c)
- 1. Youtube(EN)https://www.youtube.com/watch?v=q16Gi9pTG_M&t=659s
- 2. video(EN): [MetaGPT Matthew Berman](https://youtu.be/uT75J_KG_aY?si=EgbfQNAwD8F5Y1Ak)
+ 1. Youtube(CN):[一个提示词写游戏 Flappy bird, 比AutoGPT强10倍的MetaGPT,最接近AGI的AI项目=一个软件公司产品经理+程序员](https://youtu.be/Bp95b8yIH5c)
+    1. Youtube(EN): https://www.youtube.com/watch?v=q16Gi9pTG_M&t=659s
+ 2. video(EN): [MetaGPT Matthew Berman](https://youtu.be/uT75J_KG_aY?si=EgbfQNAwD8F5Y1Ak)
-1. openai.error.RateLimitError: You exceeded your current quota, please check your plan and billing details
+1. openai.error.RateLimitError: You exceeded your current quota, please check your plan and billing details
- 1. If you haven't exhausted your free quota, set RPM to 3 or lower in the settings.
- 1. If your free quota is used up, consider adding funds to your account.
+ 1. If you haven't exhausted your free quota, set RPM to 3 or lower in the settings.
+ 1. If your free quota is used up, consider adding funds to your account.
-1. What does "borg" mean in n_borg?
+1. What does "borg" mean in n_borg?
- 1. [Wikipedia borg meaning ](https://en.wikipedia.org/wiki/Borg)
- 1. The Borg civilization operates based on a hive or collective mentality, known as "the Collective." Every Borg
- individual is connected to the collective via a sophisticated subspace network, ensuring continuous oversight and
- guidance for every member. This collective consciousness allows them to not only "share the same thoughts" but
- also to adapt swiftly to new strategies. While individual members of the collective rarely communicate, the
- collective "voice" sometimes transmits aboard ships.
+ 1. [Wikipedia borg meaning ](https://en.wikipedia.org/wiki/Borg)
+ 1. The Borg civilization operates based on a hive or collective mentality, known as "the Collective." Every Borg individual is connected to the collective via a sophisticated subspace network, ensuring continuous oversight and guidance for every member. This collective consciousness allows them to not only "share the same thoughts" but also to adapt swiftly to new strategies. While individual members of the collective rarely communicate, the collective "voice" sometimes transmits aboard ships.
-1. How to use the Claude API?
+1. How to use the Claude API?
- 1. The full implementation of the Claude API is not provided in the current code.
- 1. You can use the Claude API through third-party API conversion projects
- like: https://github.com/jtsang4/claude-to-chatgpt
+ 1. The full implementation of the Claude API is not provided in the current code.
+ 1. You can use the Claude API through third-party API conversion projects like: https://github.com/jtsang4/claude-to-chatgpt
-1. Is Llama2 supported?
+1. Is Llama2 supported?
- 1. On the day Llama2 was released, some of the community members began experiments and found that output can be
- generated based on MetaGPT's structure. However, Llama2's context is too short to generate a complete project.
- Before regularly using Llama2, it's necessary to expand the context window to at least 8k. If anyone has good
- recommendations for expansion models or methods, please leave a comment.
+ 1. On the day Llama2 was released, some of the community members began experiments and found that output can be generated based on MetaGPT's structure. However, Llama2's context is too short to generate a complete project. Before regularly using Llama2, it's necessary to expand the context window to at least 8k. If anyone has good recommendations for expansion models or methods, please leave a comment.
-1. `mermaid-cli getElementsByTagName SyntaxError: Unexpected token '.'`
+1. `mermaid-cli getElementsByTagName SyntaxError: Unexpected token '.'`
- 1. Upgrade node to version 14.x or above:
+ 1. Upgrade node to version 14.x or above:
- 1. `npm install -g n`
- 1. `n stable` to install the stable version of node(v18.x)
+ 1. `npm install -g n`
+ 1. `n stable` to install the stable version of node(v18.x)
diff --git a/docs/README_CN.md b/docs/README_CN.md
index cc4dfee34..308d6a131 100644
--- a/docs/README_CN.md
+++ b/docs/README_CN.md
@@ -27,7 +27,7 @@ # MetaGPT: 多智能体框架
1. MetaGPT输入**一句话的老板需求**,输出**用户故事 / 竞品分析 / 需求 / 数据结构 / APIs / 文件等**
2. MetaGPT内部包括**产品经理 / 架构师 / 项目经理 / 工程师**,它提供了一个**软件公司**的全过程与精心调配的SOP
- 1. `Code = SOP(Team)` 是核心哲学。我们将SOP具象化,并且用于LLM构成的团队
+ 1. `Code = SOP(Team)` 是核心哲学。我们将SOP具象化,并且用于LLM构成的团队

@@ -37,6 +37,7 @@ ## MetaGPT 的能力
https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419
+
## 示例(均由 GPT-4 生成)
例如,键入`python startup.py "写个类似今日头条的推荐系统"`并回车,你会获得一系列输出,其一是数据结构与API设计
@@ -80,9 +81,7 @@ # 第 3 步:克隆仓库到您的本地机器,并进行安装。
MMDC: "./node_modules/.bin/mmdc"
```
-- 如果`pip install -e.`
- 失败并显示错误`[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`
- ,请尝试使用`pip install -e. --user`运行。
+- 如果`pip install -e.`失败并显示错误`[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`,请尝试使用`pip install -e. --user`运行。
### Docker安装
@@ -137,10 +136,10 @@ # 复制配置文件并进行必要的修改
cp config/config.yaml config/key.yaml
```
-| 变量名 | config/key.yaml | env |
-|----------------------------|-------------------------------------------|-------------------------------------------------|
+| 变量名 | config/key.yaml | env |
+| ----------------------------------- | ----------------------------------------- | ----------------------------------------------- |
| OPENAI_API_KEY # 用您自己的密钥替换 | OPENAI_API_KEY: "sk-..." | export OPENAI_API_KEY="sk-..." |
-| OPENAI_API_BASE # 可选 | OPENAI_API_BASE: "https:///v1" | export OPENAI_API_BASE="https:///v1" |
+| OPENAI_API_BASE # 可选 | OPENAI_API_BASE: "https:///v1" | export OPENAI_API_BASE="https:///v1" |
## 示例:启动一个创业公司
@@ -151,12 +150,9 @@ # 开启code review模式会花费更多的金钱, 但是会提升代码质量
```
运行脚本后,您可以在 `workspace/` 目录中找到您的新项目。
-
### 平台或工具的倾向性
-
可以在阐述需求时说明想要使用的平台或工具。
例如:
-
```shell
python startup.py "写一个基于pygame的命令行贪吃蛇"
```
@@ -209,8 +205,7 @@ ### 代码实现
你可以查看`examples`,其中有单角色(带知识库)的使用例子与仅LLM的使用例子。
## 快速体验
-
-对一些用户来说,安装配置本地环境是有困难的,下面这些教程能够让你快速体验到MetaGPT的魅力。
+对一些用户来说,安装配置本地环境是有困难的,下面这些教程能够让你快速体验到MetaGPT的魅力。
- [MetaGPT快速体验](https://deepwisdom.feishu.cn/wiki/Q8ycw6J9tiNXdHk66MRcIN8Pnlg)
@@ -223,8 +218,7 @@ ## 联系信息
如果您对这个项目有任何问题或反馈,欢迎联系我们。我们非常欢迎您的建议!
- **邮箱:** alexanderwu@fuzhi.ai
-- **GitHub 问题:** 对于更技术性的问题,您也可以在我们的 [GitHub 仓库](https://github.com/geekan/metagpt/issues)
- 中创建一个新的问题。
+- **GitHub 问题:** 对于更技术性的问题,您也可以在我们的 [GitHub 仓库](https://github.com/geekan/metagpt/issues) 中创建一个新的问题。
我们会在2-3个工作日内回复所有问题。
diff --git a/docs/README_JA.md b/docs/README_JA.md
index 040a852f6..6ffc80ac7 100644
--- a/docs/README_JA.md
+++ b/docs/README_JA.md
@@ -26,9 +26,8 @@ # MetaGPT: マルチエージェントフレームワーク
1. MetaGPT は、**1 行の要件** を入力とし、**ユーザーストーリー / 競合分析 / 要件 / データ構造 / API / 文書など** を出力します。
-2. MetaGPT には、**プロダクト マネージャー、アーキテクト、プロジェクト マネージャー、エンジニア** が含まれています。MetaGPT
- は、**ソフトウェア会社のプロセス全体を、慎重に調整された SOP とともに提供します。**
- 1. `Code = SOP(Team)` が基本理念です。私たちは SOP を具体化し、LLM で構成されるチームに適用します。
+2. MetaGPT には、**プロダクト マネージャー、アーキテクト、プロジェクト マネージャー、エンジニア** が含まれています。MetaGPT は、**ソフトウェア会社のプロセス全体を、慎重に調整された SOP とともに提供します。**
+ 1. `Code = SOP(Team)` が基本理念です。私たちは SOP を具体化し、LLM で構成されるチームに適用します。

@@ -38,6 +37,7 @@ ## MetaGPTの能力
https://github.com/geekan/MetaGPT/assets/34952977/34345016-5d13-489d-b9f9-b82ace413419
+
## 例(GPT-4 で完全生成)
例えば、`python startup.py "Toutiao のような RecSys をデザインする"`と入力すると、多くの出力が得られます
@@ -70,12 +70,10 @@ # ステップ 3: リポジトリをローカルマシンにクローンし、
**注:**
-- すでに Chrome、Chromium、MS Edge がインストールされている場合は、環境変数 `PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` を `true`
- に設定することで、
- Chromium のダウンロードをスキップすることができます。
+- すでに Chrome、Chromium、MS Edge がインストールされている場合は、環境変数 `PUPPETEER_SKIP_CHROMIUM_DOWNLOAD` を `true` に設定することで、
+  Chromium のダウンロードをスキップすることができます。
-- このツールをグローバルにインストールする[問題を抱えている](https://github.com/mermaidjs/mermaid.cli/issues/15)
- 人もいます。ローカルにインストールするのが代替の解決策です、
+- このツールをグローバルにインストールする[問題を抱えている](https://github.com/mermaidjs/mermaid.cli/issues/15)人もいます。ローカルにインストールするのが代替の解決策です、
```bash
npm install @mermaid-js/mermaid-cli
@@ -88,9 +86,7 @@ # ステップ 3: リポジトリをローカルマシンにクローンし、
MMDC: "./node_modules/.bin/mmdc"
```
-- もし `pip install -e.`
- がエラー `[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`
- で失敗したら、代わりに `pip install -e. --user` を実行してみてください
+- もし `pip install -e.` がエラー `[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'` で失敗したら、代わりに `pip install -e. --user` を実行してみてください
### Docker によるインストール
@@ -145,10 +141,10 @@ # 設定ファイルをコピーし、必要な修正を加える。
cp config/config.yaml config/key.yaml
```
-| 変数名 | config/key.yaml | env |
-|------------------------------|-------------------------------------------|-------------------------------------------------|
+| 変数名 | config/key.yaml | env |
+| --------------------------------------- | ----------------------------------------- | ----------------------------------------------- |
| OPENAI_API_KEY # 自分のキーに置き換える | OPENAI_API_KEY: "sk-..." | export OPENAI_API_KEY="sk-..." |
-| OPENAI_API_BASE # オプション | OPENAI_API_BASE: "https:///v1" | export OPENAI_API_BASE="https:///v1" |
+| OPENAI_API_BASE # オプション | OPENAI_API_BASE: "https:///v1" | export OPENAI_API_BASE="https:///v1" |
## チュートリアル: スタートアップの開始
@@ -225,7 +221,6 @@ ## クイックスタート
- [MetaGPT クイックスタート](https://deepwisdom.feishu.cn/wiki/CyY9wdJc4iNqArku3Lncl4v8n2b)
Hugging Face Space で試す
-
- https://huggingface.co/spaces/deepwisdom/MetaGPT
## 引用
@@ -248,8 +243,7 @@ ## お問い合わせ先
このプロジェクトに関するご質問やご意見がございましたら、お気軽にお問い合わせください。皆様のご意見をお待ちしております!
- **Email:** alexanderwu@fuzhi.ai
-- **GitHub Issues:** 技術的なお問い合わせについては、[GitHub リポジトリ](https://github.com/geekan/metagpt/issues) に新しい
- issue を作成することもできます。
+- **GitHub Issues:** 技術的なお問い合わせについては、[GitHub リポジトリ](https://github.com/geekan/metagpt/issues) に新しい issue を作成することもできます。
ご質問には 2-3 営業日以内に回答いたします。
diff --git a/docs/ROADMAP.md b/docs/ROADMAP.md
index a205157c5..005a59ab2 100644
--- a/docs/ROADMAP.md
+++ b/docs/ROADMAP.md
@@ -1,3 +1,4 @@
+
## Roadmap
### Long-term Objective
@@ -15,69 +16,67 @@ ### Tasks
To reach version v0.5, approximately 70% of the following tasks need to be completed.
1. Usability
- 1. Release v0.01 pip package to try to solve issues like npm installation (though not necessarily successfully)
- 2. Support for overall save and recovery of software companies
- 3. Support human confirmation and modification during the process
- 4. Support process caching: Consider carefully whether to add server caching mechanism
- 5. Resolve occasional failure to follow instruction under current prompts, causing code parsing errors, through
- stricter system prompts
- 6. Write documentation, describing the current features and usage at all levels
- 7. ~~Support Docker~~
+ 1. Release v0.01 pip package to try to solve issues like npm installation (though not necessarily successfully)
+ 2. Support for overall save and recovery of software companies
+ 3. Support human confirmation and modification during the process
+ 4. Support process caching: Consider carefully whether to add server caching mechanism
+ 5. Resolve occasional failure to follow instruction under current prompts, causing code parsing errors, through stricter system prompts
+ 6. Write documentation, describing the current features and usage at all levels
+ 7. ~~Support Docker~~
2. Features
- 1. Support a more standard and stable parser (need to analyze the format that the current LLM is better at)
- 2. ~~Establish a separate output queue, differentiated from the message queue~~
- 3. Attempt to atomize all role work, but this may significantly increase token overhead
- 4. Complete the design and implementation of module breakdown
- 5. Support various modes of memory: clearly distinguish between long-term and short-term memory
- 6. Perfect the test role, and carry out necessary interactions with humans
- 7. Provide full mode instead of the current fast mode, allowing natural communication between roles
- 8. Implement SkillManager and the process of incremental Skill learning
- 9. Automatically get RPM and configure it by calling the corresponding openai page, so that each key does not need
- to be manually configured
+ 1. Support a more standard and stable parser (need to analyze the format that the current LLM is better at)
+ 2. ~~Establish a separate output queue, differentiated from the message queue~~
+ 3. Attempt to atomize all role work, but this may significantly increase token overhead
+ 4. Complete the design and implementation of module breakdown
+ 5. Support various modes of memory: clearly distinguish between long-term and short-term memory
+ 6. Perfect the test role, and carry out necessary interactions with humans
+ 7. Provide full mode instead of the current fast mode, allowing natural communication between roles
+ 8. Implement SkillManager and the process of incremental Skill learning
+ 9. Automatically get RPM and configure it by calling the corresponding openai page, so that each key does not need to be manually configured
3. Strategies
- 1. Support ReAct strategy
- 2. Support CoT strategy
- 3. Support ToT strategy
- 4. Support Reflection strategy
+ 1. Support ReAct strategy
+ 2. Support CoT strategy
+ 3. Support ToT strategy
+ 4. Support Reflection strategy
4. Actions
- 1. Implementation: Search
- 2. Implementation: Knowledge search, supporting 10+ data formats
- 3. Implementation: Data EDA
- 4. Implementation: Review
- 5. Implementation: Add Document
- 6. Implementation: Delete Document
- 7. Implementation: Self-training
- 8. Implementation: DebugError
- 9. Implementation: Generate reliable unit tests based on YAPI
- 10. Implementation: Self-evaluation
- 11. Implementation: AI Invocation
- 12. Implementation: Learning and using third-party standard libraries
- 13. Implementation: Data collection
- 14. Implementation: AI training
- 15. Implementation: Run code
- 16. Implementation: Web access
+ 1. Implementation: Search
+ 2. Implementation: Knowledge search, supporting 10+ data formats
+ 3. Implementation: Data EDA
+ 4. Implementation: Review
+ 5. Implementation: Add Document
+ 6. Implementation: Delete Document
+ 7. Implementation: Self-training
+ 8. Implementation: DebugError
+ 9. Implementation: Generate reliable unit tests based on YAPI
+ 10. Implementation: Self-evaluation
+ 11. Implementation: AI Invocation
+ 12. Implementation: Learning and using third-party standard libraries
+ 13. Implementation: Data collection
+ 14. Implementation: AI training
+ 15. Implementation: Run code
+ 16. Implementation: Web access
5. Plugins: Compatibility with plugin system
6. Tools
- 1. ~~Support SERPER api~~
- 2. ~~Support Selenium apis~~
- 3. ~~Support Playwright apis~~
+ 1. ~~Support SERPER api~~
+ 2. ~~Support Selenium apis~~
+ 3. ~~Support Playwright apis~~
7. Roles
- 1. Perfect the action pool/skill pool for each role
- 2. Red Book blogger
- 3. E-commerce seller
- 4. Data analyst
- 5. News observer
- 6. Institutional researcher
+ 1. Perfect the action pool/skill pool for each role
+ 2. Red Book blogger
+ 3. E-commerce seller
+ 4. Data analyst
+ 5. News observer
+ 6. Institutional researcher
8. Evaluation
- 1. Support an evaluation on a game dataset
- 2. Reproduce papers, implement full skill acquisition for a single game role, achieving SOTA results
- 3. Support an evaluation on a math dataset
- 4. Reproduce papers, achieving SOTA results for current mathematical problem solving process
+ 1. Support an evaluation on a game dataset
+ 2. Reproduce papers, implement full skill acquisition for a single game role, achieving SOTA results
+ 3. Support an evaluation on a math dataset
+ 4. Reproduce papers, achieving SOTA results for current mathematical problem solving process
9. LLM
- 1. Support Claude underlying API
- 2. ~~Support Azure asynchronous API~~
- 3. Support streaming version of all APIs
- 4. ~~Make gpt-3.5-turbo available (HARD)~~
+ 1. Support Claude underlying API
+ 2. ~~Support Azure asynchronous API~~
+ 3. Support streaming version of all APIs
+ 4. ~~Make gpt-3.5-turbo available (HARD)~~
10. Other
1. Clean up existing unused code
2. Unify all code styles and establish contribution standards
diff --git a/docs/resources/workspace/content_rec_sys/resources/competitive_analysis.svg b/docs/resources/workspace/content_rec_sys/resources/competitive_analysis.svg
index c186e4ce5..785fdafcb 100644
--- a/docs/resources/workspace/content_rec_sys/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/content_rec_sys/resources/competitive_analysis.svg
@@ -1,128 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/content_rec_sys/resources/data_api_design.svg b/docs/resources/workspace/content_rec_sys/resources/data_api_design.svg
index f9bf692df..a39c84375 100644
--- a/docs/resources/workspace/content_rec_sys/resources/data_api_design.svg
+++ b/docs/resources/workspace/content_rec_sys/resources/data_api_design.svg
@@ -1,1020 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/content_rec_sys/resources/seq_flow.svg b/docs/resources/workspace/content_rec_sys/resources/seq_flow.svg
index e73110109..d73482917 100644
--- a/docs/resources/workspace/content_rec_sys/resources/seq_flow.svg
+++ b/docs/resources/workspace/content_rec_sys/resources/seq_flow.svg
@@ -1,342 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/llmops_framework/resources/competitive_analysis.svg b/docs/resources/workspace/llmops_framework/resources/competitive_analysis.svg
index 0b3ac4860..541df8d18 100644
--- a/docs/resources/workspace/llmops_framework/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/llmops_framework/resources/competitive_analysis.svg
@@ -1,128 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/llmops_framework/resources/data_api_design.svg b/docs/resources/workspace/llmops_framework/resources/data_api_design.svg
index 6423bb4c3..244af9965 100644
--- a/docs/resources/workspace/llmops_framework/resources/data_api_design.svg
+++ b/docs/resources/workspace/llmops_framework/resources/data_api_design.svg
@@ -1,631 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/llmops_framework/resources/seq_flow.svg b/docs/resources/workspace/llmops_framework/resources/seq_flow.svg
index 2519bd2ff..02a826df8 100644
--- a/docs/resources/workspace/llmops_framework/resources/seq_flow.svg
+++ b/docs/resources/workspace/llmops_framework/resources/seq_flow.svg
@@ -1,323 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/match3_puzzle_game/resources/competitive_analysis.svg b/docs/resources/workspace/match3_puzzle_game/resources/competitive_analysis.svg
index 684e608ef..43a164f19 100644
--- a/docs/resources/workspace/match3_puzzle_game/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/match3_puzzle_game/resources/competitive_analysis.svg
@@ -1,128 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/match3_puzzle_game/resources/data_api_design.svg b/docs/resources/workspace/match3_puzzle_game/resources/data_api_design.svg
index 4977a8e7a..95268f914 100644
--- a/docs/resources/workspace/match3_puzzle_game/resources/data_api_design.svg
+++ b/docs/resources/workspace/match3_puzzle_game/resources/data_api_design.svg
@@ -1,947 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/match3_puzzle_game/resources/seq_flow.svg b/docs/resources/workspace/match3_puzzle_game/resources/seq_flow.svg
index 533860c56..d1f914c1e 100644
--- a/docs/resources/workspace/match3_puzzle_game/resources/seq_flow.svg
+++ b/docs/resources/workspace/match3_puzzle_game/resources/seq_flow.svg
@@ -1,425 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/minimalist_pomodoro_timer/resources/competitive_analysis.svg b/docs/resources/workspace/minimalist_pomodoro_timer/resources/competitive_analysis.svg
index 98686dff0..57edac51c 100644
--- a/docs/resources/workspace/minimalist_pomodoro_timer/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/minimalist_pomodoro_timer/resources/competitive_analysis.svg
@@ -1,116 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/minimalist_pomodoro_timer/resources/data_api_design.svg b/docs/resources/workspace/minimalist_pomodoro_timer/resources/data_api_design.svg
index d58333117..78378cee8 100644
--- a/docs/resources/workspace/minimalist_pomodoro_timer/resources/data_api_design.svg
+++ b/docs/resources/workspace/minimalist_pomodoro_timer/resources/data_api_design.svg
@@ -1,203 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/minimalist_pomodoro_timer/resources/seq_flow.svg b/docs/resources/workspace/minimalist_pomodoro_timer/resources/seq_flow.svg
index 50f75d8b7..cc5926374 100644
--- a/docs/resources/workspace/minimalist_pomodoro_timer/resources/seq_flow.svg
+++ b/docs/resources/workspace/minimalist_pomodoro_timer/resources/seq_flow.svg
@@ -1,219 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/pyrogue/resources/competitive_analysis.svg b/docs/resources/workspace/pyrogue/resources/competitive_analysis.svg
index 7c284f60b..14d378ed6 100644
--- a/docs/resources/workspace/pyrogue/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/pyrogue/resources/competitive_analysis.svg
@@ -1,128 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/pyrogue/resources/data_api_design.svg b/docs/resources/workspace/pyrogue/resources/data_api_design.svg
index dc4ccc940..1558d388b 100644
--- a/docs/resources/workspace/pyrogue/resources/data_api_design.svg
+++ b/docs/resources/workspace/pyrogue/resources/data_api_design.svg
@@ -1,747 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/pyrogue/resources/seq_flow.svg b/docs/resources/workspace/pyrogue/resources/seq_flow.svg
index a389ade4b..7b4400ed6 100644
--- a/docs/resources/workspace/pyrogue/resources/seq_flow.svg
+++ b/docs/resources/workspace/pyrogue/resources/seq_flow.svg
@@ -1,354 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/search_algorithm_framework/resources/competitive_analysis.svg b/docs/resources/workspace/search_algorithm_framework/resources/competitive_analysis.svg
index 0c6c0398c..3348a097c 100644
--- a/docs/resources/workspace/search_algorithm_framework/resources/competitive_analysis.svg
+++ b/docs/resources/workspace/search_algorithm_framework/resources/competitive_analysis.svg
@@ -1,128 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/search_algorithm_framework/resources/data_api_design.svg b/docs/resources/workspace/search_algorithm_framework/resources/data_api_design.svg
index a3939f0fb..2443dacc5 100644
--- a/docs/resources/workspace/search_algorithm_framework/resources/data_api_design.svg
+++ b/docs/resources/workspace/search_algorithm_framework/resources/data_api_design.svg
@@ -1,515 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/docs/resources/workspace/search_algorithm_framework/resources/seq_flow.svg b/docs/resources/workspace/search_algorithm_framework/resources/seq_flow.svg
index 4053034e6..19e39db28 100644
--- a/docs/resources/workspace/search_algorithm_framework/resources/seq_flow.svg
+++ b/docs/resources/workspace/search_algorithm_framework/resources/seq_flow.svg
@@ -1,321 +1 @@
-
\ No newline at end of file
+
\ No newline at end of file
diff --git a/examples/agent_creator.py b/examples/agent_creator.py
index 9131fc5ef..e03a88c6b 100644
--- a/examples/agent_creator.py
+++ b/examples/agent_creator.py
@@ -5,18 +5,18 @@ Author: garylin2099
'''
import re
-from metagpt.actions import Action
from metagpt.const import PROJECT_ROOT, WORKSPACE_ROOT
-from metagpt.logs import logger
+from metagpt.actions import Action
from metagpt.roles import Role
from metagpt.schema import Message
+from metagpt.logs import logger
with open(PROJECT_ROOT / "examples/build_customized_agent.py", "r") as f:
# use official example script to guide AgentCreator
MULTI_ACTION_AGENT_CODE_EXAMPLE = f.read()
-
class CreateAgent(Action):
+
PROMPT_TEMPLATE = """
### BACKGROUND
You are using an agent framework called metagpt to write agents capable of different actions,
@@ -34,6 +34,7 @@ class CreateAgent(Action):
"""
async def run(self, example: str, instruction: str):
+
prompt = self.PROMPT_TEMPLATE.format(example=example, instruction=instruction)
# logger.info(prompt)
@@ -52,14 +53,13 @@ class CreateAgent(Action):
f.write(code_text)
return code_text
-
class AgentCreator(Role):
def __init__(
- self,
- name: str = "Matrix",
- profile: str = "AgentCreator",
- agent_template: str = MULTI_ACTION_AGENT_CODE_EXAMPLE,
- **kwargs,
+ self,
+ name: str = "Matrix",
+ profile: str = "AgentCreator",
+ agent_template: str = MULTI_ACTION_AGENT_CODE_EXAMPLE,
+ **kwargs,
):
super().__init__(name, profile, **kwargs)
self._init_actions([CreateAgent])
@@ -76,12 +76,11 @@ class AgentCreator(Role):
return msg
-
if __name__ == "__main__":
import asyncio
-
async def main():
+
agent_template = MULTI_ACTION_AGENT_CODE_EXAMPLE
creator = AgentCreator(agent_template=agent_template)
@@ -98,5 +97,4 @@ if __name__ == "__main__":
"""
await creator.run(msg)
-
asyncio.run(main())
diff --git a/examples/build_customized_agent.py b/examples/build_customized_agent.py
index b9e27608b..87d7a9c76 100644
--- a/examples/build_customized_agent.py
+++ b/examples/build_customized_agent.py
@@ -3,19 +3,19 @@ Filename: MetaGPT/examples/build_customized_agent.py
Created Date: Tuesday, September 19th 2023, 6:52:25 pm
Author: garylin2099
'''
-import asyncio
import re
import subprocess
+import asyncio
import fire
from metagpt.actions import Action
-from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
-
+from metagpt.logs import logger
class SimpleWriteCode(Action):
+
PROMPT_TEMPLATE = """
Write a python function that can {instruction} and provide two runnnable test cases.
Return ```python your_code_here ``` with NO other texts,
@@ -35,6 +35,7 @@ class SimpleWriteCode(Action):
super().__init__(name, context, llm)
async def run(self, instruction: str):
+
prompt = self.PROMPT_TEMPLATE.format(instruction=instruction)
rsp = await self._aask(prompt)
@@ -50,7 +51,6 @@ class SimpleWriteCode(Action):
code_text = match.group(1) if match else rsp
return code_text
-
class SimpleRunCode(Action):
def __init__(self, name="SimpleRunCode", context=None, llm=None):
super().__init__(name, context, llm)
@@ -61,13 +61,12 @@ class SimpleRunCode(Action):
logger.info(f"{code_result=}")
return code_result
-
class SimpleCoder(Role):
def __init__(
- self,
- name: str = "Alice",
- profile: str = "SimpleCoder",
- **kwargs,
+ self,
+ name: str = "Alice",
+ profile: str = "SimpleCoder",
+ **kwargs,
):
super().__init__(name, profile, **kwargs)
self._init_actions([SimpleWriteCode])
@@ -76,7 +75,7 @@ class SimpleCoder(Role):
logger.info(f"{self._setting}: ready to {self._rc.todo}")
todo = self._rc.todo
- msg = self._rc.memory.get()[-1] # retrieve the latest memory
+ msg = self._rc.memory.get()[-1] # retrieve the latest memory
instruction = msg.content
code_text = await SimpleWriteCode().run(instruction)
@@ -84,13 +83,12 @@ class SimpleCoder(Role):
return msg
-
class RunnableCoder(Role):
def __init__(
- self,
- name: str = "Alice",
- profile: str = "RunnableCoder",
- **kwargs,
+ self,
+ name: str = "Alice",
+ profile: str = "RunnableCoder",
+ **kwargs,
):
super().__init__(name, profile, **kwargs)
self._init_actions([SimpleWriteCode, SimpleRunCode])
@@ -130,7 +128,6 @@ class RunnableCoder(Role):
await self._act()
return Message(content="All job done", role=self.profile)
-
def main(msg="write a function that calculates the sum of a list"):
# role = SimpleCoder()
role = RunnableCoder()
@@ -138,6 +135,5 @@ def main(msg="write a function that calculates the sum of a list"):
result = asyncio.run(role.run(msg))
logger.info(result)
-
if __name__ == '__main__':
fire.Fire(main)
diff --git a/examples/debate.py b/examples/debate.py
index d0b9fecd8..05db28070 100644
--- a/examples/debate.py
+++ b/examples/debate.py
@@ -5,15 +5,13 @@ Author: garylin2099
'''
import asyncio
import platform
-
import fire
+from metagpt.software_company import SoftwareCompany
from metagpt.actions import Action, BossRequirement
-from metagpt.logs import logger
from metagpt.roles import Role
from metagpt.schema import Message
-from metagpt.software_company import SoftwareCompany
-
+from metagpt.logs import logger
class ShoutOut(Action):
"""Action: Shout out loudly in a debate (quarrel)"""
@@ -33,6 +31,7 @@ class ShoutOut(Action):
super().__init__(name, context, llm)
async def run(self, context: str, name: str, opponent_name: str):
+
prompt = self.PROMPT_TEMPLATE.format(context=context, name=name, opponent_name=opponent_name)
# logger.info(prompt)
@@ -40,13 +39,12 @@ class ShoutOut(Action):
return rsp
-
class Trump(Role):
def __init__(
- self,
- name: str = "Trump",
- profile: str = "Republican",
- **kwargs,
+ self,
+ name: str = "Trump",
+ profile: str = "Republican",
+ **kwargs,
):
super().__init__(name, profile, **kwargs)
self._init_actions([ShoutOut])
@@ -57,7 +55,7 @@ class Trump(Role):
async def _observe(self) -> int:
await super()._observe()
# accept messages sent (from opponent) to self, disregard own messages from the last round
- self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
+ self._rc.news = [msg for msg in self._rc.news if msg.send_to == self.name]
return len(self._rc.news)
async def _act(self) -> Message:
@@ -81,13 +79,12 @@ class Trump(Role):
return msg
-
class Biden(Role):
def __init__(
- self,
- name: str = "Biden",
- profile: str = "Democrat",
- **kwargs,
+ self,
+ name: str = "Biden",
+ profile: str = "Democrat",
+ **kwargs,
):
super().__init__(name, profile, **kwargs)
self._init_actions([ShoutOut])
@@ -123,7 +120,6 @@ class Biden(Role):
return msg
-
async def startup(idea: str, investment: float = 3.0, n_round: int = 5,
code_review: bool = False, run_tests: bool = False):
"""We reuse the startup paradigm for roles to interact with each other.
diff --git a/examples/search_with_specific_engine.py b/examples/search_with_specific_engine.py
index 9309e18bd..7cc431cd4 100644
--- a/examples/search_with_specific_engine.py
+++ b/examples/search_with_specific_engine.py
@@ -6,12 +6,11 @@ from metagpt.tools import SearchEngineType
async def main():
# Serper API
- # await Searcher(engine = SearchEngineType.SERPER_GOOGLE).run(["What are some good sun protection products?","What are some of the best beaches?"])
+ #await Searcher(engine = SearchEngineType.SERPER_GOOGLE).run(["What are some good sun protection products?","What are some of the best beaches?"])
# SerpAPI
- # await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run("What are the best ski brands for skiers?")
+ #await Searcher(engine=SearchEngineType.SERPAPI_GOOGLE).run("What are the best ski brands for skiers?")
# Google API
await Searcher(engine=SearchEngineType.DIRECT_GOOGLE).run("What are the most interesting human facts?")
-
if __name__ == '__main__':
asyncio.run(main())
diff --git a/examples/sk_agent.py b/examples/sk_agent.py
index 5236ed8ce..a7513e838 100644
--- a/examples/sk_agent.py
+++ b/examples/sk_agent.py
@@ -9,6 +9,7 @@ import asyncio
from semantic_kernel.core_skills import FileIOSkill, MathSkill, TextSkill, TimeSkill
from semantic_kernel.planning import SequentialPlanner
+
# from semantic_kernel.planning import SequentialPlanner
from semantic_kernel.planning.action_planner.action_planner import ActionPlanner
diff --git a/examples/use_off_the_shelf_agent.py b/examples/use_off_the_shelf_agent.py
index 0debcf780..2e10068bd 100644
--- a/examples/use_off_the_shelf_agent.py
+++ b/examples/use_off_the_shelf_agent.py
@@ -5,9 +5,8 @@ Author: garylin2099
'''
import asyncio
-from metagpt.logs import logger
from metagpt.roles.product_manager import ProductManager
-
+from metagpt.logs import logger
async def main():
msg = "Write a PRD for a snake game"
@@ -15,6 +14,5 @@ async def main():
result = await role.run(msg)
logger.info(result.content[:100])
-
if __name__ == '__main__':
asyncio.run(main())
diff --git a/examples/write_tutorial.py b/examples/write_tutorial.py
index 73a9c71b7..71ece5527 100644
--- a/examples/write_tutorial.py
+++ b/examples/write_tutorial.py
@@ -18,3 +18,4 @@ async def main():
if __name__ == '__main__':
asyncio.run(main())
+
diff --git a/metagpt/_compat.py b/metagpt/_compat.py
index 30abcde2f..c442bd7de 100644
--- a/metagpt/_compat.py
+++ b/metagpt/_compat.py
@@ -8,14 +8,12 @@ if sys.implementation.name == "cpython" and platform.system() == "Windows":
if sys.version_info[:2] == (3, 9):
from asyncio.proactor_events import _ProactorBasePipeTransport
-
# https://github.com/python/cpython/pull/92842
def pacth_del(self, _warn=warnings.warn):
if self._sock is not None:
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
self._sock.close()
-
_ProactorBasePipeTransport.__del__ = pacth_del
if sys.version_info >= (3, 9, 0):
diff --git a/metagpt/actions/action.py b/metagpt/actions/action.py
index 0ae7b148b..790295d55 100644
--- a/metagpt/actions/action.py
+++ b/metagpt/actions/action.py
@@ -51,12 +51,12 @@ class Action(ABC):
@retry(stop=stop_after_attempt(3), wait=wait_fixed(1))
async def _aask_v1(
- self,
- prompt: str,
- output_class_name: str,
- output_data_mapping: dict,
- system_msgs: Optional[list[str]] = None,
- format="markdown", # compatible to original format
+ self,
+ prompt: str,
+ output_class_name: str,
+ output_data_mapping: dict,
+ system_msgs: Optional[list[str]] = None,
+ format="markdown", # compatible to original format
) -> ActionOutput:
"""Append default prefix"""
if not system_msgs:
diff --git a/metagpt/actions/action_output.py b/metagpt/actions/action_output.py
index c0b88dcf9..ea7f4fb80 100644
--- a/metagpt/actions/action_output.py
+++ b/metagpt/actions/action_output.py
@@ -40,3 +40,4 @@ class ActionOutput:
new_class.__validator_check_name = classmethod(check_name)
new_class.__root_validator_check_missing_fields = classmethod(check_missing_fields)
return new_class
+
\ No newline at end of file
diff --git a/metagpt/actions/add_requirement.py b/metagpt/actions/add_requirement.py
index 16e14b3a4..7dc09d062 100644
--- a/metagpt/actions/add_requirement.py
+++ b/metagpt/actions/add_requirement.py
@@ -10,6 +10,5 @@ from metagpt.actions import Action
class BossRequirement(Action):
"""Boss Requirement without any implementation details"""
-
async def run(self, *args, **kwargs):
raise NotImplementedError
diff --git a/metagpt/actions/clone_function.py b/metagpt/actions/clone_function.py
index 7529a60c7..cf7d22f04 100644
--- a/metagpt/actions/clone_function.py
+++ b/metagpt/actions/clone_function.py
@@ -1,5 +1,5 @@
-import traceback
from pathlib import Path
+import traceback
from metagpt.actions.write_code import WriteCode
from metagpt.logs import logger
diff --git a/metagpt/actions/debug_error.py b/metagpt/actions/debug_error.py
index 304b1bc3e..d69a22dba 100644
--- a/metagpt/actions/debug_error.py
+++ b/metagpt/actions/debug_error.py
@@ -7,8 +7,8 @@
"""
import re
-from metagpt.actions.action import Action
from metagpt.logs import logger
+from metagpt.actions.action import Action
from metagpt.utils.common import CodeParser
PROMPT_TEMPLATE = """
@@ -24,8 +24,6 @@ The message is as follows:
Now you should start rewriting the code:
## file name of the code to rewrite: Write code with triple quoto. Do your best to implement THIS IN ONLY ONE FILE.
"""
-
-
class DebugError(Action):
def __init__(self, name="DebugError", context=None, llm=None):
super().__init__(name, context, llm)
@@ -35,17 +33,17 @@ class DebugError(Action):
# f"\n\n{error}\n\nPlease try to fix the error in this code."
# fixed_code = await self._aask(prompt)
# return fixed_code
-
+
async def run(self, context):
if "PASS" in context:
return "", "the original code works fine, no need to debug"
-
+
file_name = re.search("## File To Rewrite:\s*(.+\\.py)", context).group(1)
logger.info(f"Debug and rewrite {file_name}")
prompt = PROMPT_TEMPLATE.format(context=context)
-
+
rsp = await self._aask(prompt)
code = CodeParser.parse_code(block="", text=rsp)
diff --git a/metagpt/actions/design_api_review.py b/metagpt/actions/design_api_review.py
index 687a33652..9bb822a62 100644
--- a/metagpt/actions/design_api_review.py
+++ b/metagpt/actions/design_api_review.py
@@ -19,3 +19,4 @@ class DesignReview(Action):
api_review = await self._aask(prompt)
return api_review
+
\ No newline at end of file
diff --git a/metagpt/actions/design_filenames.py b/metagpt/actions/design_filenames.py
index 6c3d8e803..29400e950 100644
--- a/metagpt/actions/design_filenames.py
+++ b/metagpt/actions/design_filenames.py
@@ -26,3 +26,4 @@ class DesignFilenames(Action):
logger.debug(prompt)
logger.debug(design_filenames)
return design_filenames
+
\ No newline at end of file
diff --git a/metagpt/actions/detail_mining.py b/metagpt/actions/detail_mining.py
index ffae26f9f..e29d6911b 100644
--- a/metagpt/actions/detail_mining.py
+++ b/metagpt/actions/detail_mining.py
@@ -6,6 +6,7 @@
@File : detail_mining.py
"""
from metagpt.actions import Action, ActionOutput
+from metagpt.logs import logger
PROMPT_TEMPLATE = """
##TOPIC
@@ -42,7 +43,6 @@ OUTPUT_MAPPING = {
class DetailMining(Action):
"""This class allows LLM to further mine noteworthy details based on specific "##TOPIC"(discussion topic) and "##RECORD" (discussion records), thereby deepening the discussion.
"""
-
def __init__(self, name="", context=None, llm=None):
super().__init__(name, context, llm)
diff --git a/metagpt/actions/prepare_interview.py b/metagpt/actions/prepare_interview.py
index cbaa1d56b..5db3a9f37 100644
--- a/metagpt/actions/prepare_interview.py
+++ b/metagpt/actions/prepare_interview.py
@@ -27,7 +27,6 @@ Requirement: Provide a list of questions for the interviewer to ask the intervie
Attention: Provide as markdown block as the format above, at least 10 questions.
"""
-
# prepare for a interview
@@ -39,3 +38,4 @@ class PrepareInterview(Action):
prompt = PROMPT_TEMPLATE.format(context=context)
question_list = await self._aask_v1(prompt)
return question_list
+
diff --git a/metagpt/actions/research.py b/metagpt/actions/research.py
index 8a5778230..49a981e86 100644
--- a/metagpt/actions/research.py
+++ b/metagpt/actions/research.py
@@ -3,6 +3,7 @@
from __future__ import annotations
import asyncio
+import json
from typing import Callable
from pydantic import parse_obj_as
@@ -59,6 +60,7 @@ a comprehensive summary of the text.
{content}
'''
+
CONDUCT_RESEARCH_PROMPT = '''### Reference Information
{content}
@@ -76,13 +78,12 @@ above. The report must meet the following requirements:
class CollectLinks(Action):
"""Action class to collect links from a search engine."""
-
def __init__(
- self,
- name: str = "",
- *args,
- rank_func: Callable[[list[str]], None] | None = None,
- **kwargs,
+ self,
+ name: str = "",
+ *args,
+ rank_func: Callable[[list[str]], None] | None = None,
+ **kwargs,
):
super().__init__(name, *args, **kwargs)
self.desc = "Collect links from a search engine."
@@ -90,11 +91,11 @@ class CollectLinks(Action):
self.rank_func = rank_func
async def run(
- self,
- topic: str,
- decomposition_nums: int = 4,
- url_per_query: int = 4,
- system_text: str | None = None,
+ self,
+ topic: str,
+ decomposition_nums: int = 4,
+ url_per_query: int = 4,
+ system_text: str | None = None,
) -> dict[str, list[str]]:
"""Run the action to collect links.
@@ -119,16 +120,13 @@ class CollectLinks(Action):
def gen_msg():
while True:
- search_results = "\n".join(
- f"#### Keyword: {i}\n Search Result: {j}\n" for (i, j) in zip(keywords, results))
- prompt = SUMMARIZE_SEARCH_PROMPT.format(decomposition_nums=decomposition_nums,
- search_results=search_results)
+ search_results = "\n".join(f"#### Keyword: {i}\n Search Result: {j}\n" for (i, j) in zip(keywords, results))
+ prompt = SUMMARIZE_SEARCH_PROMPT.format(decomposition_nums=decomposition_nums, search_results=search_results)
yield prompt
remove = max(results, key=len)
remove.pop()
if len(remove) == 0:
break
-
prompt = reduce_message_length(gen_msg(), self.llm.model, system_text, CONFIG.max_tokens_rsp)
logger.debug(prompt)
queries = await self._aask(prompt, [system_text])
@@ -174,12 +172,11 @@ class CollectLinks(Action):
class WebBrowseAndSummarize(Action):
"""Action class to explore the web and provide summaries of articles and webpages."""
-
def __init__(
- self,
- *args,
- browse_func: Callable[[list[str]], None] | None = None,
- **kwargs,
+ self,
+ *args,
+ browse_func: Callable[[list[str]], None] | None = None,
+ **kwargs,
):
super().__init__(*args, **kwargs)
if CONFIG.model_for_researcher_summary:
@@ -191,11 +188,11 @@ class WebBrowseAndSummarize(Action):
self.desc = "Explore the web and provide summaries of articles and webpages."
async def run(
- self,
- url: str,
- *urls: str,
- query: str,
- system_text: str = RESEARCH_BASE_SYSTEM,
+ self,
+ url: str,
+ *urls: str,
+ query: str,
+ system_text: str = RESEARCH_BASE_SYSTEM,
) -> dict[str, str]:
"""Run the action to browse the web and provide summaries.
@@ -217,8 +214,7 @@ class WebBrowseAndSummarize(Action):
for u, content in zip([url, *urls], contents):
content = content.inner_text
chunk_summaries = []
- for prompt in generate_prompt_chunk(content, prompt_template, self.llm.model, system_text,
- CONFIG.max_tokens_rsp):
+ for prompt in generate_prompt_chunk(content, prompt_template, self.llm.model, system_text, CONFIG.max_tokens_rsp):
logger.debug(prompt)
summary = await self._aask(prompt, [system_text])
if summary == "Not relevant.":
@@ -242,17 +238,16 @@ class WebBrowseAndSummarize(Action):
class ConductResearch(Action):
"""Action class to conduct research and generate a research report."""
-
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if CONFIG.model_for_researcher_report:
self.llm.model = CONFIG.model_for_researcher_report
async def run(
- self,
- topic: str,
- content: str,
- system_text: str = RESEARCH_BASE_SYSTEM,
+ self,
+ topic: str,
+ content: str,
+ system_text: str = RESEARCH_BASE_SYSTEM,
) -> str:
"""Run the action to conduct research and generate a research report.
diff --git a/metagpt/actions/run_code.py b/metagpt/actions/run_code.py
index 52f95d3bf..f69d2cd1a 100644
--- a/metagpt/actions/run_code.py
+++ b/metagpt/actions/run_code.py
@@ -99,7 +99,7 @@ class RunCode(Action):
return stdout.decode("utf-8"), stderr.decode("utf-8")
async def run(
- self, code, mode="script", code_file_name="", test_code="", test_file_name="", command=[], **kwargs
+ self, code, mode="script", code_file_name="", test_code="", test_file_name="", command=[], **kwargs
) -> str:
logger.info(f"Running {' '.join(command)}")
if mode == "script":
diff --git a/metagpt/actions/search_and_summarize.py b/metagpt/actions/search_and_summarize.py
index a13db28d2..069f2a977 100644
--- a/metagpt/actions/search_and_summarize.py
+++ b/metagpt/actions/search_and_summarize.py
@@ -54,6 +54,7 @@ SEARCH_AND_SUMMARIZE_PROMPT = """
"""
+
SEARCH_AND_SUMMARIZE_SALES_SYSTEM = """## Requirements
1. Please summarize the latest dialogue based on the reference information (secondary) and dialogue history (primary). Do not include text that is irrelevant to the conversation.
- The context is for reference only. If it is irrelevant to the user's search request history, please reduce its reference and usage.
@@ -139,3 +140,4 @@ class SearchAndSummarize(Action):
logger.debug(prompt)
logger.debug(result)
return result
+
\ No newline at end of file
diff --git a/metagpt/actions/write_code.py b/metagpt/actions/write_code.py
index 8e26dd9be..c000805c5 100644
--- a/metagpt/actions/write_code.py
+++ b/metagpt/actions/write_code.py
@@ -5,14 +5,13 @@
@Author : alexanderwu
@File : write_code.py
"""
-from tenacity import retry, stop_after_attempt, wait_fixed
-
from metagpt.actions import WriteDesign
from metagpt.actions.action import Action
from metagpt.const import WORKSPACE_ROOT
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.common import CodeParser
+from tenacity import retry, stop_after_attempt, wait_fixed
PROMPT_TEMPLATE = """
NOTICE
@@ -80,3 +79,4 @@ class WriteCode(Action):
# code_rsp = await self._aask_v1(prompt, "code_rsp", OUTPUT_MAPPING)
# self._save(context, filename, code)
return code
+
\ No newline at end of file
diff --git a/metagpt/actions/write_code_review.py b/metagpt/actions/write_code_review.py
index 6211d5e1e..4ff4d6cf6 100644
--- a/metagpt/actions/write_code_review.py
+++ b/metagpt/actions/write_code_review.py
@@ -6,12 +6,11 @@
@File : write_code_review.py
"""
-from tenacity import retry, stop_after_attempt, wait_fixed
-
from metagpt.actions.action import Action
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.common import CodeParser
+from tenacity import retry, stop_after_attempt, wait_fixed
PROMPT_TEMPLATE = """
NOTICE
@@ -80,3 +79,4 @@ class WriteCodeReview(Action):
# code_rsp = await self._aask_v1(prompt, "code_rsp", OUTPUT_MAPPING)
# self._save(context, filename, code)
return code
+
\ No newline at end of file
diff --git a/metagpt/actions/write_docstring.py b/metagpt/actions/write_docstring.py
index a9c90ce9b..5c7815793 100644
--- a/metagpt/actions/write_docstring.py
+++ b/metagpt/actions/write_docstring.py
@@ -162,9 +162,9 @@ class WriteDocstring(Action):
self.desc = "Write docstring for code."
async def run(
- self, code: str,
- system_text: str = PYTHON_DOCSTRING_SYSTEM,
- style: Literal["google", "numpy", "sphinx"] = "google",
+ self, code: str,
+ system_text: str = PYTHON_DOCSTRING_SYSTEM,
+ style: Literal["google", "numpy", "sphinx"] = "google",
) -> str:
"""Writes docstrings for the given code and system text in the specified style.
@@ -202,7 +202,6 @@ def _simplify_python_code(code: str) -> None:
if __name__ == "__main__":
import fire
-
async def run(filename: str, overwrite: bool = False, style: Literal["google", "numpy", "sphinx"] = "google"):
with open(filename) as f:
code = f.read()
@@ -212,5 +211,4 @@ if __name__ == "__main__":
f.write(code)
return code
-
fire.Fire(run)
diff --git a/metagpt/actions/write_prd_review.py b/metagpt/actions/write_prd_review.py
index 5ff9624c5..5c922d3bc 100644
--- a/metagpt/actions/write_prd_review.py
+++ b/metagpt/actions/write_prd_review.py
@@ -25,3 +25,4 @@ class WritePRDReview(Action):
prompt = self.prd_review_prompt_template.format(prd=self.prd)
review = await self._aask(prompt)
return review
+
\ No newline at end of file
diff --git a/metagpt/actions/write_tutorial.py b/metagpt/actions/write_tutorial.py
index 7bf844e43..23e3560e8 100644
--- a/metagpt/actions/write_tutorial.py
+++ b/metagpt/actions/write_tutorial.py
@@ -65,3 +65,4 @@ class WriteContent(Action):
"""
prompt = CONTENT_PROMPT.format(topic=topic, language=self.language, directory=self.directory)
return await self._aask(prompt=prompt)
+
diff --git a/metagpt/const.py b/metagpt/const.py
index 457eba698..b8b08628e 100644
--- a/metagpt/const.py
+++ b/metagpt/const.py
@@ -13,9 +13,9 @@ def get_project_root():
current_path = Path.cwd()
while True:
if (
- (current_path / ".git").exists()
- or (current_path / ".project_root").exists()
- or (current_path / ".gitignore").exists()
+ (current_path / ".git").exists()
+ or (current_path / ".project_root").exists()
+ or (current_path / ".gitignore").exists()
):
return current_path
parent_path = current_path.parent
diff --git a/metagpt/document_store/base_store.py b/metagpt/document_store/base_store.py
index 3969ce289..5d7015e8b 100644
--- a/metagpt/document_store/base_store.py
+++ b/metagpt/document_store/base_store.py
@@ -53,3 +53,4 @@ class LocalStore(BaseStore, ABC):
@abstractmethod
def _write(self, docs, metadatas):
raise NotImplementedError
+
\ No newline at end of file
diff --git a/metagpt/document_store/chromadb_store.py b/metagpt/document_store/chromadb_store.py
index 6ec097592..d2ecc05f6 100644
--- a/metagpt/document_store/chromadb_store.py
+++ b/metagpt/document_store/chromadb_store.py
@@ -10,7 +10,6 @@ import chromadb
class ChromaStore:
"""If inherited from BaseStore, or importing other modules from metagpt, a Python exception occurs, which is strange."""
-
def __init__(self, name):
client = chromadb.Client()
collection = client.create_collection(name)
diff --git a/metagpt/document_store/document.py b/metagpt/document_store/document.py
index 85e416c65..e4b9473c7 100644
--- a/metagpt/document_store/document.py
+++ b/metagpt/document_store/document.py
@@ -79,3 +79,4 @@ class Document:
return self._get_docs_and_metadatas_by_langchain()
else:
raise NotImplementedError
+
\ No newline at end of file
diff --git a/metagpt/document_store/qdrant_store.py b/metagpt/document_store/qdrant_store.py
index 80016e4ad..98b82cf87 100644
--- a/metagpt/document_store/qdrant_store.py
+++ b/metagpt/document_store/qdrant_store.py
@@ -38,11 +38,11 @@ class QdrantStore(BaseStore):
raise Exception("please check QdrantConnection.")
def create_collection(
- self,
- collection_name: str,
- vectors_config: VectorParams,
- force_recreate=False,
- **kwargs,
+ self,
+ collection_name: str,
+ vectors_config: VectorParams,
+ force_recreate=False,
+ **kwargs,
):
"""
create a collection
@@ -97,12 +97,12 @@ class QdrantStore(BaseStore):
)
def search(
- self,
- collection_name: str,
- query: List[float],
- query_filter: Filter = None,
- k=10,
- return_vector=False,
+ self,
+ collection_name: str,
+ query: List[float],
+ query_filter: Filter = None,
+ k=10,
+ return_vector=False,
):
"""
vector search
diff --git a/metagpt/inspect_module.py b/metagpt/inspect_module.py
index fcdd4f0b7..a89ac1c5e 100644
--- a/metagpt/inspect_module.py
+++ b/metagpt/inspect_module.py
@@ -25,4 +25,4 @@ def print_classes_and_functions(module):
if __name__ == '__main__':
- print_classes_and_functions(metagpt)
+ print_classes_and_functions(metagpt)
\ No newline at end of file
diff --git a/metagpt/llm.py b/metagpt/llm.py
index 6a9a9132f..e6f815950 100644
--- a/metagpt/llm.py
+++ b/metagpt/llm.py
@@ -12,7 +12,6 @@ from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
DEFAULT_LLM = LLM()
CLAUDE_LLM = Claude()
-
async def ai_func(prompt):
"""使用LLM进行QA
QA with LLMs
diff --git a/metagpt/logs.py b/metagpt/logs.py
index 0adee23ff..b2052e9b8 100644
--- a/metagpt/logs.py
+++ b/metagpt/logs.py
@@ -12,7 +12,6 @@ from loguru import logger as _logger
from metagpt.const import PROJECT_ROOT
-
def define_log_level(print_level="INFO", logfile_level="DEBUG"):
"""调整日志级别到level之上
Adjust the log level to above level
@@ -22,5 +21,4 @@ def define_log_level(print_level="INFO", logfile_level="DEBUG"):
_logger.add(PROJECT_ROOT / 'logs/log.txt', level=logfile_level)
return _logger
-
logger = define_log_level()
diff --git a/metagpt/manager.py b/metagpt/manager.py
index e6bf77c8b..9d238c621 100644
--- a/metagpt/manager.py
+++ b/metagpt/manager.py
@@ -51,7 +51,7 @@ class Manager:
# chosen_role_name = self.llm.ask(self.prompt_template.format(context))
# FIXME: 现在通过简单的字典决定流向,但之后还是应该有思考过程
- # The direction of flow is now determined by a simple dictionary, but there should still be a thought process afterwards
+ #The direction of flow is now determined by a simple dictionary, but there should still be a thought process afterwards
next_role_profile = self.role_directions[message.role]
# logger.debug(f"{next_role_profile}")
for _, role in roles.items():
diff --git a/metagpt/memory/__init__.py b/metagpt/memory/__init__.py
index e65ee7642..710930626 100644
--- a/metagpt/memory/__init__.py
+++ b/metagpt/memory/__init__.py
@@ -6,8 +6,9 @@
@File : __init__.py
"""
-from metagpt.memory.longterm_memory import LongTermMemory
from metagpt.memory.memory import Memory
+from metagpt.memory.longterm_memory import LongTermMemory
+
__all__ = [
"Memory",
diff --git a/metagpt/memory/longterm_memory.py b/metagpt/memory/longterm_memory.py
index e0b8e68c1..f8abea5f3 100644
--- a/metagpt/memory/longterm_memory.py
+++ b/metagpt/memory/longterm_memory.py
@@ -68,3 +68,4 @@ class LongTermMemory(Memory):
def clear(self):
super(LongTermMemory, self).clear()
self.memory_storage.clean()
+
\ No newline at end of file
diff --git a/metagpt/memory/memory.py b/metagpt/memory/memory.py
index 282f5fe33..c818fa707 100644
--- a/metagpt/memory/memory.py
+++ b/metagpt/memory/memory.py
@@ -85,3 +85,4 @@ class Memory:
continue
rsp += self.index[action]
return rsp
+
\ No newline at end of file
diff --git a/metagpt/memory/memory_storage.py b/metagpt/memory/memory_storage.py
index 5cd4cac47..302d96aa7 100644
--- a/metagpt/memory/memory_storage.py
+++ b/metagpt/memory/memory_storage.py
@@ -2,16 +2,16 @@
# -*- coding: utf-8 -*-
# @Desc : the implement of memory storage
-from pathlib import Path
from typing import List
+from pathlib import Path
from langchain.vectorstores.faiss import FAISS
from metagpt.const import DATA_PATH, MEM_TTL
-from metagpt.document_store.faiss_store import FaissStore
from metagpt.logs import logger
from metagpt.schema import Message
from metagpt.utils.serialize import serialize_message, deserialize_message
+from metagpt.document_store.faiss_store import FaissStore
class MemoryStorage(FaissStore):
@@ -104,3 +104,4 @@ class MemoryStorage(FaissStore):
self.store = None
self._initialized = False
+
\ No newline at end of file
diff --git a/metagpt/prompts/decompose.py b/metagpt/prompts/decompose.py
index 4ede8b138..ab0c360d3 100644
--- a/metagpt/prompts/decompose.py
+++ b/metagpt/prompts/decompose.py
@@ -16,6 +16,7 @@ The requirements of the tree-structure plan are:
4. The sub-goals at the bottom level should be basic actions so that I can easily execute them in the game.
"""
+
DECOMPOSE_USER = """USER:
The goal is to {goal description}. Generate the plan according to the requirements.
"""
diff --git a/metagpt/prompts/generate_skill.md b/metagpt/prompts/generate_skill.md
index 1d0a68688..74948cd15 100644
--- a/metagpt/prompts/generate_skill.md
+++ b/metagpt/prompts/generate_skill.md
@@ -66,10 +66,9 @@ # PRD
return prd
```
+
The main class/function is WritePRD.
Then you should write:
-This class is designed to generate a PRD based on input requirements. Notably, there's a template prompt with sections
-for product, function, goals, user scenarios, requirements, constraints, performance metrics. This template gets filled
-with input requirements and then queries a big language model to produce the detailed PRD.
\ No newline at end of file
+This class is designed to generate a PRD based on input requirements. Notably, there's a template prompt with sections for product, function, goals, user scenarios, requirements, constraints, performance metrics. This template gets filled with input requirements and then queries a big language model to produce the detailed PRD.
\ No newline at end of file
diff --git a/metagpt/prompts/sales.py b/metagpt/prompts/sales.py
index 08e7b28f8..a44aacafe 100644
--- a/metagpt/prompts/sales.py
+++ b/metagpt/prompts/sales.py
@@ -6,6 +6,7 @@
@File : sales.py
"""
+
SALES_ASSISTANT = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at.
Following '===' is the conversation history.
Use this conversation history to make your decision.
@@ -28,6 +29,7 @@ The answer needs to be one number only, no words.
If there is no conversation history, output 1.
Do not answer anything else nor add anything to you answer."""
+
SALES = """Never forget your name is {salesperson_name}. You work as a {salesperson_role}.
You work at company named {company_name}. {company_name}'s business is the following: {company_business}
Company values are the following. {company_values}
@@ -52,11 +54,10 @@ Conversation history:
{salesperson_name}:
"""
-conversation_stages = {
- '1': "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
- '2': "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
- '3': "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
- '4': "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
- '5': "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
- '6': "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
- '7': "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits."}
+conversation_stages = {'1' : "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
+'2': "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
+'3': "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
+'4': "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
+'5': "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
+'6': "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
+'7': "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits."}
diff --git a/metagpt/prompts/summarize.py b/metagpt/prompts/summarize.py
index bd5d69558..42d34b8a5 100644
--- a/metagpt/prompts/summarize.py
+++ b/metagpt/prompts/summarize.py
@@ -20,6 +20,7 @@ summary. Pick a suitable emoji for every bullet point. Your response should be i
a YouTube video, use the following text: {{CONTENT}}.
"""
+
# GCP-VertexAI-Text Summarization (SUMMARIZE_PROMPT_2-5 are from this source)
# https://github.com/GoogleCloudPlatform/generative-ai/blob/main/language/examples/prompt-design/text_summarization.ipynb
# Long documents require a map-reduce process, see the following notebook
@@ -38,6 +39,7 @@ Summary:
"""
+
SUMMARIZE_PROMPT_3 = """
Provide a TL;DR for the following article:
@@ -51,6 +53,7 @@ Instead of computing on the individual qubits themselves, we will then compute o
TL;DR:
"""
+
SUMMARIZE_PROMPT_4 = """
Provide a very short summary in four bullet points for the following article:
@@ -65,6 +68,7 @@ Bulletpoints:
"""
+
SUMMARIZE_PROMPT_5 = """
Please generate a summary of the following conversation and at the end summarize the to-do's for the support Agent:
diff --git a/metagpt/prompts/tutorial_assistant.py b/metagpt/prompts/tutorial_assistant.py
index fe31e5f2a..d690aad83 100644
--- a/metagpt/prompts/tutorial_assistant.py
+++ b/metagpt/prompts/tutorial_assistant.py
@@ -36,4 +36,4 @@ Strictly limit output according to the following requirements:
3. The output must be strictly in the specified language, {language}.
4. Do not have redundant output, including concluding remarks.
5. Strict requirement not to output the topic "{topic}".
-"""
+"""
\ No newline at end of file
diff --git a/metagpt/prompts/use_lib_sop.py b/metagpt/prompts/use_lib_sop.py
index edebbe9c7..b43ed5125 100644
--- a/metagpt/prompts/use_lib_sop.py
+++ b/metagpt/prompts/use_lib_sop.py
@@ -73,6 +73,7 @@ The action_list can contain arbitrary number of actions. The args of each action
6. I will execute your code step by step and give you feedback. If some action fails, I will stop at that action and will not execute its following actions. The feedback will include error messages about the failed action. At that time, you should replan and write the new code just starting from that failed action.
"""
+
SOP_USER = """USER:
My current state:
- inventory: {inventory}
diff --git a/metagpt/provider/__init__.py b/metagpt/provider/__init__.py
index 8c64cd4e1..56dc19b4b 100644
--- a/metagpt/provider/__init__.py
+++ b/metagpt/provider/__init__.py
@@ -8,4 +8,5 @@
from metagpt.provider.openai_api import OpenAIGPTAPI
+
__all__ = ["OpenAIGPTAPI"]
diff --git a/metagpt/provider/anthropic_api.py b/metagpt/provider/anthropic_api.py
index 03802a716..7293e2cde 100644
--- a/metagpt/provider/anthropic_api.py
+++ b/metagpt/provider/anthropic_api.py
@@ -32,3 +32,4 @@ class Claude2:
max_tokens_to_sample=1000,
)
return res.completion
+
\ No newline at end of file
diff --git a/metagpt/provider/base_chatbot.py b/metagpt/provider/base_chatbot.py
index a960d1c05..abdf423f4 100644
--- a/metagpt/provider/base_chatbot.py
+++ b/metagpt/provider/base_chatbot.py
@@ -25,3 +25,4 @@ class BaseChatbot(ABC):
@abstractmethod
def ask_code(self, msgs: list) -> str:
"""Ask GPT multiple questions and get a piece of code"""
+
\ No newline at end of file
diff --git a/metagpt/provider/base_gpt_api.py b/metagpt/provider/base_gpt_api.py
index f39e708eb..de61167b9 100644
--- a/metagpt/provider/base_gpt_api.py
+++ b/metagpt/provider/base_gpt_api.py
@@ -115,3 +115,4 @@ class BaseGPTAPI(BaseChatbot):
def messages_to_dict(self, messages):
"""objects to [{"role": "user", "content": msg}] etc."""
return [i.to_dict() for i in messages]
+
\ No newline at end of file
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 6b2c2941d..7e865f288 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -41,7 +41,7 @@ class RateLimiter:
self.rpm = rpm
def split_batches(self, batch):
- return [batch[i: i + self.rpm] for i in range(0, len(batch), self.rpm)]
+ return [batch[i : i + self.rpm] for i in range(0, len(batch), self.rpm)]
async def wait_if_needed(self, num_requests):
current_time = time.time()
@@ -83,9 +83,8 @@ class CostManager(metaclass=Singleton):
self.total_prompt_tokens += prompt_tokens
self.total_completion_tokens += completion_tokens
cost = (
- prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model][
- "completion"]
- ) / 1000
+ prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]
+ ) / 1000
self.total_cost += cost
logger.info(
f"Total running cost: ${self.total_cost:.3f} | Max budget: ${CONFIG.max_budget:.3f} | "
diff --git a/metagpt/roles/__init__.py b/metagpt/roles/__init__.py
index 34ec144a1..1768b786c 100644
--- a/metagpt/roles/__init__.py
+++ b/metagpt/roles/__init__.py
@@ -6,15 +6,16 @@
@File : __init__.py
"""
-from metagpt.roles.architect import Architect
-from metagpt.roles.customer_service import CustomerService
-from metagpt.roles.engineer import Engineer
-from metagpt.roles.product_manager import ProductManager
-from metagpt.roles.project_manager import ProjectManager
-from metagpt.roles.qa_engineer import QaEngineer
from metagpt.roles.role import Role
-from metagpt.roles.sales import Sales
+from metagpt.roles.architect import Architect
+from metagpt.roles.project_manager import ProjectManager
+from metagpt.roles.product_manager import ProductManager
+from metagpt.roles.engineer import Engineer
+from metagpt.roles.qa_engineer import QaEngineer
from metagpt.roles.seacher import Searcher
+from metagpt.roles.sales import Sales
+from metagpt.roles.customer_service import CustomerService
+
__all__ = [
"Role",
diff --git a/metagpt/roles/architect.py b/metagpt/roles/architect.py
index e86bd4eb6..15d5fe5b1 100644
--- a/metagpt/roles/architect.py
+++ b/metagpt/roles/architect.py
@@ -23,11 +23,11 @@ class Architect(Role):
"""
def __init__(
- self,
- name: str = "Bob",
- profile: str = "Architect",
- goal: str = "Design a concise, usable, complete python system",
- constraints: str = "Try to specify good open source tools as much as possible",
+ self,
+ name: str = "Bob",
+ profile: str = "Architect",
+ goal: str = "Design a concise, usable, complete python system",
+ constraints: str = "Try to specify good open source tools as much as possible",
) -> None:
"""Initializes the Architect with given attributes."""
super().__init__(name, profile, goal, constraints)
diff --git a/metagpt/roles/customer_service.py b/metagpt/roles/customer_service.py
index 4aae7cb03..4547f8190 100644
--- a/metagpt/roles/customer_service.py
+++ b/metagpt/roles/customer_service.py
@@ -32,3 +32,4 @@ class CustomerService(Sales):
store=None
):
super().__init__(name, profile, desc=desc, store=store)
+
\ No newline at end of file
diff --git a/metagpt/roles/engineer.py b/metagpt/roles/engineer.py
index d1f11475f..6d65575a8 100644
--- a/metagpt/roles/engineer.py
+++ b/metagpt/roles/engineer.py
@@ -61,13 +61,13 @@ class Engineer(Role):
"""
def __init__(
- self,
- name: str = "Alex",
- profile: str = "Engineer",
- goal: str = "Write elegant, readable, extensible, efficient code",
- constraints: str = "The code should conform to standards like PEP8 and be modular and maintainable",
- n_borg: int = 1,
- use_code_review: bool = False,
+ self,
+ name: str = "Alex",
+ profile: str = "Engineer",
+ goal: str = "Write elegant, readable, extensible, efficient code",
+ constraints: str = "The code should conform to standards like PEP8 and be modular and maintainable",
+ n_borg: int = 1,
+ use_code_review: bool = False,
) -> None:
"""Initializes the Engineer role with given attributes."""
super().__init__(name, profile, goal, constraints)
diff --git a/metagpt/roles/product_manager.py b/metagpt/roles/product_manager.py
index dbf3b5f0f..a58ea5385 100644
--- a/metagpt/roles/product_manager.py
+++ b/metagpt/roles/product_manager.py
@@ -21,11 +21,11 @@ class ProductManager(Role):
"""
def __init__(
- self,
- name: str = "Alice",
- profile: str = "Product Manager",
- goal: str = "Efficiently create a successful product",
- constraints: str = "",
+ self,
+ name: str = "Alice",
+ profile: str = "Product Manager",
+ goal: str = "Efficiently create a successful product",
+ constraints: str = "",
) -> None:
"""
Initializes the ProductManager role with given attributes.
diff --git a/metagpt/roles/project_manager.py b/metagpt/roles/project_manager.py
index 0706b982f..7e7c5699d 100644
--- a/metagpt/roles/project_manager.py
+++ b/metagpt/roles/project_manager.py
@@ -22,11 +22,11 @@ class ProjectManager(Role):
"""
def __init__(
- self,
- name: str = "Eve",
- profile: str = "Project Manager",
- goal: str = "Improve team efficiency and deliver with quality and quantity",
- constraints: str = "",
+ self,
+ name: str = "Eve",
+ profile: str = "Project Manager",
+ goal: str = "Improve team efficiency and deliver with quality and quantity",
+ constraints: str = "",
) -> None:
"""
Initializes the ProjectManager role with given attributes.
diff --git a/metagpt/roles/prompt.py b/metagpt/roles/prompt.py
index fdfe45c02..c22e0226b 100644
--- a/metagpt/roles/prompt.py
+++ b/metagpt/roles/prompt.py
@@ -23,7 +23,6 @@ SUFFIX = """Let's begin!
Question: {input}
Thoughts: {agent_scratchpad}"""
-
class PromptString(Enum):
REFLECTION_QUESTIONS = "Here are some statements:\n{memory_descriptions}\n\nBased solely on the information above, what are the 3 most prominent high-level questions we can answer about the topic in the statements?\n\n{format_instructions}"
diff --git a/metagpt/roles/qa_engineer.py b/metagpt/roles/qa_engineer.py
index 97a4d3c13..a763c2ce8 100644
--- a/metagpt/roles/qa_engineer.py
+++ b/metagpt/roles/qa_engineer.py
@@ -26,12 +26,12 @@ from metagpt.utils.special_tokens import FILENAME_CODE_SEP, MSG_SEP
class QaEngineer(Role):
def __init__(
- self,
- name="Edward",
- profile="QaEngineer",
- goal="Write comprehensive and robust tests to ensure codes will work as expected without bugs",
- constraints="The test code you write should conform to code standard like PEP8, be modular, easy to read and maintain",
- test_round_allowed=5,
+ self,
+ name="Edward",
+ profile="QaEngineer",
+ goal="Write comprehensive and robust tests to ensure codes will work as expected without bugs",
+ constraints="The test code you write should conform to code standard like PEP8, be modular, easy to read and maintain",
+ test_round_allowed=5,
):
super().__init__(name, profile, goal, constraints)
self._init_actions(
diff --git a/metagpt/roles/researcher.py b/metagpt/roles/researcher.py
index cdda49075..acb46c718 100644
--- a/metagpt/roles/researcher.py
+++ b/metagpt/roles/researcher.py
@@ -21,13 +21,13 @@ class Report(BaseModel):
class Researcher(Role):
def __init__(
- self,
- name: str = "David",
- profile: str = "Researcher",
- goal: str = "Gather information and conduct research",
- constraints: str = "Ensure accuracy and relevance of information",
- language: str = "en-us",
- **kwargs,
+ self,
+ name: str = "David",
+ profile: str = "Researcher",
+ goal: str = "Gather information and conduct research",
+ constraints: str = "Ensure accuracy and relevance of information",
+ language: str = "en-us",
+ **kwargs,
):
super().__init__(name, profile, goal, constraints, **kwargs)
self._init_actions([CollectLinks(name), WebBrowseAndSummarize(name), ConductResearch(name)])
@@ -93,10 +93,8 @@ class Researcher(Role):
if __name__ == "__main__":
import fire
-
async def main(topic: str, language="en-us"):
role = Researcher(topic, language=language)
await role.run(topic)
-
fire.Fire(main)
diff --git a/metagpt/roles/sales.py b/metagpt/roles/sales.py
index 51b13f487..a45ad6f1b 100644
--- a/metagpt/roles/sales.py
+++ b/metagpt/roles/sales.py
@@ -32,3 +32,4 @@ class Sales(Role):
else:
action = SearchAndSummarize()
self._init_actions([action])
+
\ No newline at end of file
diff --git a/metagpt/roles/seacher.py b/metagpt/roles/seacher.py
index 1b786f830..0b6e089da 100644
--- a/metagpt/roles/seacher.py
+++ b/metagpt/roles/seacher.py
@@ -23,13 +23,13 @@ class Searcher(Role):
constraints (str): Constraints or limitations for the searcher.
engine (SearchEngineType): The type of search engine to use.
"""
-
- def __init__(self,
- name: str = 'Alice',
- profile: str = 'Smart Assistant',
+
+ def __init__(self,
+ name: str = 'Alice',
+ profile: str = 'Smart Assistant',
goal: str = 'Provide search services for users',
- constraints: str = 'Answer is rich and complete',
- engine=SearchEngineType.SERPAPI_GOOGLE,
+ constraints: str = 'Answer is rich and complete',
+ engine=SearchEngineType.SERPAPI_GOOGLE,
**kwargs) -> None:
"""
Initializes the Searcher role with given attributes.
@@ -53,7 +53,7 @@ class Searcher(Role):
"""Performs the search action in a single process."""
logger.info(f"{self._setting}: ready to {self._rc.todo}")
response = await self._rc.todo.run(self._rc.memory.get(k=0))
-
+
if isinstance(response, ActionOutput):
msg = Message(content=response.content, instruct_content=response.instruct_content,
role=self.profile, cause_by=type(self._rc.todo))
diff --git a/metagpt/roles/sk_agent.py b/metagpt/roles/sk_agent.py
index 05723cc80..b27841d74 100644
--- a/metagpt/roles/sk_agent.py
+++ b/metagpt/roles/sk_agent.py
@@ -29,12 +29,12 @@ class SkAgent(Role):
"""
def __init__(
- self,
- name: str = "Sunshine",
- profile: str = "sk_agent",
- goal: str = "Execute task based on passed in task description",
- constraints: str = "",
- planner_cls=BasicPlanner,
+ self,
+ name: str = "Sunshine",
+ profile: str = "sk_agent",
+ goal: str = "Execute task based on passed in task description",
+ constraints: str = "",
+ planner_cls=BasicPlanner,
) -> None:
"""Initializes the Engineer role with given attributes."""
super().__init__(name, profile, goal, constraints)
diff --git a/metagpt/roles/tutorial_assistant.py b/metagpt/roles/tutorial_assistant.py
index 19327a6d4..9a7df4f4d 100644
--- a/metagpt/roles/tutorial_assistant.py
+++ b/metagpt/roles/tutorial_assistant.py
@@ -29,12 +29,12 @@ class TutorialAssistant(Role):
"""
def __init__(
- self,
- name: str = "Stitch",
- profile: str = "Tutorial Assistant",
- goal: str = "Generate tutorial documents",
- constraints: str = "Strictly follow Markdown's syntax, with neat and standardized layout",
- language: str = "Chinese",
+ self,
+ name: str = "Stitch",
+ profile: str = "Tutorial Assistant",
+ goal: str = "Generate tutorial documents",
+ constraints: str = "Strictly follow Markdown's syntax, with neat and standardized layout",
+ language: str = "Chinese",
):
super().__init__(name, profile, goal, constraints)
self._init_actions([WriteDirectory(language=language)])
diff --git a/metagpt/schema.py b/metagpt/schema.py
index 7d91d87ec..bdca093c2 100644
--- a/metagpt/schema.py
+++ b/metagpt/schema.py
@@ -50,7 +50,6 @@ class UserMessage(Message):
"""便于支持OpenAI的消息
Facilitate support for OpenAI messages
"""
-
def __init__(self, content: str):
super().__init__(content, 'user')
@@ -60,7 +59,6 @@ class SystemMessage(Message):
"""便于支持OpenAI的消息
Facilitate support for OpenAI messages
"""
-
def __init__(self, content: str):
super().__init__(content, 'system')
@@ -70,7 +68,6 @@ class AIMessage(Message):
"""便于支持OpenAI的消息
Facilitate support for OpenAI messages
"""
-
def __init__(self, content: str):
super().__init__(content, 'assistant')
diff --git a/metagpt/skills/WriterSkill/Brainstorm/config.json b/metagpt/skills/WriterSkill/Brainstorm/config.json
index fa2cf3314..f50a354e7 100644
--- a/metagpt/skills/WriterSkill/Brainstorm/config.json
+++ b/metagpt/skills/WriterSkill/Brainstorm/config.json
@@ -8,9 +8,7 @@
"top_p": 1.0,
"presence_penalty": 0.0,
"frequency_penalty": 0.0,
- "stop_sequences": [
- "##END##"
- ]
+ "stop_sequences": ["##END##"]
},
"input": {
"parameters": [
diff --git a/metagpt/software_company.py b/metagpt/software_company.py
index 8f173ebf3..b2bd18c58 100644
--- a/metagpt/software_company.py
+++ b/metagpt/software_company.py
@@ -59,3 +59,4 @@ class SoftwareCompany(BaseModel):
self._check_balance()
await self.environment.run()
return self.environment.history
+
\ No newline at end of file
diff --git a/metagpt/tools/__init__.py b/metagpt/tools/__init__.py
index d5cef0e46..d98087e4b 100644
--- a/metagpt/tools/__init__.py
+++ b/metagpt/tools/__init__.py
@@ -6,6 +6,7 @@
@File : __init__.py
"""
+
from enum import Enum
diff --git a/metagpt/tools/code_interpreter.py b/metagpt/tools/code_interpreter.py
index cb6230bab..97398ccfd 100644
--- a/metagpt/tools/code_interpreter.py
+++ b/metagpt/tools/code_interpreter.py
@@ -1,16 +1,16 @@
-import inspect
import re
-import textwrap
-from pathlib import Path
from typing import List, Callable
+from pathlib import Path
import wrapt
+import textwrap
+import inspect
from interpreter.interpreter import Interpreter
-from metagpt.actions.clone_function import CloneFunction, run_function_code, run_function_script
-from metagpt.config import CONFIG
from metagpt.logs import logger
+from metagpt.config import CONFIG
from metagpt.utils.highlight import highlight
+from metagpt.actions.clone_function import CloneFunction, run_function_code, run_function_script
def extract_python_code(code: str):
@@ -36,7 +36,6 @@ def extract_python_code(code: str):
class OpenCodeInterpreter(object):
"""https://github.com/KillianLucas/open-interpreter"""
-
def __init__(self, auto_run: bool = True) -> None:
interpreter = Interpreter()
interpreter.auto_run = auto_run
@@ -127,5 +126,4 @@ class OpenInterpreterDecorator(object):
except Exception as e:
raise Exception("Could not evaluate Python code", e)
return res
-
return wrapper(wrapped)
diff --git a/metagpt/tools/prompt_writer.py b/metagpt/tools/prompt_writer.py
index 35358307e..d90599206 100644
--- a/metagpt/tools/prompt_writer.py
+++ b/metagpt/tools/prompt_writer.py
@@ -10,7 +10,6 @@ from typing import Union
class GPTPromptGenerator:
"""Using LLM, given an output, request LLM to provide input (supporting instruction, chatbot, and query styles)"""
-
def __init__(self):
self._generators = {i: getattr(self, f"gen_{i}_style") for i in ['instruction', 'chatbot', 'query']}
diff --git a/metagpt/tools/sd_engine.py b/metagpt/tools/sd_engine.py
index 4e40951bc..1d9cd0b2a 100644
--- a/metagpt/tools/sd_engine.py
+++ b/metagpt/tools/sd_engine.py
@@ -10,8 +10,8 @@ import os
from os.path import join
from typing import List
-from PIL import Image, PngImagePlugin
from aiohttp import ClientSession
+from PIL import Image, PngImagePlugin
from metagpt.config import Config
from metagpt.const import WORKSPACE_ROOT
@@ -64,12 +64,12 @@ class SDEngine:
logger.info(self.sd_t2i_url)
def construct_payload(
- self,
- prompt,
- negtive_prompt=default_negative_prompt,
- width=512,
- height=512,
- sd_model="galaxytimemachinesGTM_photoV20",
+ self,
+ prompt,
+ negtive_prompt=default_negative_prompt,
+ width=512,
+ height=512,
+ sd_model="galaxytimemachinesGTM_photoV20",
):
# Configure the payload with provided inputs
self.payload["prompt"] = prompt
@@ -120,13 +120,11 @@ def decode_base64_to_image(img, save_name):
image.save(f"{save_name}.png", pnginfo=pnginfo)
return pnginfo, image
-
def batch_decode_base64_to_image(imgs, save_dir="", save_name=""):
for idx, _img in enumerate(imgs):
save_name = join(save_dir, save_name)
decode_base64_to_image(_img, save_name=save_name)
-
if __name__ == "__main__":
engine = SDEngine()
prompt = "pixel style, game design, a game interface should be minimalistic and intuitive with the score and high score displayed at the top. The snake and its food should be easily distinguishable. The game should have a simple color scheme, with a contrasting color for the snake and its food. Complete interface boundary"
diff --git a/metagpt/tools/search_engine.py b/metagpt/tools/search_engine.py
index e87519291..942ef7edd 100644
--- a/metagpt/tools/search_engine.py
+++ b/metagpt/tools/search_engine.py
@@ -20,7 +20,7 @@ class SkSearchEngine:
@sk_function(
description="searches results from Google. Useful when you need to find short "
- "and succinct answers about a specific topic. Input should be a search query.",
+ "and succinct answers about a specific topic. Input should be a search query.",
name="searchAsync",
input_description="search",
)
@@ -42,7 +42,7 @@ class SearchEngine:
"""
def __init__(
- self,
+ self,
engine: Optional[SearchEngineType] = None,
run_func: Callable[[str, int, bool], Coroutine[None, None, Union[str, list[str]]]] = None,
):
@@ -68,19 +68,19 @@ class SearchEngine:
@overload
def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: Literal[True] = True,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: Literal[True] = True,
) -> str:
...
@overload
def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: Literal[False] = False,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: Literal[False] = False,
) -> list[dict[str, str]]:
...
diff --git a/metagpt/tools/search_engine_ddg.py b/metagpt/tools/search_engine_ddg.py
index 320a8c621..57bc61b82 100644
--- a/metagpt/tools/search_engine_ddg.py
+++ b/metagpt/tools/search_engine_ddg.py
@@ -25,10 +25,10 @@ class DDGAPIWrapper:
"""
def __init__(
- self,
- *,
- loop: asyncio.AbstractEventLoop | None = None,
- executor: futures.Executor | None = None,
+ self,
+ *,
+ loop: asyncio.AbstractEventLoop | None = None,
+ executor: futures.Executor | None = None,
):
kwargs = {}
if CONFIG.global_proxy:
@@ -39,29 +39,29 @@ class DDGAPIWrapper:
@overload
def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: Literal[True] = True,
- focus: list[str] | None = None,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: Literal[True] = True,
+ focus: list[str] | None = None,
) -> str:
...
@overload
def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: Literal[False] = False,
- focus: list[str] | None = None,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: Literal[False] = False,
+ focus: list[str] | None = None,
) -> list[dict[str, str]]:
...
async def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: bool = True,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: bool = True,
) -> str | list[dict]:
"""Return the results of a Google search using the official Google API
diff --git a/metagpt/tools/search_engine_googleapi.py b/metagpt/tools/search_engine_googleapi.py
index 126067b10..b9faf2ced 100644
--- a/metagpt/tools/search_engine_googleapi.py
+++ b/metagpt/tools/search_engine_googleapi.py
@@ -76,11 +76,11 @@ class GoogleAPIWrapper(BaseModel):
return service.cse()
async def run(
- self,
- query: str,
- max_results: int = 8,
- as_string: bool = True,
- focus: list[str] | None = None,
+ self,
+ query: str,
+ max_results: int = 8,
+ as_string: bool = True,
+ focus: list[str] | None = None,
) -> str | list[dict]:
"""Return the results of a Google search using the official Google API.
diff --git a/metagpt/tools/translator.py b/metagpt/tools/translator.py
index 2e9756abe..910638469 100644
--- a/metagpt/tools/translator.py
+++ b/metagpt/tools/translator.py
@@ -24,4 +24,4 @@ class Translator:
@classmethod
def translate_prompt(cls, original, lang='中文'):
- return prompt.format(LANG=lang, ORIGINAL=original)
+ return prompt.format(LANG=lang, ORIGINAL=original)
\ No newline at end of file
diff --git a/metagpt/tools/web_browser_engine.py b/metagpt/tools/web_browser_engine.py
index 76074aa5e..453d87f31 100644
--- a/metagpt/tools/web_browser_engine.py
+++ b/metagpt/tools/web_browser_engine.py
@@ -12,9 +12,9 @@ from metagpt.utils.parse_html import WebPage
class WebBrowserEngine:
def __init__(
- self,
- engine: WebBrowserEngineType | None = None,
- run_func: Callable[..., Coroutine[Any, Any, WebPage | list[WebPage]]] | None = None,
+ self,
+ engine: WebBrowserEngineType | None = None,
+ run_func: Callable[..., Coroutine[Any, Any, WebPage | list[WebPage]]] | None = None,
):
engine = engine or CONFIG.web_browser_engine
@@ -46,9 +46,7 @@ class WebBrowserEngine:
if __name__ == "__main__":
import fire
-
async def main(url: str, *urls: str, engine_type: Literal["playwright", "selenium"] = "playwright", **kwargs):
return await WebBrowserEngine(WebBrowserEngineType(engine_type), **kwargs).run(url, *urls)
-
fire.Fire(main)
diff --git a/metagpt/tools/web_browser_engine_playwright.py b/metagpt/tools/web_browser_engine_playwright.py
index dd9782c6f..030e7701b 100644
--- a/metagpt/tools/web_browser_engine_playwright.py
+++ b/metagpt/tools/web_browser_engine_playwright.py
@@ -23,10 +23,10 @@ class PlaywrightWrapper:
"""
def __init__(
- self,
- browser_type: Literal["chromium", "firefox", "webkit"] | None = None,
- launch_kwargs: dict | None = None,
- **kwargs,
+ self,
+ browser_type: Literal["chromium", "firefox", "webkit"] | None = None,
+ launch_kwargs: dict | None = None,
+ **kwargs,
) -> None:
if browser_type is None:
browser_type = CONFIG.playwright_browser_type
@@ -139,12 +139,11 @@ async def _log_stream(sr, log_func):
_install_lock: asyncio.Lock = None
_install_cache = set()
+
if __name__ == "__main__":
import fire
-
async def main(url: str, *urls: str, browser_type: str = "chromium", **kwargs):
return await PlaywrightWrapper(browser_type, **kwargs).run(url, *urls)
-
fire.Fire(main)
diff --git a/metagpt/tools/web_browser_engine_selenium.py b/metagpt/tools/web_browser_engine_selenium.py
index 64fdc0522..d727709b8 100644
--- a/metagpt/tools/web_browser_engine_selenium.py
+++ b/metagpt/tools/web_browser_engine_selenium.py
@@ -28,12 +28,12 @@ class SeleniumWrapper:
"""
def __init__(
- self,
- browser_type: Literal["chrome", "firefox", "edge", "ie"] | None = None,
- launch_kwargs: dict | None = None,
- *,
- loop: asyncio.AbstractEventLoop | None = None,
- executor: futures.Executor | None = None,
+ self,
+ browser_type: Literal["chrome", "firefox", "edge", "ie"] | None = None,
+ launch_kwargs: dict | None = None,
+ *,
+ loop: asyncio.AbstractEventLoop | None = None,
+ executor: futures.Executor | None = None,
) -> None:
if browser_type is None:
browser_type = CONFIG.selenium_browser_type
@@ -117,9 +117,7 @@ def _gen_get_driver_func(browser_type, *args, executable_path=None):
if __name__ == "__main__":
import fire
-
async def main(url: str, *urls: str, browser_type: str = "chrome", **kwargs):
return await SeleniumWrapper(browser_type, **kwargs).run(url, *urls)
-
fire.Fire(main)
diff --git a/metagpt/utils/__init__.py b/metagpt/utils/__init__.py
index ac78a6c85..f13175cf8 100644
--- a/metagpt/utils/__init__.py
+++ b/metagpt/utils/__init__.py
@@ -14,6 +14,7 @@ from metagpt.utils.token_counter import (
count_string_tokens,
)
+
__all__ = [
"read_docx",
"Singleton",
diff --git a/metagpt/utils/custom_decoder.py b/metagpt/utils/custom_decoder.py
index 2a274564c..373d16356 100644
--- a/metagpt/utils/custom_decoder.py
+++ b/metagpt/utils/custom_decoder.py
@@ -36,11 +36,11 @@ def py_make_scanner(context):
return parse_object((string, idx + 1), strict, _scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == "[":
return parse_array((string, idx + 1), _scan_once)
- elif nextchar == "n" and string[idx: idx + 4] == "null":
+ elif nextchar == "n" and string[idx : idx + 4] == "null":
return None, idx + 4
- elif nextchar == "t" and string[idx: idx + 4] == "true":
+ elif nextchar == "t" and string[idx : idx + 4] == "true":
return True, idx + 4
- elif nextchar == "f" and string[idx: idx + 5] == "false":
+ elif nextchar == "f" and string[idx : idx + 5] == "false":
return False, idx + 5
m = match_number(string, idx)
@@ -51,11 +51,11 @@ def py_make_scanner(context):
else:
res = parse_int(integer)
return res, m.end()
- elif nextchar == "N" and string[idx: idx + 3] == "NaN":
+ elif nextchar == "N" and string[idx : idx + 3] == "NaN":
return parse_constant("NaN"), idx + 3
- elif nextchar == "I" and string[idx: idx + 8] == "Infinity":
+ elif nextchar == "I" and string[idx : idx + 8] == "Infinity":
return parse_constant("Infinity"), idx + 8
- elif nextchar == "-" and string[idx: idx + 9] == "-Infinity":
+ elif nextchar == "-" and string[idx : idx + 9] == "-Infinity":
return parse_constant("-Infinity"), idx + 9
else:
raise StopIteration(idx)
@@ -89,7 +89,7 @@ WHITESPACE_STR = " \t\n\r"
def JSONObject(
- s_and_end, strict, scan_once, object_hook, object_pairs_hook, memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR
+ s_and_end, strict, scan_once, object_hook, object_pairs_hook, memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR
):
"""Parse a JSON object from a string and return the parsed object.
@@ -118,12 +118,12 @@ def JSONObject(
memo_get = memo.setdefault
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
- nextchar = s[end: end + 1]
+ nextchar = s[end : end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"' and nextchar != "'":
if nextchar in _ws:
end = _w(s, end).end()
- nextchar = s[end: end + 1]
+ nextchar = s[end : end + 1]
# Trivial empty object
if nextchar == "}":
if object_pairs_hook is not None:
@@ -146,9 +146,9 @@ def JSONObject(
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
- if s[end: end + 1] != ":":
+ if s[end : end + 1] != ":":
end = _w(s, end).end()
- if s[end: end + 1] != ":":
+ if s[end : end + 1] != ":":
raise JSONDecodeError("Expecting ':' delimiter", s, end)
end += 1
@@ -179,7 +179,7 @@ def JSONObject(
elif nextchar != ",":
raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
end = _w(s, end).end()
- nextchar = s[end: end + 1]
+ nextchar = s[end : end + 1]
end += 1
if nextchar != '"':
raise JSONDecodeError("Expecting property name enclosed in double quotes", s, end - 1)
@@ -257,7 +257,7 @@ def py_scanstring(s, end, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match, delim
else:
uni = _decode_uXXXX(s, end)
end += 5
- if 0xD800 <= uni <= 0xDBFF and s[end: end + 2] == "\\u":
+ if 0xD800 <= uni <= 0xDBFF and s[end : end + 2] == "\\u":
uni2 = _decode_uXXXX(s, end + 1)
if 0xDC00 <= uni2 <= 0xDFFF:
uni = 0x10000 + (((uni - 0xD800) << 10) | (uni2 - 0xDC00))
@@ -272,14 +272,14 @@ scanstring = py_scanstring
class CustomDecoder(json.JSONDecoder):
def __init__(
- self,
- *,
- object_hook=None,
- parse_float=None,
- parse_int=None,
- parse_constant=None,
- strict=True,
- object_pairs_hook=None
+ self,
+ *,
+ object_hook=None,
+ parse_float=None,
+ parse_int=None,
+ parse_constant=None,
+ strict=True,
+ object_pairs_hook=None
):
super().__init__(
object_hook=object_hook,
diff --git a/metagpt/utils/file.py b/metagpt/utils/file.py
index f7c9f2894..f3691549b 100644
--- a/metagpt/utils/file.py
+++ b/metagpt/utils/file.py
@@ -6,9 +6,8 @@
@File : file.py
@Describe : General file operations.
"""
-from pathlib import Path
-
import aiofiles
+from pathlib import Path
from metagpt.logs import logger
@@ -73,3 +72,4 @@ class File:
except Exception as e:
logger.error(f"Error reading file: {e}")
raise e
+
diff --git a/metagpt/utils/highlight.py b/metagpt/utils/highlight.py
index a7f8e7c7a..e6cbb228c 100644
--- a/metagpt/utils/highlight.py
+++ b/metagpt/utils/highlight.py
@@ -1,7 +1,7 @@
# 添加代码语法高亮显示
from pygments import highlight as highlight_
-from pygments.formatters import TerminalFormatter, HtmlFormatter
from pygments.lexers import PythonLexer, SqlLexer
+from pygments.formatters import TerminalFormatter, HtmlFormatter
def highlight(code: str, language: str = 'python', formatter: str = 'terminal'):
diff --git a/metagpt/utils/mermaid.py b/metagpt/utils/mermaid.py
index 2d8d3aed6..5e5b275b0 100644
--- a/metagpt/utils/mermaid.py
+++ b/metagpt/utils/mermaid.py
@@ -135,6 +135,7 @@ MMC2 = """sequenceDiagram
S-->>SE: return summary
SE-->>M: return summary"""
+
if __name__ == "__main__":
loop = asyncio.new_event_loop()
result = loop.run_until_complete(mermaid_to_file(MMC1, PROJECT_ROOT / f"{CONFIG.mermaid_engine}/1"))
diff --git a/metagpt/utils/mmdc_ink.py b/metagpt/utils/mmdc_ink.py
index 45cc2af52..3d91cde9d 100644
--- a/metagpt/utils/mmdc_ink.py
+++ b/metagpt/utils/mmdc_ink.py
@@ -6,9 +6,9 @@
@File : mermaid.py
"""
import base64
+import os
-from aiohttp import ClientSession, ClientError
-
+from aiohttp import ClientSession, ClientError
from metagpt.logs import logger
diff --git a/metagpt/utils/mmdc_playwright.py b/metagpt/utils/mmdc_playwright.py
index 5fef3708b..bdbfd82ff 100644
--- a/metagpt/utils/mmdc_playwright.py
+++ b/metagpt/utils/mmdc_playwright.py
@@ -8,13 +8,10 @@
import os
from urllib.parse import urljoin
-
from playwright.async_api import async_playwright
-
from metagpt.logs import logger
-
-async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
+async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
"""
Converts the given Mermaid code to various output formats and saves them to files.
@@ -27,21 +24,20 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
Returns:
int: Returns 1 if the conversion and saving were successful, -1 otherwise.
"""
- suffixes = ['png', 'svg', 'pdf']
+    suffixes = ['png', 'svg', 'pdf']
__dirname = os.path.dirname(os.path.abspath(__file__))
async with async_playwright() as p:
browser = await p.chromium.launch()
device_scale_factor = 1.0
context = await browser.new_context(
- viewport={'width': width, 'height': height},
- device_scale_factor=device_scale_factor,
- )
+ viewport={'width': width, 'height': height},
+ device_scale_factor=device_scale_factor,
+ )
page = await context.new_page()
async def console_message(msg):
logger.info(msg.text)
-
page.on('console', console_message)
try:
@@ -76,7 +72,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
}''', [mermaid_code, mermaid_config, my_css, background_color])
- if 'svg' in suffixes:
+ if 'svg' in suffixes :
svg_xml = await page.evaluate('''() => {
const svg = document.querySelector('svg');
const xmlSerializer = new XMLSerializer();
@@ -86,7 +82,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
with open(f'{output_file_without_suffix}.svg', 'wb') as f:
f.write(svg_xml.encode('utf-8'))
- if 'png' in suffixes:
+ if 'png' in suffixes:
clip = await page.evaluate('''() => {
const svg = document.querySelector('svg');
const rect = svg.getBoundingClientRect();
diff --git a/metagpt/utils/mmdc_pyppeteer.py b/metagpt/utils/mmdc_pyppeteer.py
index 690a26eb8..7ec30fd12 100644
--- a/metagpt/utils/mmdc_pyppeteer.py
+++ b/metagpt/utils/mmdc_pyppeteer.py
@@ -7,14 +7,11 @@
"""
import os
from urllib.parse import urljoin
-
from pyppeteer import launch
-
-from metagpt.config import CONFIG
from metagpt.logs import logger
+from metagpt.config import CONFIG
-
-async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
+async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048, height=2048) -> int:
"""
Converts the given Mermaid code to various output formats and saves them to files.
@@ -27,14 +24,15 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
Returns:
int: Returns 1 if the conversion and saving were successful, -1 otherwise.
"""
- suffixes = ['png', 'svg', 'pdf']
+ suffixes = ['png', 'svg', 'pdf']
__dirname = os.path.dirname(os.path.abspath(__file__))
+
if CONFIG.pyppeteer_executable_path:
browser = await launch(headless=True,
- executablePath=CONFIG.pyppeteer_executable_path,
- args=['--disable-extensions', "--no-sandbox"]
- )
+ executablePath=CONFIG.pyppeteer_executable_path,
+ args=['--disable-extensions',"--no-sandbox"]
+ )
else:
logger.error("Please set the environment variable:PYPPETEER_EXECUTABLE_PATH.")
return -1
@@ -43,7 +41,6 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
async def console_message(msg):
logger.info(msg.text)
-
page.on('console', console_message)
try:
@@ -76,7 +73,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
}
}''', [mermaid_code, mermaid_config, my_css, background_color])
- if 'svg' in suffixes:
+ if 'svg' in suffixes:
svg_xml = await page.evaluate('''() => {
const svg = document.querySelector('svg');
const xmlSerializer = new XMLSerializer();
@@ -86,7 +83,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
with open(f'{output_file_without_suffix}.svg', 'wb') as f:
f.write(svg_xml.encode('utf-8'))
- if 'png' in suffixes:
+ if 'png' in suffixes:
clip = await page.evaluate('''() => {
const svg = document.querySelector('svg');
const rect = svg.getBoundingClientRect();
@@ -97,8 +94,7 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
height: Math.ceil(rect.height)
};
}''')
- await page.setViewport({'width': clip['x'] + clip['width'], 'height': clip['y'] + clip['height'],
- 'deviceScaleFactor': device_scale_factor})
+ await page.setViewport({'width': clip['x'] + clip['width'], 'height': clip['y'] + clip['height'], 'deviceScaleFactor': device_scale_factor})
screenshot = await page.screenshot(clip=clip, omit_background=True, scale='device')
logger.info(f"Generating {output_file_without_suffix}.png..")
with open(f'{output_file_without_suffix}.png', 'wb') as f:
@@ -114,3 +110,4 @@ async def mermaid_to_file(mermaid_code, output_file_without_suffix, width=2048,
return -1
finally:
await browser.close()
+
diff --git a/metagpt/utils/parse_html.py b/metagpt/utils/parse_html.py
index f2395026f..62de26541 100644
--- a/metagpt/utils/parse_html.py
+++ b/metagpt/utils/parse_html.py
@@ -16,7 +16,7 @@ class WebPage(BaseModel):
class Config:
underscore_attrs_are_private = True
- _soup: Optional[BeautifulSoup] = None
+ _soup: Optional[BeautifulSoup] = None
_title: Optional[str] = None
@property
@@ -24,7 +24,7 @@ class WebPage(BaseModel):
if self._soup is None:
self._soup = BeautifulSoup(self.html, "html.parser")
return self._soup
-
+
@property
def title(self):
if self._title is None:
diff --git a/metagpt/utils/pycst.py b/metagpt/utils/pycst.py
index 4d1a86c91..afd85a547 100644
--- a/metagpt/utils/pycst.py
+++ b/metagpt/utils/pycst.py
@@ -37,12 +37,12 @@ def get_docstring_statement(body: DocstringNode) -> cst.SimpleStatementLine:
if not isinstance(expr, cst.Expr):
return None
-
+
val = expr.value
if not isinstance(val, (cst.SimpleString, cst.ConcatenatedString)):
return None
-
- evaluated_value = val.evaluated_value
+
+ evaluated_value = val.evaluated_value
if isinstance(evaluated_value, bytes):
return None
@@ -56,7 +56,6 @@ class DocstringCollector(cst.CSTVisitor):
stack: A list to keep track of the current path in the CST.
docstrings: A dictionary mapping paths in the CST to their corresponding docstrings.
"""
-
def __init__(self):
self.stack: list[str] = []
self.docstrings: dict[tuple[str, ...], cst.SimpleStatementLine] = {}
@@ -97,10 +96,9 @@ class DocstringTransformer(cst.CSTTransformer):
stack: A list to keep track of the current path in the CST.
docstrings: A dictionary mapping paths in the CST to their corresponding docstrings.
"""
-
def __init__(
- self,
- docstrings: dict[tuple[str, ...], cst.SimpleStatementLine],
+ self,
+ docstrings: dict[tuple[str, ...], cst.SimpleStatementLine],
):
self.stack: list[str] = []
self.docstrings = docstrings
@@ -127,8 +125,7 @@ class DocstringTransformer(cst.CSTTransformer):
key = tuple(self.stack)
self.stack.pop()
- if hasattr(updated_node, "decorators") and any(
- (i.decorator.value == "overload") for i in updated_node.decorators):
+ if hasattr(updated_node, "decorators") and any((i.decorator.value == "overload") for i in updated_node.decorators):
return updated_node
statement = self.docstrings.get(key)
diff --git a/metagpt/utils/read_document.py b/metagpt/utils/read_document.py
index d2fafbc17..c837baf25 100644
--- a/metagpt/utils/read_document.py
+++ b/metagpt/utils/read_document.py
@@ -8,7 +8,6 @@
import docx
-
def read_docx(file_path: str) -> list:
"""Open a docx file"""
doc = docx.Document(file_path)
diff --git a/metagpt/utils/singleton.py b/metagpt/utils/singleton.py
index a9e0862c0..474b537db 100644
--- a/metagpt/utils/singleton.py
+++ b/metagpt/utils/singleton.py
@@ -20,3 +20,4 @@ class Singleton(abc.ABCMeta, type):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
+
\ No newline at end of file
diff --git a/metagpt/utils/special_tokens.py b/metagpt/utils/special_tokens.py
index 5e780ce05..2adb93c77 100644
--- a/metagpt/utils/special_tokens.py
+++ b/metagpt/utils/special_tokens.py
@@ -1,4 +1,4 @@
# token to separate different code messages in a WriteCode Message content
-MSG_SEP = "#*000*#"
+MSG_SEP = "#*000*#"
# token to seperate file name and the actual code text in a code message
FILENAME_CODE_SEP = "#*001*#"
diff --git a/metagpt/utils/text.py b/metagpt/utils/text.py
index c36058e42..be3c52edd 100644
--- a/metagpt/utils/text.py
+++ b/metagpt/utils/text.py
@@ -3,8 +3,7 @@ from typing import Generator, Sequence
from metagpt.utils.token_counter import TOKEN_MAX, count_string_tokens
-def reduce_message_length(msgs: Generator[str, None, None], model_name: str, system_text: str,
- reserved: int = 0, ) -> str:
+def reduce_message_length(msgs: Generator[str, None, None], model_name: str, system_text: str, reserved: int = 0,) -> str:
"""Reduce the length of concatenated message segments to fit within the maximum token size.
Args:
@@ -28,11 +27,11 @@ def reduce_message_length(msgs: Generator[str, None, None], model_name: str, sys
def generate_prompt_chunk(
- text: str,
- prompt_template: str,
- model_name: str,
- system_text: str,
- reserved: int = 0,
+ text: str,
+ prompt_template: str,
+ model_name: str,
+ system_text: str,
+ reserved: int = 0,
) -> Generator[str, None, None]:
"""Split the text into chunks of a maximum token size.
@@ -50,9 +49,9 @@ def generate_prompt_chunk(
current_token = 0
current_lines = []
- reserved = reserved + count_string_tokens(prompt_template + system_text, model_name)
+ reserved = reserved + count_string_tokens(prompt_template + system_text, model_name)
# 100 is a magic number to ensure the maximum context length is not exceeded
- max_token = TOKEN_MAX.get(model_name, 2048) - reserved - 100
+ max_token = TOKEN_MAX.get(model_name, 2048) - reserved - 100
while paragraphs:
paragraph = paragraphs.pop(0)
@@ -104,7 +103,7 @@ def decode_unicode_escape(text: str) -> str:
return text.encode("utf-8").decode("unicode_escape", "ignore")
-def _split_by_count(lst: Sequence, count: int):
+def _split_by_count(lst: Sequence, count: int):
avg = len(lst) // count
remainder = len(lst) % count
start = 0
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index 2b9f21fb8..a5a65803a 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -24,6 +24,7 @@ TOKEN_COSTS = {
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
}
+
TOKEN_MAX = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
diff --git a/requirements.txt b/requirements.txt
index e855b6e83..562a653f3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -44,3 +44,8 @@ ta==0.10.2
semantic-kernel==0.3.10.dev0
websocket-client==0.58.0
+
+aiofiles~=23.2.1
+pygments~=2.16.1
+requests~=2.31.0
+pyyaml~=6.0.1
\ No newline at end of file
diff --git a/startup.py b/startup.py
index df94aeaba..e2a903c9b 100644
--- a/startup.py
+++ b/startup.py
@@ -15,12 +15,12 @@ from metagpt.software_company import SoftwareCompany
async def startup(
- idea: str,
- investment: float = 3.0,
- n_round: int = 5,
- code_review: bool = False,
- run_tests: bool = False,
- implement: bool = True,
+ idea: str,
+ investment: float = 3.0,
+ n_round: int = 5,
+ code_review: bool = False,
+ run_tests: bool = False,
+ implement: bool = True,
):
"""Run a startup. Be a boss."""
company = SoftwareCompany()
@@ -48,12 +48,12 @@ async def startup(
def main(
- idea: str,
- investment: float = 3.0,
- n_round: int = 5,
- code_review: bool = True,
- run_tests: bool = False,
- implement: bool = True,
+ idea: str,
+ investment: float = 3.0,
+ n_round: int = 5,
+ code_review: bool = True,
+ run_tests: bool = False,
+ implement: bool = True,
):
"""
We are a software startup comprised of AI. By investing in us,
diff --git a/tests/conftest.py b/tests/conftest.py
index d2ac8304f..feecc7715 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -6,14 +6,14 @@
@File : conftest.py
"""
-import asyncio
-import re
from unittest.mock import Mock
import pytest
from metagpt.logs import logger
from metagpt.provider.openai_api import OpenAIGPTAPI as GPTAPI
+import asyncio
+import re
class Context:
diff --git a/tests/metagpt/actions/mock.py b/tests/metagpt/actions/mock.py
index fe82fe01e..a800690e8 100644
--- a/tests/metagpt/actions/mock.py
+++ b/tests/metagpt/actions/mock.py
@@ -159,6 +159,7 @@ sequenceDiagram
The original requirements did not specify whether the game should have a save/load feature, multiplayer support, or any specific graphical user interface. More information on these aspects could help in further refining the product design and requirements.
"""
+
PROJECT_MANAGEMENT_SAMPLE = '''## Required Python third-party packages: Provided in requirements.txt format
```python
"pytest==6.2.5"
@@ -216,6 +217,7 @@ The original requirements did not specify whether the game should have a save/lo
```
'''
+
WRITE_CODE_PROMPT_SAMPLE = """
你是一个工程师。下面是背景信息与你的当前任务,请为任务撰写代码。
撰写的代码应该符合PEP8,优雅,模块化,易于阅读与维护,代码本身应该有__main__入口来防止桩函数
@@ -373,6 +375,7 @@ if __name__ == '__main__':
print('No results found.')
"""
+
REFINED_CODE = '''
import requests
diff --git a/tests/metagpt/actions/test_clone_function.py b/tests/metagpt/actions/test_clone_function.py
index e11402c97..6d4432dcd 100644
--- a/tests/metagpt/actions/test_clone_function.py
+++ b/tests/metagpt/actions/test_clone_function.py
@@ -2,6 +2,7 @@ import pytest
from metagpt.actions.clone_function import CloneFunction, run_function_code
+
source_code = """
import pandas as pd
import ta
@@ -36,10 +37,7 @@ def get_expected_res():
stock_data['SMA'] = ta.trend.sma_indicator(stock_data['Close'], window=6)
stock_data[['Date', 'Close', 'SMA']].head()
# 计算布林带
- stock_data['bb_upper'], stock_data['bb_middle'], stock_data['bb_lower'] = ta.volatility.bollinger_hband_indicator(
- stock_data['Close'], window=20), ta.volatility.bollinger_mavg(stock_data['Close'],
- window=20), ta.volatility.bollinger_lband_indicator(
- stock_data['Close'], window=20)
+ stock_data['bb_upper'], stock_data['bb_middle'], stock_data['bb_lower'] = ta.volatility.bollinger_hband_indicator(stock_data['Close'], window=20), ta.volatility.bollinger_mavg(stock_data['Close'], window=20), ta.volatility.bollinger_lband_indicator(stock_data['Close'], window=20)
stock_data[['Date', 'Close', 'bb_upper', 'bb_middle', 'bb_lower']].head()
return stock_data
diff --git a/tests/metagpt/actions/test_debug_error.py b/tests/metagpt/actions/test_debug_error.py
index 2393d2cc9..555c84e4e 100644
--- a/tests/metagpt/actions/test_debug_error.py
+++ b/tests/metagpt/actions/test_debug_error.py
@@ -144,12 +144,12 @@ Engineer
---
'''
-
@pytest.mark.asyncio
async def test_debug_error():
+
debug_error = DebugError("debug_error")
file_name, rewritten_code = await debug_error.run(context=EXAMPLE_MSG_CONTENT)
- assert "class Player" in rewritten_code # rewrite the same class
- assert "while self.score > 21" in rewritten_code # a key logic to rewrite to (original one is "if self.score > 12")
+ assert "class Player" in rewritten_code # rewrite the same class
+ assert "while self.score > 21" in rewritten_code # a key logic to rewrite to (original one is "if self.score > 12")
diff --git a/tests/metagpt/actions/test_detail_mining.py b/tests/metagpt/actions/test_detail_mining.py
index 1266960cc..c9d5331f9 100644
--- a/tests/metagpt/actions/test_detail_mining.py
+++ b/tests/metagpt/actions/test_detail_mining.py
@@ -10,7 +10,6 @@ import pytest
from metagpt.actions.detail_mining import DetailMining
from metagpt.logs import logger
-
@pytest.mark.asyncio
async def test_detail_mining():
topic = "如何做一个生日蛋糕"
@@ -18,6 +17,7 @@ async def test_detail_mining():
detail_mining = DetailMining("detail_mining")
rsp = await detail_mining.run(topic=topic, record=record)
logger.info(f"{rsp.content=}")
-
+
assert '##OUTPUT' in rsp.content
assert '蛋糕' in rsp.content
+
diff --git a/tests/metagpt/actions/test_ui_design.py b/tests/metagpt/actions/test_ui_design.py
index dedd0b30e..d284b20f2 100644
--- a/tests/metagpt/actions/test_ui_design.py
+++ b/tests/metagpt/actions/test_ui_design.py
@@ -4,7 +4,7 @@
#
from tests.metagpt.roles.ui_role import UIDesign
-llm_resp = '''
+llm_resp = '''
# UI Design Description
```The user interface for the snake game will be designed in a way that is simple, clean, and intuitive. The main elements of the game such as the game grid, snake, food, score, and game over message will be clearly defined and easy to understand. The game grid will be centered on the screen with the score displayed at the top. The game controls will be intuitive and easy to use. The design will be modern and minimalist with a pleasing color scheme.```
@@ -100,7 +100,6 @@ body {
font-size: 3em;
'''
-
def test_ui_design_parse_css():
ui_design_work = UIDesign(name="UI design action")
@@ -162,7 +161,7 @@ def test_ui_design_parse_css():
transform: translate(-50%, -50%);
font-size: 3em;
'''
- assert ui_design_work.parse_css_code(context=llm_resp) == css
+ assert ui_design_work.parse_css_code(context=llm_resp) == css
def test_ui_design_parse_html():
@@ -186,4 +185,7 @@ def test_ui_design_parse_html():