diff --git a/Message b/Message deleted file mode 100644 index e69de29bb..000000000 diff --git a/None b/None deleted file mode 100644 index e69de29bb..000000000 diff --git a/README.md b/README.md index adc9d8cea..864d56c53 100644 --- a/README.md +++ b/README.md @@ -12,10 +12,9 @@ # MetaGPT: The Multi-Agent Framework CN doc EN doc JA doc -Discord Follow +Discord Follow License: MIT roadmap -roadmap Twitter Follow

diff --git a/config/config.yaml b/config/config.yaml index ec89a9932..40d37451a 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -16,11 +16,12 @@ RPM: 10 #Anthropic_API_KEY: "YOUR_API_KEY" #### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb - +#### You can use ENGINE or DEPLOYMENT mode #OPENAI_API_TYPE: "azure" #OPENAI_API_BASE: "YOUR_AZURE_ENDPOINT" #OPENAI_API_KEY: "YOUR_AZURE_API_KEY" #OPENAI_API_VERSION: "YOUR_AZURE_API_VERSION" +#DEPLOYMENT_NAME: "YOUR_DEPLOYMENT_NAME" #DEPLOYMENT_ID: "YOUR_DEPLOYMENT_ID" #### for Search diff --git a/docs/README_CN.md b/docs/README_CN.md index 2180eb518..ae5d954e4 100644 --- a/docs/README_CN.md +++ b/docs/README_CN.md @@ -53,6 +53,25 @@ # 第 3 步:克隆仓库到您的本地机器,并进行安装。 python setup.py install ``` +**注意:** + +- 如果已经安装了Chrome、Chromium或MS Edge,可以通过将环境变量`PUPPETEER_SKIP_CHROMIUM_DOWNLOAD`设置为`true`来跳过下载Chromium。 + +- 一些人在全局安装此工具时遇到问题。在本地安装是替代解决方案, + + ```bash + npm install @mermaid-js/mermaid-cli + ``` + +- 不要忘记在config.yml中为mmdc进行配置, + + ```yml + PUPPETEER_CONFIG: "./config/puppeteer-config.json" + MMDC: "./node_modules/.bin/mmdc" + ``` + +- 如果`python setup.py install`失败并显示错误`[Errno 13] Permission denied: '/usr/local/lib/python3.11/dist-packages/test-easy-install-13129.write-test'`,请尝试使用`python setup.py install --user`运行。 + ### Docker安装 ```bash @@ -115,7 +134,7 @@ ## 示例:启动一个创业公司 ```shell python startup.py "写一个命令行贪吃蛇" -# 开启code review模式会会花费更多的money, 但是会提升代码质量和成功率 +# 开启code review模式会花费更多的金钱, 但是会提升代码质量和成功率 python startup.py "写一个命令行贪吃蛇" --code_review True ``` @@ -123,7 +142,6 @@ # 开启code review模式会会花费更多的money, 但是会提升代码质量 ### 平台或工具的倾向性 可以在阐述需求时说明想要使用的平台或工具。 例如: - ```shell python startup.py "写一个基于pygame的命令行贪吃蛇" ``` diff --git a/int b/int deleted file mode 100644 index e69de29bb..000000000 diff --git a/metagpt/config.py b/metagpt/config.py index b51c81862..9260ae605 100644 --- a/metagpt/config.py +++ b/metagpt/config.py @@ -59,6 +59,7 @@ class Config(metaclass=Singleton):
self.openai_api_rpm = self._get("RPM", 3) self.openai_api_model = self._get("OPENAI_API_MODEL", "gpt-4") self.max_tokens_rsp = self._get("MAX_TOKENS", 2048) + self.deployment_name = self._get('DEPLOYMENT_NAME') self.deployment_id = self._get("DEPLOYMENT_ID") self.claude_api_key = self._get("Anthropic_API_KEY") diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py index ff8943d3b..ad9df0396 100644 --- a/metagpt/provider/openai_api.py +++ b/metagpt/provider/openai_api.py @@ -162,10 +162,12 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): # iterate through the stream of events async for chunk in response: collected_chunks.append(chunk) # save the event response - chunk_message = chunk["choices"][0]["delta"] # extract the message - collected_messages.append(chunk_message) # save the message - if "content" in chunk_message: - print(chunk_message["content"], end="") + choices = chunk["choices"] + if len(choices) > 0: + chunk_message = chunk["choices"][0].get("delta", {}) # extract the message + collected_messages.append(chunk_message) # save the message + if "content" in chunk_message: + print(chunk_message["content"], end="") print() full_reply_content = "".join([m.get("content", "") for m in collected_messages]) @@ -174,25 +176,24 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter): return full_reply_content def _cons_kwargs(self, messages: list[dict]) -> dict: + kwargs = { + "messages": messages, + "max_tokens": self.get_max_tokens(messages), + "n": 1, + "stop": None, + "temperature": 0.3, + "timeout": 3 + } if CONFIG.openai_api_type == "azure": - kwargs = { - "deployment_id": CONFIG.deployment_id, - "messages": messages, - "max_tokens": self.get_max_tokens(messages), - "n": 1, - "stop": None, - "temperature": 0.3, - } + if CONFIG.deployment_name and CONFIG.deployment_id: + raise ValueError("You can only use one of the `deployment_id` or `deployment_name` model") + elif not CONFIG.deployment_name and not CONFIG.deployment_id: + raise ValueError("You 
must specify `DEPLOYMENT_NAME` or `DEPLOYMENT_ID` parameter") + kwargs_mode = {"engine": CONFIG.deployment_name} if CONFIG.deployment_name \ + else {"deployment_id": CONFIG.deployment_id} else: - kwargs = { - "model": self.model, - "messages": messages, - "max_tokens": self.get_max_tokens(messages), - "n": 1, - "stop": None, - "temperature": 0.3, - } - kwargs["timeout"] = 3 + kwargs_mode = {"model": self.model} + kwargs.update(kwargs_mode) return kwargs async def _achat_completion(self, messages: list[dict]) -> dict: diff --git a/metagpt/roles/prompt.py b/metagpt/roles/prompt.py index d13551203..c22e0226b 100644 --- a/metagpt/roles/prompt.py +++ b/metagpt/roles/prompt.py @@ -32,7 +32,7 @@ class PromptString(Enum): RECENT_ACTIVITY = "Based on the following memory, produce a brief summary of what {full_name} has been up to recently. Do not invent details not explicitly stated in the memory. For any conversation, be sure to mention whether the conversation has concluded or is still ongoing.\n\nMemory: {memory_descriptions}" - MAKE_PLANS = 'You are a plan-generating AI. Your job is to assist the character in formulating new plans based on new information. Given the character's information (profile, objectives, recent activities, current plans, and location context) and their current thought process, produce a new set of plans for them. The final plan should comprise at least {time_window} of activities and no more than 5 individual plans. 
List the plans in the order they should be executed, with each plan detailing its description, location, start time, stop criteria, and maximum duration.\n\nSample plan: \'{{"index": 1, "description": "Cook dinner", "location_id": "0a3bc22b-36aa-48ab-adb0-18616004caed","start_time": "2022-12-12T20:00:00+00:00","max_duration_hrs": 1.5, "stop_condition": "Dinner is fully prepared"}}\'\n\nFor each plan, choose the most appropriate location name from this list: {allowed_location_descriptions}\n\n{format_instructions}\n\nAlways prioritize completing any unfinished conversations.\n\nLet's begin!\n\nName: {full_name}\nProfile: {private_bio}\nObjectives: {directives}\nLocation Context: {location_context}\nCurrent Plans: {current_plans}\nRecent Activities: {recent_activity}\nThought Process: {thought_process}\nIt's essential to encourage the character to collaborate with other characters in their plans.\n\n' + MAKE_PLANS = "You are a plan-generating AI. Your job is to assist the character in formulating new plans based on new information. Given the character's information (profile, objectives, recent activities, current plans, and location context) and their current thought process, produce a new set of plans for them. The final plan should comprise at least {time_window} of activities and no more than 5 individual plans. 
List the plans in the order they should be executed, with each plan detailing its description, location, start time, stop criteria, and maximum duration.\n\nSample plan: {{\"index\": 1, \"description\": \"Cook dinner\", \"location_id\": \"0a3bc22b-36aa-48ab-adb0-18616004caed\",\"start_time\": \"2022-12-12T20:00:00+00:00\",\"max_duration_hrs\": 1.5, \"stop_condition\": \"Dinner is fully prepared\"}}\'\n\nFor each plan, choose the most appropriate location name from this list: {allowed_location_descriptions}\n\n{format_instructions}\n\nAlways prioritize completing any unfinished conversations.\n\nLet's begin!\n\nName: {full_name}\nProfile: {private_bio}\nObjectives: {directives}\nLocation Context: {location_context}\nCurrent Plans: {current_plans}\nRecent Activities: {recent_activity}\nThought Process: {thought_process}\nIt's essential to encourage the character to collaborate with other characters in their plans.\n\n" EXECUTE_PLAN = "You are a role-playing AI, playing the role of {your_name}, in front of a live audience. Every word you say can be observed by the audience, so make sure you speak often and make it interesting. You cannot interact directly with the audience.\n\nBased on the following context and tools, proceed as if you are {your_name}. Your primary task is to complete the task given below, however, if you are currently in a conversation with another character, you should always complete the conversation first and then proceed with the task. Do not start working while you are engaged in an unfinished conversation. Use your best judgment to determine whether a conversation involves you and whether it is unfinished. 
You don't need to respond to every message you receive.\n\nThis is a personal profile of your character:\n{your_private_bio}\n\nThis is a description of your location and other nearby characters you can converse with:\n\n{location_context}\n\nThese are some relevant memories:\n```\n{relevant_memories}\n```\n\nThis is some relevant conversation history:\n```\n{conversation_history}\n```\n\nThese are the tools you can use:\n{tools}\n\nYour response should follow the format below:\n\nTask: The task you must complete\nThoughts: What you should think about doing\nAction: The action to take, must be one of these words: [{tool_names}]\nAction Input: Input for the action\nObservation: The result of the action\n... (This Thoughts/Action/Action Input/Observation can be repeated N times)\nThoughts: 'I have completed the task'\nFinal Response: The final response to the task\n\nIf you are not ready with a final response, then you must take an action.\n\nIf you are certain that you cannot complete the task with the tools provided, return 'Final Response: Need Help', however, if you are in conversation with another character, responses like 'I don't know' are valid. 
During conversations, you should never break character or admit you are an AI.\nIf the task is completed and no specific response is required, return 'Final Response: Done'\nLet's begin!\n\nTask: {input}\n\n{agent_scratchpad}" diff --git a/metagpt/utils/common.py b/metagpt/utils/common.py index 2e214685c..99038dc64 100644 --- a/metagpt/utils/common.py +++ b/metagpt/utils/common.py @@ -9,6 +9,7 @@ import ast import contextlib import inspect import os +import platform import re from typing import List, Tuple @@ -20,7 +21,10 @@ def check_cmd_exists(command) -> int: :param command: 待检查的命令 :return: 如果命令存在,返回0,如果不存在,返回非0 """ - check_command = 'command -v ' + command + ' >/dev/null 2>&1 || { echo >&2 "no mermaid"; exit 1; }' + if platform.system().lower() == 'windows': + check_command = 'where ' + command + else: + check_command = 'command -v ' + command + ' >/dev/null 2>&1 || { echo >&2 "no mermaid"; exit 1; }' result = os.system(check_command) return result diff --git a/startup.py b/startup.py index 611317fd4..e6d5fc4e9 100644 --- a/startup.py +++ b/startup.py @@ -44,9 +44,9 @@ def main( idea: str, investment: float = 3.0, n_round: int = 5, - code_review: bool = False, + code_review: bool = True, run_tests: bool = False, - implement: bool = False + implement: bool = True ): """ We are a software startup comprised of AI. By investing in us, @@ -66,4 +66,4 @@ def main( if __name__ == '__main__': fire.Fire(main) - \ No newline at end of file +