diff --git a/config/config.yaml b/config/config.yaml
index fc6961f9e..bed67083c 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -7,7 +7,7 @@
 ## Or, you can configure OPENAI_PROXY to access official OPENAI_API_BASE.
 OPENAI_API_BASE: "https://api.openai.com/v1"
 #OPENAI_PROXY: "http://127.0.0.1:8118"
-#OPENAI_API_KEY: "YOUR_API_KEY"
+#OPENAI_API_KEY: "YOUR_API_KEY" # set the value to sk-xxx if you host the openai interface for open llm model
 OPENAI_API_MODEL: "gpt-4"
 MAX_TOKENS: 1500
 RPM: 10
@@ -32,7 +32,7 @@ RPM: 10
 
 #DEPLOYMENT_ID: "YOUR_DEPLOYMENT_ID"
 
 #### if zhipuai from `https://open.bigmodel.cn`. You can set here or export API_KEY="YOUR_API_KEY"
-ZHIPUAI_API_KEY: "YOUR_API_KEY"
+# ZHIPUAI_API_KEY: "YOUR_API_KEY"
 
 #### for Search
diff --git a/metagpt/llm.py b/metagpt/llm.py
index e9b80d7a8..13e5a56e0 100644
--- a/metagpt/llm.py
+++ b/metagpt/llm.py
@@ -17,13 +17,15 @@ from metagpt.provider.spark_api import SparkAPI
 
 def LLM() -> "BaseGPTAPI":
     """ initialize different LLM instance according to the key field existence"""
     # TODO a little trick, can use registry to initialize LLM instance further
-    if CONFIG.openai_api_key and CONFIG.openai_api_key.starswith("sk-"):
+    if CONFIG.openai_api_key and CONFIG.openai_api_key.startswith("sk-"):
         llm = OpenAIGPTAPI()
     elif CONFIG.claude_api_key:
         llm = Claude()
     elif CONFIG.spark_api_key:
         llm = SparkAPI()
-    elif CONFIG.zhipuai_api_key:
+    elif CONFIG.zhipuai_api_key and CONFIG.zhipuai_api_key != "YOUR_API_KEY":
         llm = ZhiPuAIGPTAPI()
+    else:
+        raise RuntimeError("You should config a LLM configuration first")
     return llm