diff --git a/config/config.yaml b/config/config.yaml
index 5025a4977..e5f8f4573 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -16,6 +16,7 @@ MAX_TOKENS: 4096
 RPM: 10
 LLM_TYPE: OpenAI # Except for these three major models – OpenAI, MetaGPT LLM, and Azure – other large models can be distinguished based on the validity of the key.
 TIMEOUT: 60 # Timeout for llm invocation
+DEFAULT_PROVIDER: openai
 
 #### if Spark
 #SPARK_APPID : "YOUR_APPID"
diff --git a/metagpt/config.py b/metagpt/config.py
index eb3636c9a..d633c7d28 100644
--- a/metagpt/config.py
+++ b/metagpt/config.py
@@ -50,6 +50,10 @@ class LLMProviderEnum(Enum):
     AZURE_OPENAI = "azure_openai"
     OLLAMA = "ollama"
 
+    @classmethod
+    def _missing_(cls, value):
+        return cls.OPENAI
+
 
 class Config(metaclass=Singleton):
     """
@@ -108,6 +112,11 @@ class Config(metaclass=Singleton):
             if v:
                 provider = k
                 break
+        if provider is None:
+            if self.DEFAULT_PROVIDER:
+                provider = LLMProviderEnum(self.DEFAULT_PROVIDER)
+            else:
+                raise NotConfiguredException("You should config a LLM configuration first")
 
         if provider is LLMProviderEnum.GEMINI and not require_python_version(req_version=(3, 10)):
             warnings.warn("Use Gemini requires Python >= 3.10")
@@ -117,7 +126,6 @@ class Config(metaclass=Singleton):
         if provider:
             logger.info(f"API: {provider}")
             return provider
-        raise NotConfiguredException("You should config a LLM configuration first")
 
     def get_model_name(self, provider=None) -> str:
         provider = provider or self.get_default_llm_provider_enum()