MetaGPT/metagpt/llm.py

38 lines
1.2 KiB
Python
Raw Normal View History

2023-06-30 17:10:48 +08:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 14:45
@Author : alexanderwu
@File : llm.py
"""
2023-11-27 17:43:20 +08:00
from metagpt.config import CONFIG
2023-12-14 20:34:04 +08:00
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.provider.fireworks_api import FireWorksGPTAPI
2023-11-29 09:52:26 +08:00
from metagpt.provider.human_provider import HumanProvider
2023-12-14 20:34:04 +08:00
from metagpt.provider.open_llm_api import OpenLLMGPTAPI
2023-11-27 17:43:20 +08:00
from metagpt.provider.openai_api import OpenAIGPTAPI
from metagpt.provider.spark_api import SparkAPI
2023-11-28 18:16:50 +08:00
from metagpt.provider.zhipuai_api import ZhiPuAIGPTAPI
2023-06-30 17:10:48 +08:00
2023-11-29 09:52:55 +08:00
_ = HumanProvider()  # Reference the import so pre-commit/linters don't flag it as unused
2023-11-29 09:52:26 +08:00
2023-06-30 17:10:48 +08:00
def LLM() -> BaseGPTAPI:
    """Instantiate the first LLM provider whose configuration is present.

    Providers are probed in a fixed priority order; the first one whose
    config field holds a truthy value is constructed and returned.

    Returns:
        BaseGPTAPI: a ready-to-use provider instance.

    Raises:
        RuntimeError: if no provider configuration field is set.
    """
    # TODO a little trick, can use registry to initialize LLM instance further
    # Priority-ordered (CONFIG field, provider factory) table.
    _PROVIDERS = (
        ("openai_api_key", OpenAIGPTAPI),
        ("spark_api_key", SparkAPI),
        ("zhipuai_api_key", ZhiPuAIGPTAPI),
        ("open_llm_api_base", OpenLLMGPTAPI),
        ("fireworks_api_key", FireWorksGPTAPI),
    )
    for field, factory in _PROVIDERS:
        if getattr(CONFIG, field):
            return factory()
    raise RuntimeError("You should config a LLM configuration first")