Add other LLM providers to LLMProviderRegistry

This commit is contained in:
better629 2023-12-21 12:55:45 +08:00
parent e5a7fdfe3b
commit f3eb9f638e
3 changed files with 23 additions and 12 deletions

View file

@@ -6,7 +6,16 @@
@File : __init__.py
"""
from metagpt.provider.fireworks_api import FireWorksGPTAPI
from metagpt.provider.google_gemini_api import GeminiGPTAPI
from metagpt.provider.open_llm_api import OpenLLMGPTAPI
from metagpt.provider.openai_api import OpenAIGPTAPI
from metagpt.provider.zhipuai_api import ZhiPuAIGPTAPI

# Public API of the provider package: one GPT-API class per supported LLM
# backend. Importing these modules also triggers their @register_provider
# side effects so the LLMProviderRegistry is populated.
# NOTE: the stale single-entry `__all__ = ["OpenAIGPTAPI"]` that preceded
# this list was a leftover removed line from the diff and has been dropped.
__all__ = [
    "FireWorksGPTAPI",
    "GeminiGPTAPI",
    "OpenLLMGPTAPI",
    "OpenAIGPTAPI",
    "ZhiPuAIGPTAPI",
]

View file

@ -2,6 +2,12 @@
# -*- coding: utf-8 -*-
# @Desc : Google Gemini LLM from https://ai.google.dev/tutorials/python_quickstart
import google.generativeai as genai
from google.ai import generativelanguage as glm
from google.generativeai.generative_models import GenerativeModel
from google.generativeai.types import content_types
from google.generativeai.types.generation_types import GenerateContentResponse, AsyncGenerateContentResponse
from google.generativeai.types.generation_types import GenerationConfig
from tenacity import (
after_log,
retry,
@@ -9,16 +15,11 @@ from tenacity import (
stop_after_attempt,
wait_random_exponential,
)
import google.generativeai as genai
from google.ai import generativelanguage as glm
from google.generativeai.types import content_types
from google.generativeai.generative_models import GenerativeModel
from google.generativeai.types.generation_types import GenerateContentResponse, AsyncGenerateContentResponse
from google.generativeai.types.generation_types import GenerationConfig
from metagpt.config import CONFIG
from metagpt.config import CONFIG, LLMProviderEnum
from metagpt.logs import logger
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.provider.llm_provider_registry import register_provider
from metagpt.provider.openai_api import CostManager, log_and_reraise
@@ -29,18 +30,19 @@ class GeminiGenerativeModel(GenerativeModel):
"""
def count_tokens(
    self, contents: content_types.ContentsType
) -> glm.CountTokensResponse:
    """Synchronously count the tokens in *contents* for this model.

    Args:
        contents: Any input accepted by ``content_types.to_contents``
            (e.g. a string, a part, or a list of parts).

    Returns:
        The ``glm.CountTokensResponse`` returned by the underlying client.
    """
    # The duplicated signature line from the stripped diff (old + new copy of
    # `self, contents: ...`) is collapsed back into a single valid parameter list.
    # Normalize the flexible input into the Content list the RPC expects.
    contents = content_types.to_contents(contents)
    return self._client.count_tokens(model=self.model_name, contents=contents)
async def count_tokens_async(
    self, contents: content_types.ContentsType
) -> glm.CountTokensResponse:
    """Asynchronously count the tokens in *contents* for this model.

    Args:
        contents: Any input accepted by ``content_types.to_contents``
            (e.g. a string, a part, or a list of parts).

    Returns:
        The awaited ``glm.CountTokensResponse`` from the async client.
    """
    # The duplicated signature line from the stripped diff is collapsed back
    # into a single valid parameter list; behavior mirrors count_tokens but
    # goes through the async client.
    contents = content_types.to_contents(contents)
    return await self._async_client.count_tokens(model=self.model_name, contents=contents)
@register_provider(LLMProviderEnum.GEMINI)
class GeminiGPTAPI(BaseGPTAPI):
"""
Refs to `https://ai.google.dev/tutorials/python_quickstart`