diff --git a/metagpt/configs/llm_config.py b/metagpt/configs/llm_config.py
index 77bfc8466..fa9bc0b1b 100644
--- a/metagpt/configs/llm_config.py
+++ b/metagpt/configs/llm_config.py
@@ -29,6 +29,7 @@ class LLMType(Enum):
     DASHSCOPE = "dashscope"  # Aliyun LingJi DashScope
     MOONSHOT = "moonshot"
     MISTRAL = "mistral"
+    YI = "yi"  # lingyiwanwu

     def __missing__(self, key):
         return self.OPENAI
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index 8f3b71c42..3a53a4548 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -44,7 +44,7 @@ from metagpt.utils.token_counter import (
 )


-@register_provider([LLMType.OPENAI, LLMType.FIREWORKS, LLMType.OPEN_LLM, LLMType.MOONSHOT, LLMType.MISTRAL])
+@register_provider([LLMType.OPENAI, LLMType.FIREWORKS, LLMType.OPEN_LLM, LLMType.MOONSHOT, LLMType.MISTRAL, LLMType.YI])
 class OpenAILLM(BaseLLM):
     """Check https://platform.openai.com/examples for examples"""
diff --git a/metagpt/utils/token_counter.py b/metagpt/utils/token_counter.py
index f7c53706b..cf5f94ba5 100644
--- a/metagpt/utils/token_counter.py
+++ b/metagpt/utils/token_counter.py
@@ -48,6 +48,8 @@ TOKEN_COSTS = {
     "claude-2.1": {"prompt": 0.008, "completion": 0.024},
     "claude-3-sonnet-20240229": {"prompt": 0.003, "completion": 0.015},
     "claude-3-opus-20240229": {"prompt": 0.015, "completion": 0.075},
+    "yi-34b-chat-0205": {"prompt": 0.0003, "completion": 0.0003},
+    "yi-34b-chat-200k": {"prompt": 0.0017, "completion": 0.0017},
 }
@@ -176,6 +178,8 @@ TOKEN_MAX = {
     "claude-2.1": 200000,
     "claude-3-sonnet-20240229": 200000,
     "claude-3-opus-20240229": 200000,
+    "yi-34b-chat-0205": 4000,
+    "yi-34b-chat-200k": 200000,
 }