mirror of https://github.com/FoundationAgents/MetaGPT.git
synced 2026-05-15 11:02:36 +02:00
add anthropic_api
parent 0e63b92883
commit f1f0ae4cc1
20 changed files with 228 additions and 199 deletions
@@ -56,3 +56,7 @@ mock_llm_config_spark = LLMConfig(
 mock_llm_config_qianfan = LLMConfig(api_type="qianfan", access_key="xxx", secret_key="xxx", model="ERNIE-Bot-turbo")

 mock_llm_config_dashscope = LLMConfig(api_type="dashscope", api_key="xxx", model="qwen-max")
+
+mock_llm_config_anthropic = LLMConfig(
+    api_type="anthropic", api_key="xxx", base_url="https://api.anthropic.com", model="claude-3-opus-20240229"
+)
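The new mock_llm_config_anthropic follows the pattern of the surrounding provider mocks: a placeholder api_key, the public Anthropic endpoint, and a Claude 3 model name. As a rough, illustrative sketch (not part of this commit), a provider test consumes it the same way the other configs are consumed; the placeholder credentials are never sent anywhere because the SDK call is monkey-patched in the tests further down.

from metagpt.provider.anthropic_api import AnthropicLLM

# Constructing the provider from the mock config performs no network I/O.
anthropic_llm = AnthropicLLM(mock_llm_config_anthropic)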
@@ -3,6 +3,14 @@
 # @Desc : default request & response data for provider unittest


+from anthropic.types import (
+    ContentBlock,
+    ContentBlockDeltaEvent,
+    Message,
+    MessageStartEvent,
+    TextDelta,
+)
+from anthropic.types import Usage as AnthropicUsage
 from dashscope.api_entities.dashscope_response import (
     DashScopeAPIResponse,
     GenerationOutput,
@@ -130,6 +138,38 @@ def get_dashscope_response(name: str) -> GenerationResponse:
     )


+# For Anthropic
+def get_anthropic_response(name: str, stream: bool = False) -> Message:
+    if stream:
+        return [
+            MessageStartEvent(
+                message=Message(
+                    id="xxx",
+                    model=name,
+                    role="assistant",
+                    type="message",
+                    content=[ContentBlock(text="", type="text")],
+                    usage=AnthropicUsage(input_tokens=10, output_tokens=10),
+                ),
+                type="message_start",
+            ),
+            ContentBlockDeltaEvent(
+                index=0,
+                delta=TextDelta(text=resp_cont_tmpl.format(name=name), type="text_delta"),
+                type="content_block_delta",
+            ),
+        ]
+    else:
+        return Message(
+            id="xxx",
+            model=name,
+            role="assistant",
+            type="message",
+            content=[ContentBlock(text=resp_cont_tmpl.format(name=name), type="text")],
+            usage=AnthropicUsage(input_tokens=10, output_tokens=10),
+        )
+
+
 # For llm general chat functions call
 async def llm_general_chat_funcs_test(llm: BaseLLM, prompt: str, messages: list[dict], resp_cont: str):
     resp = await llm.aask(prompt, stream=False)
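get_anthropic_response fabricates both response shapes the Anthropic SDK can hand back: a single Message for non-streaming calls, and a two-event list (one message_start event followed by one content_block_delta) standing in for a streaming response. Purely as an illustration of how a consumer reassembles the streamed text (the helper below is hypothetical and not part of this commit; the event and field names are the anthropic.types attributes imported above):

def collect_streamed_text(events) -> str:
    # Only content_block_delta events carry text; the message_start event is metadata.
    return "".join(event.delta.text for event in events if event.type == "content_block_delta")

# collect_streamed_text(get_anthropic_response("claude-3-opus-20240229", stream=True))
# equals get_anthropic_response("claude-3-opus-20240229").content[0].text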
@@ -2,31 +2,45 @@
 # -*- coding: utf-8 -*-
 # @Desc : the unittest of Claude2

-
 import pytest
 from anthropic.resources.completions import Completion

-from metagpt.provider.anthropic_api import Claude2
-from tests.metagpt.provider.mock_llm_config import mock_llm_config
-from tests.metagpt.provider.req_resp_const import prompt, resp_cont_tmpl
+from metagpt.provider.anthropic_api import AnthropicLLM
+from tests.metagpt.provider.mock_llm_config import mock_llm_config_anthropic
+from tests.metagpt.provider.req_resp_const import (
+    get_anthropic_response,
+    llm_general_chat_funcs_test,
+    messages,
+    prompt,
+    resp_cont_tmpl,
+)

-resp_cont = resp_cont_tmpl.format(name="Claude")
+name = "claude-3-opus-20240229"
+resp_cont = resp_cont_tmpl.format(name=name)


-def mock_anthropic_completions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
-    return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
+async def mock_anthropic_messages_create(
+    self, messages: list[dict], model: str, stream: bool = True, max_tokens: int = None, system: str = None
+) -> Completion:
+    if stream:

+        async def aresp_iterator():
+            resps = get_anthropic_response(name, stream=True)
+            for resp in resps:
+                yield resp

-async def mock_anthropic_acompletions_create(self, model: str, prompt: str, max_tokens_to_sample: int) -> Completion:
-    return Completion(id="xx", completion=resp_cont, model="claude-2", stop_reason="stop_sequence", type="completion")
-
-
-def test_claude2_ask(mocker):
-    mocker.patch("anthropic.resources.completions.Completions.create", mock_anthropic_completions_create)
-    assert resp_cont == Claude2(mock_llm_config).ask(prompt)
+        return aresp_iterator()
+    else:
+        return get_anthropic_response(name)


 @pytest.mark.asyncio
-async def test_claude2_aask(mocker):
-    mocker.patch("anthropic.resources.completions.AsyncCompletions.create", mock_anthropic_acompletions_create)
-    assert resp_cont == await Claude2(mock_llm_config).aask(prompt)
+async def test_anthropic_acompletion(mocker):
+    mocker.patch("anthropic.resources.messages.AsyncMessages.create", mock_anthropic_messages_create)
+
+    anthropic_llm = AnthropicLLM(mock_llm_config_anthropic)
+
+    resp = await anthropic_llm.acompletion(messages)
+    assert resp.content[0].text == resp_cont
+
+    await llm_general_chat_funcs_test(anthropic_llm, prompt, messages, resp_cont)
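The rewritten test patches anthropic.resources.messages.AsyncMessages.create at the class level, so the plain async function above is bound like a method: self is the AsyncMessages instance, and the remaining keyword arguments are whatever AnthropicLLM forwards (messages, model, stream, max_tokens, system). A minimal sketch of the same idea with the standard library instead of the pytest-mock fixture (illustrative only; it simply mirrors the assertions of test_anthropic_acompletion):

from unittest import mock

async def patched_acompletion_example():
    # mock.patch is the standard-library counterpart of the mocker.patch call in the test above.
    with mock.patch("anthropic.resources.messages.AsyncMessages.create", mock_anthropic_messages_create):
        anthropic_llm = AnthropicLLM(mock_llm_config_anthropic)
        resp = await anthropic_llm.acompletion(messages)
        assert resp.content[0].text == resp_cont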
@@ -27,9 +27,15 @@ class MockBaseLLM(BaseLLM):
     def completion(self, messages: list[dict], timeout=3):
         return get_part_chat_completion(name)

+    async def _achat_completion(self, messages: list[dict], timeout=3):
+        pass
+
     async def acompletion(self, messages: list[dict], timeout=3):
         return get_part_chat_completion(name)

+    async def _achat_completion_stream(self, messages: list[dict], timeout: int = 3) -> str:
+        pass
+
     async def acompletion_text(self, messages: list[dict], stream=False, timeout=3) -> str:
         return default_resp_cont

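The two pass-only coroutines give MockBaseLLM concrete implementations for hooks it previously left out. Assuming BaseLLM declares _achat_completion and _achat_completion_stream as abstract coroutines (an assumption inferred from this hunk rather than shown in it), a subclass that omits them cannot be instantiated, which is why even empty stubs are required. A self-contained illustration of that Python behavior, independent of MetaGPT:

from abc import ABC, abstractmethod

class Base(ABC):
    @abstractmethod
    async def _achat_completion(self, messages: list[dict], timeout=3):
        ...

class Incomplete(Base):
    pass

class Stubbed(Base):
    async def _achat_completion(self, messages: list[dict], timeout=3):
        pass

# Incomplete() raises TypeError because the abstract coroutine is not overridden;
# Stubbed() instantiates fine, mirroring the stubs added to MockBaseLLM above.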