fix tests

This commit is contained in:
geekan 2023-12-28 18:06:02 +08:00
parent 637f04dd2a
commit 4e32ee120c
5 changed files with 24 additions and 24 deletions

View file

@@ -17,7 +17,7 @@ async def test_collect_links(mocker):
elif "sort the remaining search results" in prompt:
return "[1,2]"
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
resp = await research.CollectLinks().run("The application of MetaGPT")
for i in ["MetaGPT use cases", "The roadmap of MetaGPT", "The function of MetaGPT", "What llm MetaGPT support"]:
assert i in resp
@@ -36,7 +36,7 @@ async def test_collect_links_with_rank_func(mocker):
rank_after.append(results)
return results
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_collect_links_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_collect_links_llm_ask)
resp = await research.CollectLinks(rank_func=rank_func).run("The application of MetaGPT")
for x, y, z in zip(rank_before, rank_after, resp.values()):
assert x[::-1] == y
@@ -48,7 +48,7 @@ async def test_web_browse_and_summarize(mocker):
async def mock_llm_ask(*args, **kwargs):
return "metagpt"
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
url = "https://github.com/geekan/MetaGPT"
url2 = "https://github.com/trending"
query = "What's new in metagpt"
@@ -64,7 +64,7 @@ async def test_web_browse_and_summarize(mocker):
async def mock_llm_ask(*args, **kwargs):
return "Not relevant."
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
resp = await research.WebBrowseAndSummarize().run(url, query=query)
assert len(resp) == 1
@@ -81,7 +81,7 @@ async def test_conduct_research(mocker):
data = f"# Research Report\n## Introduction\n{args} {kwargs}"
return data
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
content = (
"MetaGPT takes a one line requirement as input and "
"outputs user stories / competitive analysis / requirements / data structures / APIs / documents, etc."

View file

@@ -3,7 +3,7 @@
"""
@Time : 2023/5/7 17:40
@Author : alexanderwu
@File : test_base_gpt_api.py
@File : test_base_llm.py
"""
import pytest
@@ -27,7 +27,7 @@ prompt_msg = "who are you"
resp_content = default_chat_resp["choices"][0]["message"]["content"]
class MockBaseGPTAPI(BaseLLM):
class MockBaseLLM(BaseLLM):
def completion(self, messages: list[dict], timeout=3):
return default_chat_resp
@@ -41,12 +41,12 @@ class MockBaseGPTAPI(BaseLLM):
return default_chat_resp
def test_base_gpt_api():
def test_base_llm():
message = Message(role="user", content="hello")
assert "role" in message.to_dict()
assert "user" in str(message)
base_gpt_api = MockBaseGPTAPI()
base_llm = MockBaseLLM()
openai_funccall_resp = {
"choices": [
@@ -70,37 +70,37 @@ def test_base_gpt_api():
}
]
}
func: dict = base_gpt_api.get_choice_function(openai_funccall_resp)
func: dict = base_llm.get_choice_function(openai_funccall_resp)
assert func == {
"name": "execute",
"arguments": '{\n "language": "python",\n "code": "print(\'Hello, World!\')"\n}',
}
func_args: dict = base_gpt_api.get_choice_function_arguments(openai_funccall_resp)
func_args: dict = base_llm.get_choice_function_arguments(openai_funccall_resp)
assert func_args == {"language": "python", "code": "print('Hello, World!')"}
choice_text = base_gpt_api.get_choice_text(openai_funccall_resp)
choice_text = base_llm.get_choice_text(openai_funccall_resp)
assert choice_text == openai_funccall_resp["choices"][0]["message"]["content"]
# resp = base_gpt_api.ask(prompt_msg)
# resp = base_llm.ask(prompt_msg)
# assert resp == resp_content
# resp = base_gpt_api.ask_batch([prompt_msg])
# resp = base_llm.ask_batch([prompt_msg])
# assert resp == resp_content
# resp = base_gpt_api.ask_code([prompt_msg])
# resp = base_llm.ask_code([prompt_msg])
# assert resp == resp_content
@pytest.mark.asyncio
async def test_async_base_gpt_api():
base_gpt_api = MockBaseGPTAPI()
async def test_async_base_llm():
base_llm = MockBaseLLM()
resp = await base_gpt_api.aask(prompt_msg)
resp = await base_llm.aask(prompt_msg)
assert resp == resp_content
resp = await base_gpt_api.aask_batch([prompt_msg])
resp = await base_llm.aask_batch([prompt_msg])
assert resp == resp_content
resp = await base_gpt_api.aask_code([prompt_msg])
resp = await base_llm.aask_code([prompt_msg])
assert resp == resp_content

View file

@@ -28,7 +28,7 @@ async def mock_llm_ask(self, prompt: str, system_msgs):
async def test_researcher(mocker):
with TemporaryDirectory() as dirname:
topic = "dataiku vs. datarobot"
mocker.patch("metagpt.provider.base_gpt_api.BaseGPTAPI.aask", mock_llm_ask)
mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", mock_llm_ask)
researcher.RESEARCH_PATH = Path(dirname)
await researcher.Researcher().run(topic)
assert (researcher.RESEARCH_PATH / f"{topic}.md").read_text().startswith("# Research Report")