Mirror of https://github.com/FoundationAgents/MetaGPT.git, synced 2026-05-04 21:32:38 +02:00
Merge pull request #685 from garylin2099/llm_mock

Reduce test time with a global LLM mock

commit 230192f5e0
51 changed files with 289 additions and 217 deletions
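The mechanism, as the diff below (apparently the test suite's conftest.py) shows: real LLM responses are stored in a JSON-backed cache keyed by the prompt text, and later test runs replay the stored answer instead of calling the OpenAI API, which is where the time saving comes from. A minimal, self-contained sketch of that cache-or-call pattern (an illustration only, not code from this commit; the file name and helper are made up):

import json
from pathlib import Path

CACHE_FILE = Path("rsp_cache.json")  # hypothetical cache location
cache: dict[str, str] = json.loads(CACHE_FILE.read_text()) if CACHE_FILE.exists() else {}


async def cached_aask(llm, prompt: str) -> str:
    """Return the cached response for `prompt`; call the real LLM only on a cache miss."""
    if prompt not in cache:
        cache[prompt] = await llm.aask(prompt)  # slow, paid API call happens once
        CACHE_FILE.write_text(json.dumps(cache, indent=4, ensure_ascii=False))
    return cache[prompt]  # later runs replay instantly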
@@ -12,7 +12,6 @@ import logging
 import os
 import re
 import uuid
-from typing import Optional
 
 import pytest
 
@@ -20,49 +19,13 @@ from metagpt.config import CONFIG, Config
 from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
 from metagpt.llm import LLM
 from metagpt.logs import logger
-from metagpt.provider.openai_api import OpenAILLM
 from metagpt.utils.git_repository import GitRepository
+from tests.mock.mock_llm import MockLLM
 
-
-class MockLLM(OpenAILLM):
-    rsp_cache: dict = {}
-
-    async def original_aask(
-        self,
-        msg: str,
-        system_msgs: Optional[list[str]] = None,
-        format_msgs: Optional[list[dict[str, str]]] = None,
-        timeout=3,
-        stream=True,
-    ):
-        """A copy of metagpt.provider.base_llm.BaseLLM.aask, we can't use super().aask because it will be mocked"""
-        if system_msgs:
-            message = self._system_msgs(system_msgs)
-        else:
-            message = [self._default_system_msg()] if self.use_system_prompt else []
-        if format_msgs:
-            message.extend(format_msgs)
-        message.append(self._user_msg(msg))
-        rsp = await self.acompletion_text(message, stream=stream, timeout=timeout)
-        return rsp
-
-    async def aask(
-        self,
-        msg: str,
-        system_msgs: Optional[list[str]] = None,
-        format_msgs: Optional[list[dict[str, str]]] = None,
-        timeout=3,
-        stream=True,
-    ) -> str:
-        if msg not in self.rsp_cache:
-            # Call the original unmocked method
-            rsp = await self.original_aask(msg, system_msgs, format_msgs, timeout, stream)
-            logger.info(f"Added '{rsp[:20]}' ... to response cache")
-            self.rsp_cache[msg] = rsp
-            return rsp
-        else:
-            logger.info("Use response cache")
-            return self.rsp_cache[msg]
+RSP_CACHE_NEW = {}  # used globally for producing new and useful only response cache
+ALLOW_OPENAI_API_CALL = os.environ.get(
+    "ALLOW_OPENAI_API_CALL", True
+)  # NOTE: should change to default False once mock is complete
 
 
 @pytest.fixture(scope="session")
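Beyond moving MockLLM out to tests/mock/mock_llm.py (hence the new import), this hunk adds two module-level switches. One caveat worth noting: os.environ.get returns a string whenever the variable is set, so if allow_open_api_call is checked as plain truthiness, values such as "0" or "false" would still count as enabled. A stricter parse could look like the sketch below (an assumption-laden illustration, not part of the commit; env_flag is a made-up helper):

import os


def env_flag(name: str, default: bool = True) -> bool:
    """Read an environment variable as a boolean; unset falls back to `default`."""
    raw = os.environ.get(name)
    if raw is None:
        return default
    return raw.strip().lower() in ("1", "true", "yes", "on")


ALLOW_OPENAI_API_CALL = env_flag("ALLOW_OPENAI_API_CALL", default=True)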
@@ -76,16 +39,37 @@ def rsp_cache():
     else:
         rsp_cache_json = {}
     yield rsp_cache_json
-    with open(new_rsp_cache_file_path, "w") as f2:
+    with open(rsp_cache_file_path, "w") as f2:
         json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
+    with open(new_rsp_cache_file_path, "w") as f2:
+        json.dump(RSP_CACHE_NEW, f2, indent=4, ensure_ascii=False)
 
 
-@pytest.fixture(scope="function")
-def llm_mock(rsp_cache, mocker):
-    llm = MockLLM()
+# Hook to capture the test result
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    outcome = yield
+    rep = outcome.get_result()
+    if rep.when == "call":
+        item.test_outcome = rep
+
+
+@pytest.fixture(scope="function", autouse=True)
+def llm_mock(rsp_cache, mocker, request):
+    llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
     llm.rsp_cache = rsp_cache
     mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
+    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
     yield mocker
+    if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
+        if llm.rsp_candidates:
+            for rsp_candidate in llm.rsp_candidates:
+                cand_key = list(rsp_candidate.keys())[0]
+                cand_value = list(rsp_candidate.values())[0]
+                if cand_key not in llm.rsp_cache:
+                    logger.info(f"Added '{cand_key[:100]} ... -> {cand_value[:20]} ...' to response cache")
+                llm.rsp_cache.update(rsp_candidate)
+                RSP_CACHE_NEW.update(rsp_candidate)
 
 
 class Context:
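Because the llm_mock fixture is autouse, existing tests pick up the mock without per-test changes: anything that eventually calls BaseLLM.aask or BaseLLM.aask_batch is routed to MockLLM. The pytest_runtest_makereport hookwrapper stashes the call-phase report on the test item, and the fixture's teardown reads it so that only responses produced by passing tests are promoted into rsp_cache and RSP_CACHE_NEW. A hypothetical test showing the flow (the test name and prompt are invented, and it assumes pytest-asyncio is available):

import pytest

from metagpt.llm import LLM


@pytest.mark.asyncio
async def test_write_hello_world():  # hypothetical test
    # BaseLLM.aask is patched by the autouse llm_mock fixture: this call replays a
    # cached response, or (when ALLOW_OPENAI_API_CALL permits) hits the real API once
    # and records the answer as a response candidate.
    rsp = await LLM().aask("Write a one-line hello world in Python")
    assert rsp
    # If the test passes, llm_mock's teardown sees request.node.test_outcome.passed
    # and promotes the candidate into rsp_cache / RSP_CACHE_NEW.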
@@ -173,6 +157,13 @@ def init_config():
     Config()
 
 
+@pytest.fixture(scope="function")
+def new_filename(mocker):
+    # NOTE: Mock new filename to make reproducible llm aask, should consider changing after implementing requirement segmentation
+    mocker.patch("metagpt.utils.file_repository.FileRepository.new_filename", lambda: "20240101")
+    yield mocker
+
+
 @pytest.fixture
 def aiohttp_mocker(mocker):
     class MockAioResponse:
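The new_filename fixture exists because the response cache is keyed by the exact prompt text (MockLLM.aask looks the message up in rsp_cache): if a freshly generated filename or timestamp leaks into a prompt, every run produces a different key and the cache never hits. A tiny illustration of that failure mode (all values are made up):

from datetime import datetime

cache = {"Summarize the requirement doc 20240101": "cached summary"}


def build_prompt(doc_name: str) -> str:
    return f"Summarize the requirement doc {doc_name}"


# Non-deterministic input: a fresh date yields a new cache key on most runs.
print(build_prompt(datetime.now().strftime("%Y%m%d")) in cache)  # usually False

# Deterministic input, as the mocked new_filename returns: stable key, cache hit.
print(build_prompt("20240101") in cache)  # True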