MetaGPT/tests/conftest.py

283 lines
9 KiB
Python
Raw Normal View History

2023-06-30 17:10:48 +08:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/1 12:10
@Author : alexanderwu
@File : conftest.py
"""
2023-11-22 16:26:48 +08:00
import asyncio
2024-01-03 10:58:22 +08:00
import json
2023-12-01 16:10:38 +08:00
import logging
2024-01-03 10:58:22 +08:00
import os
2023-11-22 16:26:48 +08:00
import re
2024-01-02 23:09:09 +08:00
import uuid
from pathlib import Path
2024-01-14 23:40:09 +08:00
from typing import Callable
2023-12-14 22:59:41 +08:00
2024-02-08 00:46:04 +08:00
import aiohttp.web
2023-06-30 17:10:48 +08:00
import pytest
2023-12-14 22:59:41 +08:00
2024-01-02 23:07:50 +08:00
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
from metagpt.context import Context as MetagptContext
from metagpt.llm import LLM
2023-07-22 11:28:22 +08:00
from metagpt.logs import logger
from metagpt.utils.git_repository import GitRepository
from metagpt.utils.project_repo import ProjectRepo
2024-01-14 23:40:09 +08:00
from tests.mock.mock_aiohttp import MockAioResponse
from tests.mock.mock_curl_cffi import MockCurlCffiResponse
from tests.mock.mock_httplib2 import MockHttplib2Response
2024-01-04 16:58:11 +08:00
from tests.mock.mock_llm import MockLLM
2023-06-30 17:10:48 +08:00
# Global store of cache entries newly produced during this run; dumped to
# rsp_cache_new.json by the rsp_cache fixture at session teardown.
RSP_CACHE_NEW = {}  # used globally for producing new and useful only response cache
# Whether MockLLM may fall through to a real OpenAI API call (1 = allowed).
ALLOW_OPENAI_API_CALL = int(
    os.environ.get("ALLOW_OPENAI_API_CALL", 1)
)  # NOTE: should change to default 0 (False) once mock is complete
2024-01-02 23:07:50 +08:00
@pytest.fixture(scope="session")
def rsp_cache():
    """Session-scoped dict of cached LLM responses.

    Loads tests/data/rsp_cache.json (empty dict if absent), yields it for the
    whole session, then at teardown rewrites it and also exports only the
    newly-added entries (RSP_CACHE_NEW) to rsp_cache_new.json.
    """
    rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache.json"  # read repo-provided
    new_rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache_new.json"  # exporting a new copy
    # Path.exists() instead of os.path.exists() on a pathlib.Path object.
    if rsp_cache_file_path.exists():
        with open(rsp_cache_file_path, "r", encoding="utf-8") as f1:
            rsp_cache_json = json.load(f1)
    else:
        rsp_cache_json = {}
    yield rsp_cache_json
    # Teardown: persist the (possibly updated) cache and the new-entries copy.
    with open(rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
    with open(new_rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(RSP_CACHE_NEW, f2, indent=4, ensure_ascii=False)
2024-01-02 23:07:50 +08:00
# Hook to capture the test result
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Hookwrapper: code before `yield` runs before report creation,
    # `outcome` carries the report produced by the wrapped hooks.
    outcome = yield
    rep = outcome.get_result()
    # Record only the "call" phase outcome (not setup/teardown); the llm_mock
    # fixture reads item.test_outcome to decide whether to persist responses.
    if rep.when == "call":
        item.test_outcome = rep
@pytest.fixture(scope="function", autouse=True)
def llm_mock(rsp_cache, mocker, request):
    """Autouse fixture that replaces LLM calls with a cache-backed MockLLM.

    Patches BaseLLM.aask/aask_batch and OpenAILLM.aask_code for the duration
    of each test. After a *passed* test, any responses the mock produced are
    merged into the session cache and into RSP_CACHE_NEW.
    """
    llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
    llm.rsp_cache = rsp_cache
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
    mocker.patch("metagpt.provider.openai_api.OpenAILLM.aask_code", llm.aask_code)
    yield mocker
    # Persist only responses from passing tests, so a failing run cannot
    # poison the shared cache. test_outcome is set by pytest_runtest_makereport.
    if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
        for rsp_candidate in llm.rsp_candidates:
            # Each candidate is a single-entry dict; unpack without building lists.
            cand_key, cand_value = next(iter(rsp_candidate.items()))
            if cand_key not in llm.rsp_cache:
                logger.info(f"Added '{cand_key[:100]} ... -> {str(cand_value)[:20]} ...' to response cache")
            llm.rsp_cache.update(rsp_candidate)
            RSP_CACHE_NEW.update(rsp_candidate)
2024-01-02 23:07:50 +08:00
2023-06-30 17:10:48 +08:00
class Context:
    """Minimal test-side container exposing a real LLM handle."""

    def __init__(self):
        # The real LLM client is created eagerly; the UI handle is unused for now.
        self._llm_api = LLM()
        self._llm_ui = None

    @property
    def llm_api(self):
        # Planned cache-aware flow (translated from the original notes):
        # 1. Initialize the LLM with cached results.
        # 2. If the query is cached, return the cached result directly.
        # 3. If the query is not cached, call the LLM API for the result.
        # 4. If the query is cached, update the cached result.
        return self._llm_api
@pytest.fixture(scope="package")
def llm_api():
    """Package-scoped fixture yielding a real LLM handle via the test Context."""
    logger.info("Setting up the test")
    test_ctx = Context()
    yield test_ctx.llm_api
    logger.info("Tearing down the test")
2024-01-05 16:42:57 +08:00
@pytest.fixture
def proxy():
    """Return a coroutine factory that starts a tiny local HTTP/HTTPS proxy.

    Awaiting the returned `proxy_func` yields `(server, proxy_url)`; the proxy
    prints each target host so tests can verify traffic with `capfd`.
    """
    # Parses the request line of an HTTP message to extract method/host/port.
    pattern = re.compile(
        rb"(?P<method>[a-zA-Z]+) (?P<uri>(\w+://)?(?P<host>[^\s\'\"<>\[\]{}|/:]+)(:(?P<port>\d+))?[^\s\'\"<>\[\]{}|]*) "
    )

    async def pipe(reader, writer):
        # Copy bytes one direction until EOF, then close the writer side.
        while not reader.at_eof():
            writer.write(await reader.read(2048))
        writer.close()
        await writer.wait_closed()

    async def handle_client(reader, writer):
        # Read the full request headers, then extract the upstream target.
        data = await reader.readuntil(b"\r\n\r\n")
        infos = pattern.match(data)
        host, port = infos.group("host"), infos.group("port")
        print(f"Proxy: {host}")  # checking with capfd fixture
        port = int(port) if port else 80
        remote_reader, remote_writer = await asyncio.open_connection(host, port)
        if data.startswith(b"CONNECT"):
            # HTTPS tunnel: acknowledge, then relay raw bytes both ways.
            writer.write(b"HTTP/1.1 200 Connection Established\r\n\r\n")
        else:
            # Plain HTTP: forward the already-read request to the upstream.
            remote_writer.write(data)
        await asyncio.gather(pipe(reader, remote_writer), pipe(remote_reader, writer))

    async def proxy_func():
        # Port 0 lets the OS pick a free port.
        server = await asyncio.start_server(handle_client, "127.0.0.1", 0)
        return server, "http://{}:{}".format(*server.sockets[0].getsockname())

    return proxy_func
2023-12-08 19:55:47 +08:00
# see https://github.com/Delgan/loguru/issues/59#issuecomment-466591978
@pytest.fixture
def loguru_caplog(caplog):
    """Bridge loguru records into stdlib logging so pytest's caplog captures them."""

    class PropagateHandler(logging.Handler):
        def emit(self, record):
            # Re-dispatch each loguru record through the stdlib logger of the same name.
            logging.getLogger(record.name).handle(record)

    logger.add(PropagateHandler(), format="{message}")
    yield caplog
@pytest.fixture(scope="function")
def context(request):
    """Provide a MetaGPT Context backed by a throwaway per-test git repository."""
    test_context = MetagptContext()
    test_context.git_repo = GitRepository(local_path=DEFAULT_WORKSPACE_ROOT / f"unittest/{uuid.uuid4().hex}")
    test_context.repo = ProjectRepo(test_context.git_repo)

    # Destroy git repo at the end of the test session.
    def _teardown():
        if test_context.git_repo:
            test_context.git_repo.delete_repository()

    # Register the function for destroying the environment.
    request.addfinalizer(_teardown)
    return test_context
2023-12-14 15:06:04 +08:00
2023-12-14 22:59:41 +08:00
2023-08-30 14:52:00 +08:00
@pytest.fixture(scope="session", autouse=True)
def init_config():
    """Session-wide autouse hook; currently a no-op placeholder for config setup."""
    pass
2024-01-03 18:16:00 +08:00
@pytest.fixture(scope="function")
def new_filename(mocker):
    """Pin FileRepository.new_filename to a constant so llm aask prompts are reproducible."""

    # NOTE: Mock new filename to make reproducible llm aask, should consider changing after implementing requirement segmentation
    def _fixed_filename():
        return "20240101"

    mocker.patch("metagpt.utils.file_repository.FileRepository.new_filename", _fixed_filename)
    yield mocker
2024-02-08 00:46:04 +08:00
def _rsp_cache(name):
    """Generator backing the session-scoped response-cache fixtures.

    Loads tests/data/<name>.json (empty dict if absent), yields the dict for
    the session, then rewrites the file at teardown.
    """
    rsp_cache_file_path = TEST_DATA_PATH / f"{name}.json"  # read repo-provided
    if rsp_cache_file_path.exists():
        # Explicit UTF-8 matches the rsp_cache fixture and keeps the cache
        # portable: ensure_ascii=False dumps require a UTF-8 capable encoding.
        with open(rsp_cache_file_path, "r", encoding="utf-8") as f1:
            rsp_cache_json = json.load(f1)
    else:
        rsp_cache_json = {}
    yield rsp_cache_json
    with open(rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
2024-02-08 00:46:04 +08:00
@pytest.fixture(scope="session")
def search_rsp_cache():
    # Session-scoped cache of recorded search-engine responses (see _rsp_cache).
    yield from _rsp_cache("search_rsp_cache")
@pytest.fixture(scope="session")
def mermaid_rsp_cache():
    # Session-scoped cache of recorded mermaid-rendering responses (see _rsp_cache).
    yield from _rsp_cache("mermaid_rsp_cache")
2024-01-03 18:16:00 +08:00
@pytest.fixture
def aiohttp_mocker(mocker):
    """Patch aiohttp.ClientSession so requests resolve against MockAioResponse."""
    # Fresh subclass per test keeps class-level mock state isolated.
    MockResponse = type("MockResponse", (MockAioResponse,), {})

    def make_verb(http_method):
        # Build a ClientSession-method replacement with the verb bound in.
        def patched(self, url, **kwargs):
            return MockResponse(self, http_method, url, **kwargs)

        return patched

    mocker.patch("aiohttp.ClientSession.request", MockResponse)
    for verb in ("get", "post", "delete", "patch"):
        mocker.patch(f"aiohttp.ClientSession.{verb}", make_verb(verb))
    yield MockResponse
2024-01-03 18:16:00 +08:00
2024-01-14 23:40:09 +08:00
@pytest.fixture
def curl_cffi_mocker(mocker):
    """Patch curl_cffi's Session.request to return MockCurlCffiResponse objects."""
    # Fresh subclass per test keeps class-level mock state isolated.
    MockResponse = type("MockResponse", (MockCurlCffiResponse,), {})

    def fake_request(self, *args, **kwargs):
        return MockResponse(self, *args, **kwargs)

    mocker.patch("curl_cffi.requests.Session.request", fake_request)
    yield MockResponse
2024-01-03 18:16:00 +08:00
2024-01-14 23:40:09 +08:00
@pytest.fixture
def httplib2_mocker(mocker):
    """Patch httplib2's Http.request to return MockHttplib2Response objects."""
    # Fresh subclass per test keeps class-level mock state isolated.
    MockResponse = type("MockResponse", (MockHttplib2Response,), {})

    def fake_request(self, *args, **kwargs):
        return MockResponse(self, *args, **kwargs)

    mocker.patch("httplib2.Http.request", fake_request)
    yield MockResponse
@pytest.fixture
def search_engine_mocker(aiohttp_mocker, curl_cffi_mocker, httplib2_mocker, search_rsp_cache):
    """Wire all three HTTP mockers to one shared search response cache."""
    # aiohttp_mocker: serpapi/serper
    # httplib2_mocker: google
    # curl_cffi_mocker: ddg
    check_funcs: dict[tuple[str, str], Callable[[dict], str]] = {}
    for mock_cls in (aiohttp_mocker, httplib2_mocker, curl_cffi_mocker):
        mock_cls.rsp_cache = search_rsp_cache
        mock_cls.check_funcs = check_funcs
    yield check_funcs
2024-02-08 00:46:04 +08:00
@pytest.fixture
def http_server():
    """Return a `start` coroutine that launches a local aiohttp test server.

    Awaiting `start()` returns `(site, url)`; every request to `url` gets the
    same minimal MetaGPT HTML page.
    """

    async def handler(request):
        # Static response regardless of path/method.
        return aiohttp.web.Response(
            text="""<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8">
<title>MetaGPT</title></head><body><h1>MetaGPT</h1></body></html>""",
            content_type="text/html",
        )

    async def start():
        server = aiohttp.web.Server(handler)
        runner = aiohttp.web.ServerRunner(server)
        await runner.setup()
        # Port 0 lets the OS choose a free port.
        site = aiohttp.web.TCPSite(runner, "127.0.0.1", 0)
        await site.start()
        # NOTE(review): reads aiohttp's private `_server` attribute to recover
        # the bound port — may break on aiohttp upgrades; confirm before bumping.
        _, port, *_ = site._server.sockets[0].getsockname()
        return site, f"http://127.0.0.1:{port}"

    return start
@pytest.fixture
def mermaid_mocker(aiohttp_mocker, mermaid_rsp_cache):
    """Point the aiohttp mocker at the mermaid response cache."""
    registered_checks: dict[tuple[str, str], Callable[[dict], str]] = {}
    aiohttp_mocker.check_funcs = registered_checks
    aiohttp_mocker.rsp_cache = mermaid_rsp_cache
    yield registered_checks
@pytest.fixture
def git_dir():
    """Fixture to get the unittest directory."""
    unique_dir = Path(__file__).parent / f"unittest/{uuid.uuid4().hex}"
    unique_dir.mkdir(parents=True, exist_ok=True)
    return unique_dir