2023-06-30 17:10:48 +08:00
|
|
|
|
#!/usr/bin/env python
|
|
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
|
"""
|
|
|
|
|
|
@Time : 2023/5/1 12:10
|
|
|
|
|
|
@Author : alexanderwu
|
|
|
|
|
|
@File : conftest.py
|
|
|
|
|
|
"""
|
|
|
|
|
|
|
2023-11-22 16:26:48 +08:00
|
|
|
|
import asyncio
|
2024-01-03 10:58:22 +08:00
|
|
|
|
import json
|
2023-12-01 16:10:38 +08:00
|
|
|
|
import logging
|
2024-01-03 10:58:22 +08:00
|
|
|
|
import os
|
2023-11-22 16:26:48 +08:00
|
|
|
|
import re
|
2024-01-02 23:09:09 +08:00
|
|
|
|
import uuid
|
2023-12-14 22:59:41 +08:00
|
|
|
|
|
2023-06-30 17:10:48 +08:00
|
|
|
|
import pytest
|
2023-12-14 22:59:41 +08:00
|
|
|
|
|
2024-01-02 23:07:50 +08:00
|
|
|
|
from metagpt.const import DEFAULT_WORKSPACE_ROOT, TEST_DATA_PATH
|
2024-01-10 13:56:02 +08:00
|
|
|
|
from metagpt.context import CONTEXT
|
2023-12-23 17:45:10 +08:00
|
|
|
|
from metagpt.llm import LLM
|
2023-07-22 11:28:22 +08:00
|
|
|
|
from metagpt.logs import logger
|
2023-12-04 23:04:07 +08:00
|
|
|
|
from metagpt.utils.git_repository import GitRepository
|
2024-01-04 16:58:11 +08:00
|
|
|
|
from tests.mock.mock_llm import MockLLM
|
2023-06-30 17:10:48 +08:00
|
|
|
|
|
2024-01-04 20:45:38 +08:00
|
|
|
|
# Collects only the cache entries that were newly produced during THIS test
# session; the `rsp_cache` fixture dumps it to rsp_cache_new.json at teardown.
RSP_CACHE_NEW = {}  # used globally for producing new and useful only response cache
# Whether MockLLM may fall through to a real OpenAI call on a cache miss.
# Read once at import time from the environment; any non-zero value enables it.
ALLOW_OPENAI_API_CALL = int(
    os.environ.get("ALLOW_OPENAI_API_CALL", 1)
)  # NOTE: should change to default 0 (False) once mock is complete
|
2024-01-02 23:07:50 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(scope="session")
def rsp_cache():
    """Session-wide LLM response cache shared by `llm_mock`.

    Loads the repo-provided ``rsp_cache.json`` (if present) and yields it as a
    plain dict.  Tests append to it via the ``llm_mock`` fixture; at session
    teardown the merged cache is written back, and the entries newly added this
    session (accumulated in ``RSP_CACHE_NEW``) are exported separately.
    """
    rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache.json"  # read repo-provided
    new_rsp_cache_file_path = TEST_DATA_PATH / "rsp_cache_new.json"  # exporting a new copy
    # Use Path.exists() — the path is a pathlib.Path, not a str.
    if rsp_cache_file_path.exists():
        # Explicit UTF-8: the dumps below use ensure_ascii=False, so the files
        # may contain non-ASCII text that platform-default encodings reject.
        with open(rsp_cache_file_path, "r", encoding="utf-8") as f1:
            rsp_cache_json = json.load(f1)
    else:
        rsp_cache_json = {}
    yield rsp_cache_json
    # Teardown: persist the (possibly grown) full cache plus the new-only copy.
    with open(rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(rsp_cache_json, f2, indent=4, ensure_ascii=False)
    with open(new_rsp_cache_file_path, "w", encoding="utf-8") as f2:
        json.dump(RSP_CACHE_NEW, f2, indent=4, ensure_ascii=False)
|
2024-01-02 23:07:50 +08:00
|
|
|
|
|
|
|
|
|
|
|
2024-01-04 15:28:46 +08:00
|
|
|
|
# Hook to capture the test result
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach the "call"-phase report to the item as ``item.test_outcome``.

    Fixture teardowns (e.g. ``llm_mock``) can then check whether the test body
    itself passed, independent of setup/teardown outcomes.
    """
    outcome = yield
    report = outcome.get_result()
    # Only the test body counts — ignore the setup and teardown phases.
    if report.when == "call":
        item.test_outcome = report
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(scope="function", autouse=True)
def llm_mock(rsp_cache, mocker, request):
    """Patch ``BaseLLM.aask``/``aask_batch`` with a cache-backed ``MockLLM``.

    After a test that ran and PASSED (see ``pytest_runtest_makereport``), every
    prompt/response pair the mock actually produced is merged into the shared
    session cache, and into ``RSP_CACHE_NEW`` so only fresh, useful entries get
    exported at session end.
    """
    llm = MockLLM(allow_open_api_call=ALLOW_OPENAI_API_CALL)
    llm.rsp_cache = rsp_cache
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask", llm.aask)
    mocker.patch("metagpt.provider.base_llm.BaseLLM.aask_batch", llm.aask_batch)
    yield mocker
    # Harvest cache entries only from tests that actually ran and passed.
    if hasattr(request.node, "test_outcome") and request.node.test_outcome.passed:
        # An empty candidate list simply loops zero times; no guard needed.
        for rsp_candidate in llm.rsp_candidates:
            # Each candidate is a single-entry {prompt: response} dict.
            cand_key, cand_value = next(iter(rsp_candidate.items()))
            if cand_key not in llm.rsp_cache:
                logger.info(f"Added '{cand_key[:100]} ... -> {cand_value[:20]} ...' to response cache")
                llm.rsp_cache.update(rsp_candidate)
                RSP_CACHE_NEW.update(rsp_candidate)
|
2024-01-02 23:07:50 +08:00
|
|
|
|
|
|
|
|
|
|
|
2023-06-30 17:10:48 +08:00
|
|
|
|
class Context:
    """Small holder object that exposes a shared LLM handle to test fixtures."""

    def __init__(self):
        # Reserved UI-side LLM slot (currently unused by the visible code).
        self._llm_ui = None
        # Eagerly construct the API-side LLM client.
        self._llm_api = LLM()

    @property
    def llm_api(self):
        # Original intent (translated from the author's notes):
        # 1. Initialize the LLM with a result cache.
        # 2. On a cached query, return the cached result directly.
        # 3. On a cache miss, call the real LLM API and return its result.
        # 4. Update the cache with the new query result.
        return self._llm_api
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(scope="package")
def llm_api():
    """Yield a real LLM handle (via ``Context``) with setup/teardown logging."""
    logger.info("Setting up the test")
    ctx = Context()
    yield ctx.llm_api
    logger.info("Tearing down the test")
|
|
|
|
|
|
|
|
|
|
|
|
|
2024-01-05 16:42:57 +08:00
|
|
|
|
@pytest.fixture
def proxy():
    """Provide a minimal in-process HTTP/HTTPS forward proxy for tests.

    Returns a coroutine which, when awaited by the test, yields
    ``(server, proxy_url)`` with ``proxy_url`` like ``http://127.0.0.1:<port>``.
    Each request head is echoed to stdout so tests can verify traffic actually
    went through the proxy (via the ``capfd`` fixture).
    """
    # Parses an HTTP request line (e.g. "CONNECT host:443 HTTP/1.1" or
    # "GET http://host/path HTTP/1.1"), capturing method, host and optional port.
    pattern = re.compile(
        rb"(?P<method>[a-zA-Z]+) (?P<uri>(\w+://)?(?P<host>[^\s\'\"<>\[\]{}|/:]+)(:(?P<port>\d+))?[^\s\'\"<>\[\]{}|]*) "
    )

    async def pipe(reader, writer):
        # Relay bytes one direction until EOF, then close the write side.
        while not reader.at_eof():
            writer.write(await reader.read(2048))
        writer.close()

    async def handle_client(reader, writer):
        # Read the complete request head, up to and including the blank line.
        data = await reader.readuntil(b"\r\n\r\n")
        print(f"Proxy: {data}")  # checking with capfd fixture
        infos = pattern.match(data)
        # NOTE(review): assumes the request line always matches the pattern;
        # a malformed head would make `infos` None and raise here.
        host, port = infos.group("host"), infos.group("port")
        # Default to 80; CONNECT requests normally carry an explicit port.
        port = int(port) if port else 80
        remote_reader, remote_writer = await asyncio.open_connection(host, port)
        if data.startswith(b"CONNECT"):
            # HTTPS tunnelling: acknowledge, then relay raw bytes both ways.
            writer.write(b"HTTP/1.1 200 Connection Established\r\n\r\n")
        else:
            # Plain HTTP: forward the request head we already consumed upstream.
            remote_writer.write(data)
        # Pump both directions concurrently until either side hits EOF.
        await asyncio.gather(pipe(reader, remote_writer), pipe(remote_reader, writer))

    async def proxy_func():
        # Port 0 lets the OS pick a free port; the chosen one is reported back.
        server = await asyncio.start_server(handle_client, "127.0.0.1", 0)
        return server, "http://{}:{}".format(*server.sockets[0].getsockname())

    # The fixture returns the coroutine itself; the test awaits it to start.
    return proxy_func()
|
2023-12-04 23:04:07 +08:00
|
|
|
|
|
|
|
|
|
|
|
2023-12-08 19:55:47 +08:00
|
|
|
|
# see https://github.com/Delgan/loguru/issues/59#issuecomment-466591978
@pytest.fixture
def loguru_caplog(caplog):
    """Make loguru records visible to pytest's ``caplog`` fixture.

    Loguru bypasses the stdlib logging machinery, so we temporarily add a sink
    that re-emits every loguru record through ``logging``, where caplog's
    handler can capture it.  The sink is removed on teardown so it does not
    leak into (and duplicate the output of) subsequent tests.
    """

    class PropagateHandler(logging.Handler):
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    # loguru's add() returns a handler id usable for targeted removal.
    handler_id = logger.add(PropagateHandler(), format="{message}")
    yield caplog
    # Fix: previously the sink was never removed and leaked across tests.
    logger.remove(handler_id)
|
|
|
|
|
|
|
|
|
|
|
|
|
2023-12-04 23:04:07 +08:00
|
|
|
|
# init & dispose git repo
@pytest.fixture(scope="function", autouse=True)
def setup_and_teardown_git_repo(request):
    """Create a throwaway git workspace for each test and delete it afterwards."""
    workdir = DEFAULT_WORKSPACE_ROOT / f"unittest/{uuid.uuid4().hex}"
    CONTEXT.git_repo = GitRepository(local_path=workdir)
    CONTEXT.config.git_reinit = True

    def teardown():
        # Destroy the git repo once the test is finished.
        CONTEXT.git_repo.delete_repository()

    # Register the function for destroying the environment.
    request.addfinalizer(teardown)
|
2023-12-14 15:06:04 +08:00
|
|
|
|
|
2023-12-14 22:59:41 +08:00
|
|
|
|
|
2023-08-30 14:52:00 +08:00
|
|
|
|
@pytest.fixture(scope="session", autouse=True)
def init_config():
    # NOTE(review): intentionally empty — presumably a leftover hook point kept
    # after config initialization moved elsewhere; confirm whether it can be
    # removed or should perform session-wide configuration setup.
    pass
|
2024-01-03 18:16:00 +08:00
|
|
|
|
|
|
|
|
|
|
|
2024-01-04 20:45:38 +08:00
|
|
|
|
@pytest.fixture(scope="function")
def new_filename(mocker):
    """Pin ``FileRepository.new_filename`` to a constant for reproducible runs."""
    # NOTE: Mock new filename to make reproducible llm aask, should consider changing after implementing requirement segmentation
    def _fixed_filename():
        return "20240101"

    mocker.patch("metagpt.utils.file_repository.FileRepository.new_filename", _fixed_filename)
    yield mocker
|
|
|
|
|
|
|
|
|
|
|
|
|
2024-01-03 18:16:00 +08:00
|
|
|
|
@pytest.fixture
def aiohttp_mocker(mocker):
    """Replace ``aiohttp.ClientSession`` request methods with a canned-JSON stub.

    Yields a response stub; a test calls ``set_json(...)`` on it to choose what
    any subsequent ``session.request/get/post/delete/patch`` call — used either
    directly with ``await`` or as an async context manager — returns from
    ``.json()``.
    """

    class MockAioResponse:
        def set_json(self, json):
            # Stash whatever payload the test wants .json() to return.
            self._json = json

        async def json(self, *args, **kwargs):
            return self._json

    response = MockAioResponse()

    class MockCTXMng:
        # Supports `async with session.get(...)` ...
        async def __aenter__(self):
            return response

        async def __aexit__(self, *args, **kwargs):
            pass

        # ... and also plain `await session.get(...)`.
        def __await__(self):
            yield
            return response

    def mock_request(self, method, url, **kwargs):
        return MockCTXMng()

    def wrap(method):
        # Bind `method` per verb so each patched attribute forwards correctly.
        def run(self, url, **kwargs):
            return mock_request(self, method, url, **kwargs)

        return run

    mocker.patch("aiohttp.ClientSession.request", mock_request)
    for verb in ["get", "post", "delete", "patch"]:
        mocker.patch(f"aiohttp.ClientSession.{verb}", wrap(verb))

    yield response
|