remove Dict wrappers; use LLMConfig / BrowserConfig / SearchConfig / MermaidConfig directly instead

This commit is contained in:
geekan 2024-01-11 15:10:07 +08:00
parent 4de8fa3682
commit c275f28a37
16 changed files with 60 additions and 82 deletions

View file

@ -12,7 +12,7 @@ from tests.metagpt.provider.mock_llm_config import (
@pytest.mark.asyncio
async def test_aask_code():
llm = LLM(name="gpt3t")
llm = LLM()
msg = [{"role": "user", "content": "Write a python hello world code."}]
rsp = await llm.aask_code(msg) # -> {'language': 'python', 'code': "print('Hello, World!')"}
@ -24,7 +24,7 @@ async def test_aask_code():
@pytest.mark.asyncio
async def test_aask_code_str():
llm = LLM(name="gpt3t")
llm = LLM()
msg = "Write a python hello world code."
rsp = await llm.aask_code(msg) # -> {'language': 'python', 'code': "print('Hello, World!')"}
assert "language" in rsp
@ -34,7 +34,7 @@ async def test_aask_code_str():
@pytest.mark.asyncio
async def test_aask_code_message():
llm = LLM(name="gpt3t")
llm = LLM()
msg = UserMessage("Write a python hello world code.")
rsp = await llm.aask_code(msg) # -> {'language': 'python', 'code': "print('Hello, World!')"}
assert "language" in rsp

View file

@ -10,7 +10,10 @@ from pydantic import BaseModel
from metagpt.config2 import Config
from metagpt.configs.llm_config import LLMType
from metagpt.context import ContextMixin
from tests.metagpt.provider.mock_llm_config import mock_llm_config
from tests.metagpt.provider.mock_llm_config import (
mock_llm_config,
mock_llm_config_proxy,
)
def test_config_1():
@ -21,9 +24,9 @@ def test_config_1():
def test_config_from_dict():
cfg = Config(llm={"default": mock_llm_config})
cfg = Config(llm=mock_llm_config)
assert cfg
assert cfg.llm["default"].api_key == "mock_api_key"
assert cfg.llm.api_key == "mock_api_key"
class ModelX(ContextMixin, BaseModel):
@ -47,11 +50,11 @@ def test_config_mixin_1():
def test_config_mixin_2():
i = Config(llm={"default": mock_llm_config})
j = Config(llm={"new": mock_llm_config})
i = Config(llm=mock_llm_config)
j = Config(llm=mock_llm_config_proxy)
obj = ModelX(config=i)
assert obj._config == i
assert obj._config.llm["default"] == mock_llm_config
assert obj._config.llm == mock_llm_config
obj.set_config(j)
# obj already has a config, so it will not be set
@ -60,16 +63,16 @@ def test_config_mixin_2():
def test_config_mixin_3():
"""Test config mixin with multiple inheritance"""
i = Config(llm={"default": mock_llm_config})
j = Config(llm={"new": mock_llm_config})
i = Config(llm=mock_llm_config)
j = Config(llm=mock_llm_config_proxy)
obj = ModelY(config=i)
assert obj._config == i
assert obj._config.llm["default"] == mock_llm_config
assert obj._config.llm == mock_llm_config
obj.set_config(j)
# obj already has a config, so it will not be set
assert obj._config == i
assert obj._config.llm["default"] == mock_llm_config
assert obj._config.llm == mock_llm_config
assert obj.a == "a"
assert obj.b == "b"

View file

@ -49,13 +49,14 @@ class MockSearchEnine:
async def test_search_engine(search_engine_type, run_func: Callable, max_results: int, as_string: bool, aiohttp_mocker):
# Prerequisites
cache_json_path = None
# FIXME: should not use the global config here; instantiate a dedicated config instead
if search_engine_type is SearchEngineType.SERPAPI_GOOGLE:
assert config.search["serpapi"]
assert config.search
cache_json_path = search_cache_path / f"serpapi-metagpt-{max_results}.json"
elif search_engine_type is SearchEngineType.DIRECT_GOOGLE:
assert config.search["google"]
assert config.search
elif search_engine_type is SearchEngineType.SERPER_GOOGLE:
assert config.search["serper"]
assert config.search
cache_json_path = search_cache_path / f"serper-metagpt-{max_results}.json"
if cache_json_path: