Merge branch 'fixbug/issues/1016' into HEAD

This commit is contained in:
莘权 马 2024-03-20 17:46:48 +08:00
commit a6f31bf3e6
16 changed files with 178 additions and 93 deletions

File diff suppressed because one or more lines are too long

View file

@@ -25,7 +25,6 @@ async def test_interpreter(mocker, auto_run):
@pytest.mark.asyncio
async def test_interpreter_react_mode(mocker):
mocker.patch("metagpt.actions.di.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
mocker.patch("builtins.input", return_value="confirm")
requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."

View file

@@ -0,0 +1,37 @@
from metagpt.schema import Plan, Task
from metagpt.strategy.planner import Planner
from metagpt.strategy.task_type import TaskType
# Shared fixtures for planner tests.
# NOTE(review): leading indentation was stripped by the diff rendering; the
# literals remain valid Python because they are brace/paren delimited.
# Task "1" models an already-executed step: code and result are populated
# and is_finished=True, so get_plan_status can surface its output.
# Task "2" models the in-progress current task, depending on task "1".
MOCK_TASK_MAP = {
"1": Task(
task_id="1",
instruction="test instruction for finished task",
task_type=TaskType.EDA.type_name,
dependent_task_ids=[],
code="some finished test code",
result="some finished test result",
is_finished=True,
),
"2": Task(
task_id="2",
instruction="test instruction for current task",
task_type=TaskType.DATA_PREPROCESS.type_name,
dependent_task_ids=["1"],
),
}
# A plan positioned on task "2" so the finished task "1" appears as history
# and task "2" is reported as the current task.
MOCK_PLAN = Plan(
goal="test goal",
tasks=list(MOCK_TASK_MAP.values()),
task_map=MOCK_TASK_MAP,
current_task_id="2",
)
def test_planner_get_plan_status():
    """get_plan_status must include finished-task output, the current task's
    instruction, and the guidance text for the current task's type."""
    status = Planner(plan=MOCK_PLAN).get_plan_status()
    expected_fragments = (
        "some finished test code",  # code of finished task "1"
        "some finished test result",  # result of finished task "1"
        "test instruction for current task",  # instruction of current task "2"
        TaskType.DATA_PREPROCESS.value.guidance,  # current task guidance
    )
    for fragment in expected_fragments:
        assert fragment in status

View file

@@ -22,7 +22,7 @@ def _paragraphs(n):
@pytest.mark.parametrize(
"msgs, model_name, system_text, reserved, expected",
[
(_msgs(), "gpt-3.5-turbo", "System", 1500, 1),
(_msgs(), "gpt-3.5-turbo-0613", "System", 1500, 1),
(_msgs(), "gpt-3.5-turbo-16k", "System", 3000, 6),
(_msgs(), "gpt-3.5-turbo-16k", "Hello," * 1000, 3000, 5),
(_msgs(), "gpt-4", "System", 2000, 3),
@@ -32,22 +32,23 @@ def _paragraphs(n):
],
)
def test_reduce_message_length(msgs, model_name, system_text, reserved, expected):
assert len(reduce_message_length(msgs, model_name, system_text, reserved)) / (len("Hello,")) / 1000 == expected
length = len(reduce_message_length(msgs, model_name, system_text, reserved)) / (len("Hello,")) / 1000
assert length == expected
@pytest.mark.parametrize(
"text, prompt_template, model_name, system_text, reserved, expected",
[
(" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo", "System", 1500, 2),
(" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1500, 2),
(" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-16k", "System", 3000, 1),
(" ".join("Hello World." for _ in range(4000)), "Prompt: {}", "gpt-4", "System", 2000, 2),
(" ".join("Hello World." for _ in range(8000)), "Prompt: {}", "gpt-4-32k", "System", 4000, 1),
(" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo", "System", 1000, 8),
(" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1000, 8),
],
)
def test_generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved, expected):
ret = list(generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved))
assert len(ret) == expected
chunk = len(list(generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved)))
assert chunk == expected
@pytest.mark.parametrize(

View file

@@ -8,7 +8,6 @@ from metagpt.provider.azure_openai_api import AzureOpenAILLM
from metagpt.provider.constant import GENERAL_FUNCTION_SCHEMA
from metagpt.provider.openai_api import OpenAILLM
from metagpt.schema import Message
from metagpt.utils.common import process_message
OriginalLLM = OpenAILLM if config.llm.api_type == LLMType.OPENAI else AzureOpenAILLM
@@ -105,7 +104,7 @@ class MockLLM(OriginalLLM):
return rsp
async def aask_code(self, messages: Union[str, Message, list[dict]], **kwargs) -> dict:
msg_key = json.dumps(process_message(messages), ensure_ascii=False)
msg_key = json.dumps(self.format_msg(messages), ensure_ascii=False)
rsp = await self._mock_rsp(msg_key, self.original_aask_code, messages, **kwargs)
return rsp