mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-04-30 11:26:23 +02:00
Merge branch 'fixbug/issues/1016' into HEAD
This commit is contained in:
commit
a6f31bf3e6
16 changed files with 178 additions and 93 deletions
|
|
@ -25,7 +25,6 @@ async def test_interpreter(mocker, auto_run):
|
|||
@pytest.mark.asyncio
|
||||
async def test_interpreter_react_mode(mocker):
|
||||
mocker.patch("metagpt.actions.di.execute_nb_code.ExecuteNbCode.run", return_value=("a successful run", True))
|
||||
mocker.patch("builtins.input", return_value="confirm")
|
||||
|
||||
requirement = "Run data analysis on sklearn Wine recognition dataset, include a plot, and train a model to predict wine class (20% as validation), and show validation accuracy."
|
||||
|
||||
|
|
|
|||
37
tests/metagpt/strategy/test_planner.py
Normal file
37
tests/metagpt/strategy/test_planner.py
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
from metagpt.schema import Plan, Task
|
||||
from metagpt.strategy.planner import Planner
|
||||
from metagpt.strategy.task_type import TaskType
|
||||
|
||||
# Fixture data: a two-task plan in which task "1" (EDA) is already finished
# and task "2" (data preprocessing) is the current, unfinished task.
_FINISHED_TASK = Task(
    task_id="1",
    instruction="test instruction for finished task",
    task_type=TaskType.EDA.type_name,
    dependent_task_ids=[],
    code="some finished test code",
    result="some finished test result",
    is_finished=True,
)
_CURRENT_TASK = Task(
    task_id="2",
    instruction="test instruction for current task",
    task_type=TaskType.DATA_PREPROCESS.type_name,
    dependent_task_ids=["1"],
)
MOCK_TASK_MAP = {"1": _FINISHED_TASK, "2": _CURRENT_TASK}
MOCK_PLAN = Plan(
    goal="test goal",
    tasks=list(MOCK_TASK_MAP.values()),
    task_map=MOCK_TASK_MAP,
    current_task_id="2",
)
|
||||
|
||||
|
||||
def test_planner_get_plan_status():
    """get_plan_status must surface finished work plus current-task context."""
    status = Planner(plan=MOCK_PLAN).get_plan_status()

    # Finished task "1": its code and result must both appear in the report.
    for finished_fragment in ("some finished test code", "some finished test result"):
        assert finished_fragment in status
    # Current task "2": its instruction and type-specific guidance must appear.
    assert "test instruction for current task" in status
    assert TaskType.DATA_PREPROCESS.value.guidance in status  # current task guidance
|
||||
|
|
@ -22,7 +22,7 @@ def _paragraphs(n):
|
|||
@pytest.mark.parametrize(
|
||||
"msgs, model_name, system_text, reserved, expected",
|
||||
[
|
||||
(_msgs(), "gpt-3.5-turbo", "System", 1500, 1),
|
||||
(_msgs(), "gpt-3.5-turbo-0613", "System", 1500, 1),
|
||||
(_msgs(), "gpt-3.5-turbo-16k", "System", 3000, 6),
|
||||
(_msgs(), "gpt-3.5-turbo-16k", "Hello," * 1000, 3000, 5),
|
||||
(_msgs(), "gpt-4", "System", 2000, 3),
|
||||
|
|
@ -32,22 +32,23 @@ def _paragraphs(n):
|
|||
],
|
||||
)
|
||||
def test_reduce_message_length(msgs, model_name, system_text, reserved, expected):
    """reduce_message_length should trim the messages to the model's token budget.

    The surviving text is a repetition of "Hello,"; dividing its length by
    len("Hello,") and by 1000 recovers how many thousand repetitions were kept,
    which the parametrized cases pin per model/reserved combination.
    """
    # Diff residue fused the old one-line assert with the refactored two-line
    # form; keep only the post-fix version so the check runs exactly once.
    length = len(reduce_message_length(msgs, model_name, system_text, reserved)) / (len("Hello,")) / 1000
    assert length == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "text, prompt_template, model_name, system_text, reserved, expected",
    [
        (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo", "System", 1500, 2),
        (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1500, 2),
        (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-16k", "System", 3000, 1),
        (" ".join("Hello World." for _ in range(4000)), "Prompt: {}", "gpt-4", "System", 2000, 2),
        (" ".join("Hello World." for _ in range(8000)), "Prompt: {}", "gpt-4-32k", "System", 4000, 1),
        (" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo", "System", 1000, 8),
        (" ".join("Hello World" for _ in range(8000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1000, 8),
    ],
)
def test_generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved, expected):
    """generate_prompt_chunk should split *text* into the expected chunk count
    for each model/reserved-token combination above.
    """
    # Diff residue fused the removed one-line assert with the added two-line
    # form; keep only the post-fix version so the generator is consumed once.
    chunk = len(list(generate_prompt_chunk(text, prompt_template, model_name, system_text, reserved)))
    assert chunk == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue