"""Run an EvalPlus evaluation over a generated-samples JSONL file.

If the evaluation completes, extract and print the HumanEval problems
whose samples failed their tests.

NOTE(review): this file previously contained interleaved VCS residue
(timestamp lines and `|` markers from a blame/merge view) that made it
invalid Python; that residue has been removed. The variable typo
`unpassed_exapmle` in the live code has also been fixed (local name
only — no external interface change).
"""
import asyncio

from metagpt.llm import LLM

from examples.ags.benchmark.humaneval import sample_generate, samples_generate, extract_failure_tests, automatic_evalplus
from examples.ags.w_action_node.utils import jsonl_ranker

# Problem IDs previously tracked (presumably failing cases — TODO confirm):
# 132 141 136 80 73

# Earlier invocations kept for reference:
# asyncio.run(sample_generate('HumanEval/118',result_path="llm_based_4.jsonl",mode="llm"))
# asyncio.run(samples_generate(mode='ags',result_path="ags_based_1.jsonl"))
# jsonl_ranker("samples.jsonl", "samples.jsonl")

# JSONL file of generated samples to evaluate with EvalPlus.
result_path = "ags_based_2.jsonl"

# Run the EvalPlus harness; if it succeeds, report which tests failed.
if automatic_evalplus(result_path):
    # result_path[:-6] strips the ".jsonl" suffix (6 characters) so the
    # eval-results filename matches what the harness writes alongside it.
    unpassed_example = extract_failure_tests(result_path[:-6] + "_eval_results.json")
    print(unpassed_example)

# unpassed_exapmle = extract_failure_tests(file_path="2_eval_results.json")
# print(unpassed_exapmle)

# for example in failure_list:
#     asyncio.run(sample_generate(example))