Merge pull request #1572 from cyzus/fix-1514-pr

Fix 1514 pr
commit 379ba789a7
Author: Alexander Wu (committed by GitHub)
Date: 2024-11-04 22:53:32 +08:00
14 changed files with 83 additions and 74 deletions


@@ -12,9 +12,8 @@ ## 1. Data Preparation
 - **Download Datasets:** [Dataset Link](https://drive.google.com/drive/folders/151FIZoLygkRfeJgSI9fNMiLsixh1mK0r?usp=sharing)
 - **Download and prepare datasets from scratch:**
   ```bash
-  cd data
-  python dataset.py --save_analysis_pool
-  python hf_data.py --save_analysis_pool
+  python data/dataset.py --save_analysis_pool
+  python data/hf_data.py --save_analysis_pool
   ```

 ## 2. Configurations
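With the `cd data` step gone, both preparation scripts are expected to run from the sela root. A minimal sketch (a hypothetical wrapper for illustration, not part of this PR) that drives the same two steps:

```python
# Hypothetical convenience wrapper; the PR only documents the two commands,
# it does not add this script. Run from the sela root.
import subprocess

for script in ("data/dataset.py", "data/hf_data.py"):
    subprocess.run(["python", script, "--save_analysis_pool"], check=True)
```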


@@ -1,4 +1,5 @@
 import os
+from pathlib import Path

 from metagpt.ext.sela.data.dataset import SPECIAL_INSTRUCTIONS
 from metagpt.ext.sela.runner.mle_bench.instructions import (
@@ -6,6 +7,7 @@ from metagpt.ext.sela.runner.mle_bench.instructions import (
     INSTRUCTIONS,
     INSTRUCTIONS_OBFUSCATED,
 )
+from metagpt.ext.sela.utils import mcts_logger

 MLE_BENCH_FILES = ["description.md", "description_obfuscated.md"]
@@ -61,7 +63,7 @@ def get_mle_bench_requirements(dataset_dir, data_config, special_instruction, ob
     instructions = INSTRUCTIONS.format(dataset_dir=dataset_dir, output_dir=output_dir)
     task_file = "description.md"
-    with open(os.path.join(dataset_dir, task_file), encoding="utf-8") as f:
+    with open(Path(dataset_dir) / task_file, encoding="utf-8") as f:
         task_description = f.read()
     mle_requirement = MLE_REQUIREMENTS.format(
         instructions=instructions,
@@ -70,5 +72,5 @@ def get_mle_bench_requirements(dataset_dir, data_config, special_instruction, ob
         output_dir=output_dir,
         special_instruction=special_instruction,
     )
-    print(mle_requirement)
+    mcts_logger.info(mle_requirement)
     return mle_requirement
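Both changes in this file follow the PR's broader pattern: `os.path.join` becomes `pathlib.Path` joining, and bare `print` becomes `mcts_logger.info`. A minimal sketch of the path equivalence (illustrative names only, not taken from the PR):

```python
import os.path
from pathlib import Path

dataset_dir = "datasets/titanic"  # hypothetical directory for illustration
task_file = "description.md"

legacy = os.path.join(dataset_dir, task_file)  # str: "datasets/titanic/description.md"
modern = Path(dataset_dir) / task_file         # Path object with .open(), .exists(), ...
assert Path(legacy) == modern                  # same location, richer API
```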


@@ -10,7 +10,7 @@ import yaml
 from sklearn.model_selection import train_test_split

 from metagpt.ext.sela.insights.solution_designer import SolutionDesigner
-from metagpt.ext.sela.utils import DATA_CONFIG
+from metagpt.ext.sela.utils import DATA_CONFIG, mcts_logger

 BASE_USER_REQUIREMENT = """
 This is a {datasetname} dataset. Your goal is to predict the target column `{target_col}`.
@@ -113,15 +113,15 @@ def get_split_dataset_path(dataset_name, config):
     datasets_dir = config["datasets_dir"]
     if dataset_name in config["datasets"]:
         dataset = config["datasets"][dataset_name]
-        data_path = os.path.join(datasets_dir, dataset["dataset"])
+        data_path = Path(datasets_dir) / dataset["dataset"]
         split_datasets = {
-            "train": os.path.join(data_path, "split_train.csv"),
-            "dev": os.path.join(data_path, "split_dev.csv"),
-            "dev_wo_target": os.path.join(data_path, "split_dev_wo_target.csv"),
-            "dev_target": os.path.join(data_path, "split_dev_target.csv"),
-            "test": os.path.join(data_path, "split_test.csv"),
-            "test_wo_target": os.path.join(data_path, "split_test_wo_target.csv"),
-            "test_target": os.path.join(data_path, "split_test_target.csv"),
+            "train": data_path / "split_train.csv",
+            "dev": data_path / "split_dev.csv",
+            "dev_wo_target": data_path / "split_dev_wo_target.csv",
+            "dev_target": data_path / "split_dev_target.csv",
+            "test": data_path / "split_test.csv",
+            "test_wo_target": data_path / "split_test_wo_target.csv",
+            "test_target": data_path / "split_test_target.csv",
         }
         return split_datasets
     else:
@@ -131,10 +131,8 @@ def get_split_dataset_path(dataset_name, config):
 def get_user_requirement(task_name, config):
-    # datasets_dir = config["datasets_dir"]
     if task_name in config["datasets"]:
         dataset = config["datasets"][task_name]
-        # data_path = os.path.join(datasets_dir, dataset["dataset"])
         user_requirement = dataset["user_requirement"]
         return user_requirement
     else:
@@ -191,7 +189,7 @@ def generate_task_requirement(task_name, data_config, is_di=True, special_instru
         additional_instruction=additional_instruction,
         data_info_path=data_info_path,
     )
-    print(user_requirement)
+    mcts_logger.info(user_requirement)
     return user_requirement
@@ -220,22 +218,22 @@ class ExpDataset:
             "split_test_target.csv",
         ]
         for fname in fnames:
-            if not os.path.exists(Path(self.dataset_dir, self.name, fname)):
+            if not Path(self.dataset_dir, self.name, fname).exists():
                 return False
         return True

     def check_datasetinfo_exists(self):
-        return os.path.exists(Path(self.dataset_dir, self.name, "dataset_info.json"))
+        return Path(self.dataset_dir, self.name, "dataset_info.json").exists()

     def get_raw_dataset(self):
         raw_dir = Path(self.dataset_dir, self.name, "raw")
         train_df = None
         test_df = None
-        if not os.path.exists(Path(raw_dir, "train.csv")):
+        if not Path(raw_dir, "train.csv").exists():
             raise FileNotFoundError(f"Raw dataset `train.csv` not found in {raw_dir}")
         else:
             train_df = pd.read_csv(Path(raw_dir, "train.csv"))
-        if os.path.exists(Path(raw_dir, "test.csv")):
+        if Path(raw_dir, "test.csv").exists():
             test_df = pd.read_csv(Path(raw_dir, "test.csv"))
         return train_df, test_df
@@ -286,16 +284,16 @@ class ExpDataset:
     def save_dataset(self, target_col):
         df, test_df = self.get_raw_dataset()
         if not self.check_dataset_exists() or self.force_update:
-            print(f"Saving Dataset {self.name} in {self.dataset_dir}")
+            mcts_logger.info(f"Saving Dataset {self.name} in {self.dataset_dir}")
             self.split_and_save(df, target_col, test_df=test_df)
         else:
-            print(f"Dataset {self.name} already exists")
+            mcts_logger.info(f"Dataset {self.name} already exists")
         if not self.check_datasetinfo_exists() or self.force_update:
-            print(f"Saving Dataset info for {self.name}")
+            mcts_logger.info(f"Saving Dataset info for {self.name}")
             dataset_info = self.get_dataset_info()
             self.save_datasetinfo(dataset_info)
         else:
-            print(f"Dataset info for {self.name} already exists")
+            mcts_logger.info(f"Dataset info for {self.name} already exists")

     def save_datasetinfo(self, dataset_info):
         with open(Path(self.dataset_dir, self.name, "dataset_info.json"), "w", encoding="utf-8") as file:
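The repeated change in this file is mechanical: `os.path.exists(Path(...))` becomes `Path(...).exists()`. A minimal self-contained sketch of the same check against a scratch directory:

```python
import tempfile
from pathlib import Path

raw_dir = Path(tempfile.mkdtemp())  # stand-in for <dataset_dir>/<name>/raw
train_csv = raw_dir / "train.csv"

print(train_csv.exists())           # False: nothing has been written yet
train_csv.write_text("a,b\n1,2\n")
print(train_csv.exists())           # True: same semantics as os.path.exists
```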


@@ -1,6 +1,5 @@
 import asyncio
 import io
-import os
 from pathlib import Path

 import pandas as pd
@@ -63,7 +62,7 @@ class HFExpDataset(ExpDataset):
         raw_dir = Path(self.dataset_dir, self.name, "raw")
         raw_dir.mkdir(parents=True, exist_ok=True)

-        if os.path.exists(Path(raw_dir, "train.csv")):
+        if Path(raw_dir, "train.csv").exists():
             df = pd.read_csv(Path(raw_dir, "train.csv"), encoding="utf-8")
         else:
             df = self.dataset["train"].to_pandas()
@@ -73,7 +72,7 @@ class HFExpDataset(ExpDataset):
             df.to_csv(Path(raw_dir, "train.csv"), index=False, encoding="utf-8")

-        if os.path.exists(Path(raw_dir, "test.csv")):
+        if Path(raw_dir, "test.csv").exists():
             test_df = pd.read_csv(Path(raw_dir, "test.csv"), encoding="utf-8")
         else:
             if self.dataset and "test" in self.dataset:


@@ -4,6 +4,7 @@ import matplotlib.pyplot as plt
 import networkx as nx

 from metagpt.ext.sela.search.tree_search import Node
+from metagpt.ext.sela.utils import mcts_logger

 NODE_TEMPLATE = """\
 [Node {id}]
@@ -139,7 +140,7 @@ def build_tree_recursive(graph, parent_id, node, node_order, start_task_id=2):
         instruction = "\n\n".join([role.planner.plan.tasks[i].instruction for i in range(start_task_id)])
     else:
         instruction = role.planner.plan.tasks[depth + start_task_id - 1].instruction
-    print(instruction)
+    mcts_logger.info(instruction)
     # Add the current node with attributes to the graph
     dev_score = node.raw_reward.get("dev_score", 0) * 100
     avg_score = node.avg_value() * 100


@@ -2,7 +2,7 @@ from __future__ import annotations

 import asyncio
 import json
-import os
+from pathlib import Path

 from pydantic import model_validator
@@ -133,9 +133,9 @@ class Experimenter(DataInterpreter):
         if self.planner.plan.goal != "":
             self.set_actions([WriteAnalysisCode])
             self._set_state(0)
-            print("Plan already exists, skipping initialization.")
+            mcts_logger.info("Plan already exists, skipping initialization.")
             return self
-        print("Initializing plan and tool...")
+        mcts_logger.info("Initializing plan and tool...")
         return super().set_plan_and_tool()

     async def _act_on_task(self, current_task: Task) -> TaskResult:
@@ -172,7 +172,7 @@ class Experimenter(DataInterpreter):
         mcts_logger.log("MCTS", "Static Saving")
         stg_path = self.role_dir
         name = self.get_node_name()
-        role_path = os.path.join(stg_path, f"{name}.json")
+        role_path = Path(stg_path) / f"{name}.json"
         # save state as json file
         write_json_file(role_path, self.model_dump())


@@ -3,6 +3,8 @@ import time

 import aide

+from metagpt.ext.sela.utils import mcts_logger
+
 os.environ["OPENAI_API_KEY"] = "sk-xxx"
 os.environ["OPENAI_BASE_URL"] = "your url"
@@ -27,9 +29,9 @@ exp = aide.Experiment(
 best_solution = exp.run(steps=10)

-print(f"Best solution has validation metric: {best_solution.valid_metric}")
-print(f"Best solution code: {best_solution.code}")
+mcts_logger.info(f"Best solution has validation metric: {best_solution.valid_metric}")
+mcts_logger.info(f"Best solution code: {best_solution.code}")

 end_time = time.time()
 execution_time = end_time - start_time
-print(f"run time : {execution_time} seconds")
+mcts_logger.info(f"run time : {execution_time} seconds")


@@ -1,9 +1,10 @@
-import os
 from datetime import datetime
+from pathlib import Path

 import pandas as pd

 from metagpt.ext.sela.runner.custom import CustomRunner
+from metagpt.ext.sela.utils import DATA_CONFIG

 class AGRunner:
@@ -80,7 +81,7 @@ class AGRunner:
         """
         # Define the root path to append
-        root_folder = os.path.join("F:/Download/Dataset/", self.state["task"])
+        root_folder = Path(DATA_CONFIG["datasets_dir"]) / self.state["task"]

         # Load the datasets
         train_data = pd.read_csv(train_path)
@@ -92,12 +93,10 @@ class AGRunner:
         image_column = train_data.columns[0]

         # Append root folder path to the image column in each dataset
-        train_data[image_column] = train_data[image_column].apply(lambda x: os.path.join(root_folder, x))
-        dev_data[image_column] = dev_data[image_column].apply(lambda x: os.path.join(root_folder, x))
-        dev_wo_target_data[image_column] = dev_wo_target_data[image_column].apply(
-            lambda x: os.path.join(root_folder, x)
-        )
-        test_data[image_column] = test_data[image_column].apply(lambda x: os.path.join(root_folder, x))
+        train_data[image_column] = train_data[image_column].apply(lambda x: Path(root_folder) / x)
+        dev_data[image_column] = dev_data[image_column].apply(lambda x: Path(root_folder) / x)
+        dev_wo_target_data[image_column] = dev_wo_target_data[image_column].apply(lambda x: Path(root_folder) / x)
+        test_data[image_column] = test_data[image_column].apply(lambda x: Path(root_folder) / x)

         return train_data, dev_data, dev_wo_target_data, test_data
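One side effect worth noting: `.apply(lambda x: Path(root_folder) / x)` stores `Path` objects in the column rather than strings. A minimal sketch with toy data; if a downstream consumer expects plain strings, a final `str` cast restores them:

```python
from pathlib import Path

import pandas as pd

root_folder = Path("datasets/images")  # hypothetical root for illustration
df = pd.DataFrame({"image": ["a.png", "b.png"]})

df["image"] = df["image"].apply(lambda x: Path(root_folder) / x)
print(type(df["image"].iloc[0]))       # pathlib.PosixPath (or WindowsPath)
df["image"] = df["image"].astype(str)  # optional: back to plain strings
```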


@@ -1,4 +1,4 @@
-import os
+from pathlib import Path

 import pandas as pd
@@ -47,7 +47,7 @@ class CustomRunner(Runner):
     def evaluate_predictions(self, preds, split):
         metric = self.state["dataset_config"]["metric"]
-        gt_path = os.path.join(self.state["datasets_dir"][f"{split}_target"])
+        gt_path = Path(self.state["datasets_dir"][f"{split}_target"])
         gt = pd.read_csv(gt_path)["target"]
         score = evaluate_score(preds, gt, metric)
         return score


@@ -7,6 +7,7 @@ from metagpt.ext.sela.evaluation.evaluation import (
 from metagpt.ext.sela.evaluation.visualize_mcts import get_tree_text
 from metagpt.ext.sela.runner.runner import Runner
 from metagpt.ext.sela.search.search_algorithm import MCTS, Greedy, Random
+from metagpt.ext.sela.utils import mcts_logger

 class MCTSRunner(Runner):
@@ -46,7 +47,7 @@ class MCTSRunner(Runner):
         text += f"Best node: {best_node.id}, score: {best_node.raw_reward}\n"
         text += f"Dev best node: {dev_best_node.id}, score: {dev_best_node.raw_reward}\n"
         text += f"Grader score: {additional_scores['grader']}\n"
-        print(text)
+        mcts_logger.info(text)

         results = [
             {
                 "best_node": best_node.id,


@@ -1,7 +1,7 @@
 from metagpt.ext.sela.experimenter import Experimenter
 from metagpt.ext.sela.insights.instruction_generator import InstructionGenerator
 from metagpt.ext.sela.runner.runner import Runner
-from metagpt.ext.sela.utils import get_exp_pool_path
+from metagpt.ext.sela.utils import get_exp_pool_path, mcts_logger

 EXPS_PROMPT = """
 When doing the tasks, you can refer to the insights below:
@@ -37,7 +37,7 @@ class RandomSearchRunner(Runner):
             di = Experimenter(node_id=str(i), use_reflection=self.args.reflection, role_timeout=self.args.role_timeout)
             di.role_dir = f"{di.role_dir}_{self.args.task}"
             requirement = user_requirement + EXPS_PROMPT.format(experience=exps[i])
-            print(requirement)
+            mcts_logger.info(requirement)
             score_dict = await self.run_di(di, requirement, run_idx=i)
             results.append(
                 {


@@ -1,6 +1,7 @@
 import datetime
 import json
 import os
+from pathlib import Path

 import numpy as np
 import pandas as pd
@@ -8,7 +9,7 @@ import pandas as pd
 from metagpt.ext.sela.evaluation.evaluation import evaluate_score
 from metagpt.ext.sela.experimenter import Experimenter
 from metagpt.ext.sela.search.tree_search import create_initial_state
-from metagpt.ext.sela.utils import DATA_CONFIG, save_notebook
+from metagpt.ext.sela.utils import DATA_CONFIG, mcts_logger, save_notebook

 class Runner:
@@ -38,7 +39,7 @@ class Runner:
                 score_dict = self.evaluate(score_dict, self.state)
                 run_finished = True
             except Exception as e:
-                print(f"Error: {e}")
+                mcts_logger.info(f"Error: {e}")
                 num_runs += 1
         # save_notebook(role=di, save_dir=self.result_path, name=f"{self.args.task}_{self.start_time}_{run_idx}")
         save_name = self.get_save_name()
@@ -94,10 +95,10 @@ class Runner:
         self.save_result(results)

     def evaluate_prediction(self, split, state):
-        pred_path = os.path.join(state["work_dir"], state["task"], f"{split}_predictions.csv")
+        pred_path = Path(state["work_dir"]) / state["task"] / f"{split}_predictions.csv"
         os.makedirs(state["node_dir"], exist_ok=True)
-        pred_node_path = os.path.join(state["node_dir"], f"{self.start_time}-{split}_predictions.csv")
-        gt_path = os.path.join(state["datasets_dir"][f"{split}_target"])
+        pred_node_path = Path(state["node_dir"]) / f"{self.start_time}-{split}_predictions.csv"
+        gt_path = Path(state["datasets_dir"]) / f"{split}_target.csv"
         preds = pd.read_csv(pred_path)
         preds = preds[preds.columns.tolist()[-1]]
         preds.to_csv(pred_node_path, index=False)


@@ -2,6 +2,7 @@ import json
 import os
 import pickle
 import shutil
+from pathlib import Path

 import numpy as np
 import pandas as pd
@@ -95,7 +96,9 @@ def create_initial_state(task: str, start_task_id: int, data_config: dict, args)
     initial_state = {
         "task": task,
         "work_dir": data_config["work_dir"],
-        "node_dir": os.path.join(data_config["work_dir"], data_config["role_dir"], f"{task}{args.name}"),
+        "node_dir": os.path.join(
+            data_config["work_dir"], data_config["role_dir"], f"{task}{args.name}"
+        ),  # cannot use Path here because of Pydantic
         "dataset_config": dataset_config,
         "datasets_dir": datasets_dir,  # won't be used if external eval is used
         "exp_pool_path": exp_pool_path,
@@ -145,12 +148,15 @@ class Node:
         return hash(self.id)

     def save_node(self):
-        os.makedirs(self.state["node_dir"], exist_ok=True)
-        with open(os.path.join(self.state["node_dir"], f"Node-{self.id}.pkl"), "wb") as f:
+        node_dir = Path(self.state["node_dir"])
+        node_dir.mkdir(parents=True, exist_ok=True)
+        node_path = node_dir / f"Node-{self.id}.pkl"
+        with node_path.open("wb") as f:
             pickle.dump(self, f)

     def load_node(self):
-        with open(os.path.join(self.state["node_dir"], f"Node-{self.id}.pkl"), "rb") as f:
+        node_path = Path(self.state["node_dir"]) / f"Node-{self.id}.pkl"
+        with node_path.open("rb") as f:
             return pickle.load(f)

     def get_depth(self):
@@ -195,7 +201,7 @@ class Node:
     def get_role_path(self):
         fname = f"Node-{self.id}.json"
-        role_path = os.path.join(self.state["node_dir"], fname)
+        role_path = Path(self.state["node_dir"]) / fname
         return role_path

     def load_role(self):
@@ -239,17 +245,17 @@ class Node:
             self.add_child(node)

     def get_predictions_path(self, split):
-        return os.path.join(self.state["node_dir"], f"Node-{self.id}-{split}_predictions.csv")
+        return Path(self.state["node_dir"]) / f"Node-{self.id}-{split}_predictions.csv"

     def get_and_move_predictions(self, split):
-        if not os.path.exists(self.get_predictions_path(split)):
-            pred_path = os.path.join(self.state["work_dir"], self.state["task"], f"{split}_predictions.csv")
+        if not self.get_predictions_path(split).exists():
+            pred_path = Path(self.state["work_dir"]) / self.state["task"] / f"{split}_predictions.csv"
             shutil.copy(pred_path, self.get_predictions_path(split))
             os.remove(pred_path)
         return pd.read_csv(self.get_predictions_path(split))

     def get_gt(self, split):
-        gt_path = os.path.join(self.state["datasets_dir"][f"{split}_target"])
+        gt_path = Path(self.state["datasets_dir"][f"{split}_target"])
         return pd.read_csv(gt_path)

     def evaluate_prediction(self, split):
@@ -374,7 +380,7 @@ class BaseTreeSearch:
             return best_score, best_child
         for child in self.children[node]:
             score = child.normalized_reward[split]
-            print(child.id, split, score)
+            mcts_logger.info(f"{child.id} {split} {score}")
             if score > best_score:
                 best_score = score
                 best_child = child
@@ -391,11 +397,11 @@ class BaseTreeSearch:
     def save_node_order(self, node_id: str):
         self.node_order.append(node_id)
-        with open(os.path.join(self.root_node.state["node_dir"], "node_order.json"), "w") as f:
+        with open(Path(self.root_node.state["node_dir"]) / "node_order.json", "w") as f:
             json.dump(self.node_order, f)

     def load_node_order(self):
-        with open(os.path.join(self.root_node.state["node_dir"], "node_order.json"), "r") as f:
+        with open(Path(self.root_node.state["node_dir"]) / "node_order.json", "r") as f:
             self.node_order = json.load(f)

     def get_score_order_dict(self):
@@ -481,8 +487,9 @@ class BaseTreeSearch:
         # Load all pkl files in the node_dir
         all_pkl_files = os.listdir(self.root_node.state["node_dir"])
         all_pkl_files = [f for f in all_pkl_files if f.endswith(".pkl")]
-        if os.path.exists(os.path.join(self.root_node.state["node_dir"], "Node-0.pkl")):
-            with open(os.path.join(self.root_node.state["node_dir"], "Node-0.pkl"), "rb") as f:
+        node_0_path = Path(self.root_node.state["node_dir"]) / "Node-0.pkl"
+        if node_0_path.exists():
+            with open(node_0_path, "rb") as f:
                 self.root_node = pickle.load(f)
             self.children[self.root_node] = self.root_node.children
             load_children_node(self.root_node)
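The node persistence above now goes through `Path.open`. A compact, self-contained sketch of the same pickle round trip, with a toy payload standing in for a `Node`:

```python
import pickle
import tempfile
from pathlib import Path

node_dir = Path(tempfile.mkdtemp())  # stand-in for state["node_dir"]
node_path = node_dir / "Node-0.pkl"

with node_path.open("wb") as f:
    pickle.dump({"id": "0", "dev_score": 0.5}, f)  # hypothetical payload
with node_path.open("rb") as f:
    print(pickle.load(f))
```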


@@ -45,19 +45,19 @@ def get_exp_pool_path(task_name, data_config, pool_name="analysis_pool"):
     datasets_dir = data_config["datasets_dir"]
     if task_name in data_config["datasets"]:
         dataset = data_config["datasets"][task_name]
-        data_path = os.path.join(datasets_dir, dataset["dataset"])
+        data_path = Path(datasets_dir) / dataset["dataset"]
     else:
         raise ValueError(
             f"Dataset {task_name} not found in config file. Available datasets: {data_config['datasets'].keys()}"
         )
-    exp_pool_path = os.path.join(data_path, f"{pool_name}.json")
-    if not os.path.exists(exp_pool_path):
+    exp_pool_path = Path(data_path) / f"{pool_name}.json"
+    if not exp_pool_path.exists():
         return None
     return exp_pool_path

 def change_plan(role, plan):
-    print(f"Change next plan to: {plan}")
+    mcts_logger.info(f"Change next plan to: {plan}")
     tasks = role.planner.plan.tasks
     finished = True
     for i, task in enumerate(tasks):
@@ -115,8 +115,8 @@ async def load_execute_notebook(role):
     # await executor.build()
     for code in codes:
         outputs, success = await executor.run(code)
-        print(f"Execution success: {success}, Output: {outputs}")
-    print("Finish executing the loaded notebook")
+        mcts_logger.info(f"Execution success: {success}, Output: {outputs}")
+    mcts_logger.info("Finish executing the loaded notebook")
     return executor