Mirror of https://github.com/FoundationAgents/MetaGPT.git, synced 2026-05-03 12:52:37 +02:00
Update print error
This commit is contained in:
parent 2b788b21f6
commit ebcacdd648
6 changed files with 26 additions and 23 deletions
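The change is mechanical: each print(...) call in the AFlow scripts is replaced by logger.info(...), with the logger imported from metagpt.logs wherever it was missing. Below is a minimal sketch of the pattern, assuming metagpt.logs exposes a ready-to-use logger object (only the import path is visible in this diff):

from metagpt.logs import logger

def report_score(score: float) -> None:
    # Before: print(f"score: {score}") writes to stdout only.
    # After: the project logger adds timestamps and levels, and can also
    # write to log files, so output survives non-interactive runs.
    logger.info(f"score: {score}")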
@@ -27,7 +27,7 @@ from examples.aflow.scripts.prompt import (
     CONTEXTUAL_GENERATE_PROMPT,
     FORMAT_PROMPT,
     GENERATE_CODEBLOCK_PROMPT,
-    GENERATE_PROMPT,
+    GENERATE_PROMPT,  # TODO
     MD_ENSEMBLE_PROMPT,
     PYTHON_CODE_VERIFIER_PROMPT,
     REFLECTION_ON_PUBLIC_TEST_PROMPT,
@@ -167,7 +167,7 @@ class MdEnsemble(Operator):
         return shuffled_solutions, answer_mapping
 
     async def __call__(self, solutions: List[str], problem: str, mode: str = None):
-        print(f"solution count: {len(solutions)}")
+        logger.info(f"solution count: {len(solutions)}")
         all_responses = []
 
         for _ in range(self.vote_count):
@@ -369,6 +369,6 @@ class Programmer(Operator):
             if status == "Success":
                 return {"code": code, "output": output}
             else:
-                print(f"Execution error in attempt {i + 1}, error message: {output}")
+                logger.info(f"Execution error in attempt {i + 1}, error message: {output}")
                 feedback = f"\nThe result of the error from the code you wrote in the previous round:\nCode:{code}\n\nStatus:{status},{output}"
         return {"code": code, "output": "error"}

@@ -11,7 +11,7 @@ from pydantic import BaseModel, Field
 
 from metagpt.actions.action_node import ActionNode
 from metagpt.provider.llm_provider_registry import create_llm_instance
-
+from metagpt.logs import logger
 from examples.aflow.scripts.optimizer_utils.graph_utils import GraphUtils
 from examples.aflow.scripts.optimizer_utils.data_utils import DataUtils
 from examples.aflow.scripts.optimizer_utils.experience_utils import ExperienceUtils
@@ -88,9 +88,9 @@ class Optimizer:
                     break
                 except Exception as e:
                     retry_count += 1
-                    print(f"Error occurred: {e}. Retrying... (Attempt {retry_count}/{max_retries})")
+                    logger.info(f"Error occurred: {e}. Retrying... (Attempt {retry_count}/{max_retries})")
                     if retry_count == max_retries:
-                        print("Max retries reached. Moving to next round.")
+                        logger.info("Max retries reached. Moving to next round.")
                         score = None
 
                     wait_time = 5 * retry_count
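The hunk above shows the Optimizer's retry bookkeeping: retry_count increments on each failure and wait_time = 5 * retry_count grows linearly. A self-contained sketch of that retry-with-linear-backoff shape; do_round and the max_retries default are stand-ins, since the diff does not show what a round actually executes:

import time
from metagpt.logs import logger

def run_with_retries(do_round, max_retries: int = 3):
    # do_round is a hypothetical callable standing in for one optimization round.
    retry_count = 0
    while retry_count < max_retries:
        try:
            return do_round()
        except Exception as e:
            retry_count += 1
            logger.info(f"Error occurred: {e}. Retrying... (Attempt {retry_count}/{max_retries})")
            if retry_count == max_retries:
                logger.info("Max retries reached. Moving to next round.")
                return None
            time.sleep(5 * retry_count)  # linear backoff, mirroring wait_time = 5 * retry_count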
@@ -100,13 +100,13 @@ class Optimizer:
             loop = asyncio.new_event_loop()
             asyncio.set_event_loop(loop)
             self.round += 1
-            print(f"Score for round {self.round}: {score}")
+            logger.info(f"Score for round {self.round}: {score}")
 
             converged, convergence_round, final_round = self.convergence_utils.check_convergence(top_k=3)
 
             if converged and self.check_convergence:
 
-                print(f"Convergence detected, occurred in round {convergence_round}, final round is {final_round}")
+                logger.info(f"Convergence detected, occurred in round {convergence_round}, final round is {final_round}")
                 # Print average scores and standard deviations for each round
                 self.convergence_utils.print_results()
                 break
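check_convergence(top_k=3) returns a (converged, convergence_round, final_round) tuple; its implementation is outside this diff. A hypothetical check consistent with that signature, treating convergence as the top-k mean score no longer improving:

import numpy as np

def check_convergence(avg_scores: list, top_k: int = 3, tol: float = 1e-4):
    # Illustrative only; the real ConvergenceUtils.check_convergence is not shown here.
    final_round = len(avg_scores)
    if final_round <= top_k:
        return False, None, final_round
    best_without_last = np.mean(sorted(avg_scores[:-1], reverse=True)[:top_k])
    best_with_last = np.mean(sorted(avg_scores, reverse=True)[:top_k])
    if best_with_last - best_without_last < tol:
        return True, final_round, final_round
    return False, None, final_round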
@@ -165,7 +165,7 @@ class Optimizer:
 
         self.graph = self.graph_utils.load_graph(self.round + 1, graph_path)
 
-        print(directory)
+        logger.info(directory)
 
         avg_score = await self.evaluation_utils.evaluate_graph(self, directory, validation_n, data, initial=False)
 

@@ -6,6 +6,7 @@
 import numpy as np
 import json
 import os
+from metagpt.logs import logger
 
 class ConvergenceUtils:
     def __init__(self, root_path):
@@ -106,7 +107,7 @@ class ConvergenceUtils:
         """
         self.avg_scores, self.stds = self.calculate_avg_and_std()
         for i, (avg_score, std) in enumerate(zip(self.avg_scores, self.stds), 1):
-            print(f"Round {i}: average score = {avg_score:.4f}, std dev = {std:.4f}")
+            logger.info(f"Round {i}: average score = {avg_score:.4f}, std dev = {std:.4f}")
 
 if __name__ == "__main__":
 
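print_results walks the pairs returned by calculate_avg_and_std. A compact numpy version of that reporting loop, with one list of scores per round as a hypothetical input shape:

import numpy as np
from metagpt.logs import logger

def print_results(rounds):
    # rounds: hypothetical shape, one list of scores per round,
    # e.g. [[0.81, 0.84], [0.90, 0.88]].
    for i, scores in enumerate(rounds, 1):
        avg_score, std = np.mean(scores), np.std(scores)
        logger.info(f"Round {i}: average score = {avg_score:.4f}, std dev = {std:.4f}")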
@@ -115,9 +116,9 @@ if __name__ == "__main__":
     converged, convergence_round, final_round = checker.check_convergence()
 
     if converged:
-        print(f"Convergence detected at round {convergence_round}; the final round is {final_round}")
+        logger.info(f"Convergence detected at round {convergence_round}; the final round is {final_round}")
     else:
-        print("No convergence detected in any round")
+        logger.info("No convergence detected in any round")
 
     # Print the average score and standard deviation for each round
     checker.print_results()

@@ -4,6 +4,7 @@ import random
 import datetime
 import numpy as np
 import pandas as pd
+from metagpt.logs import logger
 
 
 class DataUtils:
@@ -49,10 +50,10 @@ class DataUtils:
         scores = [item["score"] * 100 for item in sorted_items]
 
         probabilities = self._compute_probabilities(scores)
-        print("\nMixed probability distribution: ", probabilities)
+        logger.info(f"\nMixed probability distribution: {probabilities}")
 
         selected_index = np.random.choice(len(sorted_items), p=probabilities)
-        print(f"\nSelected index: {selected_index}, Selected item: {sorted_items[selected_index]}")
+        logger.info(f"\nSelected index: {selected_index}, Selected item: {sorted_items[selected_index]}")
 
         return sorted_items[selected_index]
 
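Selection here draws an index with np.random.choice over the probability vector returned by _compute_probabilities, which is not shown. A sketch with a stabilized softmax as a stand-in weighting:

import numpy as np

def select_item(sorted_items):
    # The softmax below is a stand-in; the diff only guarantees that
    # _compute_probabilities returns a probability vector summing to 1.
    scores = np.array([item["score"] * 100 for item in sorted_items], dtype=float)
    exp = np.exp(scores - scores.max())  # subtract max for numerical stability
    probabilities = exp / exp.sum()
    selected_index = np.random.choice(len(sorted_items), p=probabilities)
    return sorted_items[selected_index]

Usage: select_item([{"score": 0.8}, {"score": 0.9}]) favors the higher-scoring item without making the choice deterministic.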
@@ -92,7 +93,7 @@ class DataUtils:
         # Check whether the file exists
         if not os.path.exists(log_dir):
             return ""  # Return an empty string if the file does not exist
-        print(log_dir)
+        logger.info(log_dir)
         with open(log_dir, 'r', encoding='utf-8') as f:
             data = json.load(f)
 

@@ -1,7 +1,7 @@
 import json
 import os
 from collections import defaultdict
-
+from metagpt.logs import logger
 
 class ExperienceUtils:
     def __init__(self, root_path: str):
@@ -40,7 +40,7 @@ class ExperienceUtils:
                         "score": data["after"]
                     }
                 except Exception as e:
-                    print(f"Error processing {round_dir}: {str(e)}")
+                    logger.info(f"Error processing {round_dir}: {str(e)}")
 
         experience_data = dict(experience_data)
 
@@ -48,7 +48,7 @@ class ExperienceUtils:
         with open(output_path, "w", encoding="utf-8") as outfile:
             json.dump(experience_data, outfile, indent=4, ensure_ascii=False)
 
-        print(f"Processed experience data saved to {output_path}")
+        logger.info(f"Processed experience data saved to {output_path}")
         return experience_data
 
     def format_experience(self, processed_experience, sample_round):

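The save step above writes the aggregated experience with json.dump(..., indent=4, ensure_ascii=False). ensure_ascii=False emits non-ASCII characters (such as the Chinese strings that appear elsewhere in these scripts) verbatim rather than as \uXXXX escapes, which is why the file is opened with encoding="utf-8". A minimal demonstration with a hypothetical payload and path:

import json

experience_data = {"round": 1, "score": 0.93}  # hypothetical payload
with open("processed_experience.json", "w", encoding="utf-8") as outfile:
    # indent=4 keeps the file human-readable; ensure_ascii=False keeps
    # non-ASCII text readable and requires a UTF-8 file handle.
    json.dump(experience_data, outfile, indent=4, ensure_ascii=False)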
@@ -4,6 +4,7 @@ import json
 from typing import List
 import traceback
 import time
+from metagpt.logs import logger
 
 from examples.aflow.scripts.prompts.optimize_prompt import (
     WORKFLOW_CUSTOM_USE,
@@ -31,7 +32,7 @@ class GraphUtils:
             graph_class = getattr(graph_module, "Workflow")
             return graph_class
         except ImportError as e:
-            print(f"Error loading graph for round {round_number}: {e}")
+            logger.info(f"Error loading graph for round {round_number}: {e}")
             raise
 
     def read_graph_files(self, round_number: int, workflows_path: str):
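load_graph resolves a generated Workflow class at runtime via getattr on an imported module. A minimal sketch of that dynamic-import pattern; the module path format is hypothetical, since only the getattr call and the ImportError handling appear in the diff:

import importlib
from metagpt.logs import logger

def load_workflow_class(round_number: int):
    module_path = f"workflows.round_{round_number}.graph"  # hypothetical path format
    try:
        graph_module = importlib.import_module(module_path)
        return getattr(graph_module, "Workflow")
    except ImportError as e:
        logger.info(f"Error loading graph for round {round_number}: {e}")
        raise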
@@ -44,10 +45,10 @@ class GraphUtils:
             with open(graph_file_path, "r", encoding="utf-8") as file:
                 graph_content = file.read()
         except FileNotFoundError as e:
-            print(f"Error: File not found for round {round_number}: {e}")
+            logger.info(f"Error: File not found for round {round_number}: {e}")
             raise
         except Exception as e:
-            print(f"Error loading prompt for round {round_number}: {e}")
+            logger.info(f"Error loading prompt for round {round_number}: {e}")
             raise
         return prompt_content, graph_content
 
@@ -90,9 +91,9 @@ class GraphUtils:
                 return response
             except Exception as e:
                 retries += 1
-                print(f"Error generating prediction: {e}. Retrying... ({retries}/{max_retries})")
+                logger.info(f"Error generating prediction: {e}. Retrying... ({retries}/{max_retries})")
                 if retries == max_retries:
-                    print("Maximum retries reached. Skipping this sample.")
+                    logger.info("Maximum retries reached. Skipping this sample.")
                     break
                 traceback.print_exc()
                 time.sleep(5)