Remove Chinese comments and redundant code

This commit is contained in:
isaacJinyu 2025-02-05 18:31:11 +08:00
parent a56b0e340a
commit 5c8e6da655
7 changed files with 14 additions and 66 deletions

View file

@ -13,7 +13,7 @@ from metagpt.ext.spo.scripts.utils.llm_client import SPO_LLM, extract_content
class QuickExecute:
"""
完成不同数据集的评估
Execute Prompt
"""
def __init__(self, prompt: str):
@ -41,7 +41,7 @@ class QuickExecute:
class QuickEvaluate:
"""
Complete the evaluation for different datasets here.
Complete the evaluation for different answers here.
"""
def __init__(self):
@ -79,6 +79,5 @@ class QuickEvaluate:
if __name__ == "__main__":
    # Ad-hoc smoke test: run the prompt executor once and print its answers.
    execute = QuickExecute(prompt="Answer the Question{question}")
    # Use asyncio.run to drive the async evaluation method from sync code.
    answers = asyncio.run(execute.prompt_evaluate())
    print(answers)

View file

@ -84,8 +84,6 @@ class Optimizer:
logger.info(f"choose {sample['round']}")
prompt = sample['prompt']
golden_answer = self.data_utils.list_to_markdown(qa)
best_answer = self.data_utils.list_to_markdown(sample["answers"])
@ -98,6 +96,9 @@ class Optimizer:
response = await self.llm.responser(role="optimize", messages=[{"role": "user", "content": optimize_prompt}])
modification = extract_content(response, "modification")
logger.info(f"Modification of this round: {modification}")
prompt = extract_content(response, "prompt")
if prompt:
self.prompt = prompt

View file

@ -70,39 +70,6 @@ class DataUtils:
return self.top_scores
def set_file_name(name):
    """Point subsequent load_meta_data() calls at a different settings YAML file.

    Args:
        name: File name (relative to the settings directory) to load from.
    """
    # Mutates the module-level FILE_NAME global that load_meta_data reads.
    global FILE_NAME
    FILE_NAME = name
def load_meta_data(k=SAMPLE_K):
    """Load the prompt, requirements and a random sample of QA pairs.

    Reads the YAML settings file named by the module-level FILE_NAME from
    the ../settings directory relative to this module.

    Args:
        k: Maximum number of question/answer pairs to sample (default SAMPLE_K).

    Returns:
        Tuple of (prompt, requirements, random_qa, count) where random_qa is a
        list of {'question': ..., 'answer': ...} dicts and count is either
        ", within N words" or an empty string.
    """
    settings_path = os.path.join(os.path.dirname(__file__), '../settings', FILE_NAME)
    with open(settings_path, 'r', encoding='utf-8') as fh:
        meta = yaml.safe_load(fh)

    # Collect the question/answer pairs from the FAQ section.
    qa = [{'question': entry['question'], 'answer': entry['answer']} for entry in meta['faq']]

    prompt = meta['prompt']
    requirements = meta['requirements']
    # An integer count becomes a word-limit suffix; anything else means no limit.
    raw_count = meta['count']
    count = f", within {raw_count} words" if isinstance(raw_count, int) else ""

    # Sample at most k pairs, never more than the list actually holds.
    random_qa = random.sample(qa, min(k, len(qa)))
    return prompt, requirements, random_qa, count
def list_to_markdown(self, questions_list):
"""
Convert a list of question-answer dictionaries to a formatted Markdown string.

View file

@ -1,5 +1,3 @@
import asyncio
from metagpt.ext.spo.scripts.evaluator import QuickEvaluate, QuickExecute
from metagpt.logs import logger
import tiktoken

View file

@ -55,23 +55,23 @@ def extract_content(xml_string, tag):
async def spo():
# 在入口处初始化配置
# test LLM
SPO_LLM.initialize(
optimize_kwargs={"model": "gpt-4o-mini", "temperature": 0.7},
optimize_kwargs={"model": "gpt-4o", "temperature": 0.7},
evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3},
execute_kwargs={"model": "gpt-4o-mini", "temperature": 0.3}
)
llm = SPO_LLM.get_instance()
# 测试消息
hello_msg = [{"role": "user", "content": "你是什么模型"}]
# test messages
hello_msg = [{"role": "user", "content": "hello"}]
response = await llm.responser(role='execute', messages=hello_msg)
print(f"AI回复: {response}")
print(f"AI: {response}")
response = await llm.responser(role='optimize', messages=hello_msg)
print(f"AI回复: {response}")
print(f"AI: {response}")
response = await llm.responser(role='evaluate', messages=hello_msg)
print(f"AI回复: {response}")
print(f"AI: {response}")
if __name__ == "__main__":

View file

@ -5,16 +5,6 @@ import os
FILE_NAME = 'meta.yaml'
SAMPLE_K = 3
def load_llm():
    """Read the YAML config file from the parent directory.

    Returns:
        The parsed contents of ../config.yaml as returned by yaml.safe_load.
    """
    cfg_path = os.path.join(os.path.dirname(__file__), '..', 'config.yaml')
    with open(cfg_path, 'r') as fh:
        return yaml.safe_load(fh)
def set_file_name(name):
    """Override the settings file name used by load_meta_data().

    Args:
        name: File name (relative to the settings directory) to load from.
    """
    # Rebinds the module-level FILE_NAME global consumed by load_meta_data.
    global FILE_NAME
    FILE_NAME = name
@ -22,14 +12,13 @@ def set_file_name(name):
def load_meta_data(k=SAMPLE_K):
# 读取 YAML 文件
# load yaml file
config_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'settings', FILE_NAME)
with open(config_path, 'r', encoding='utf-8') as file:
data = yaml.safe_load(file)
qa = []
# 提取问题和答案
for item in data['faq']:
question = item['question']
answer = item['answer']
@ -44,8 +33,7 @@ def load_meta_data(k=SAMPLE_K):
else:
count = ""
# 随机选择k组问答
random_qa = random.sample(qa, min(k, len(qa))) # 确保不超过列表长度
random_qa = random.sample(qa, min(k, len(qa)))
return prompt, requirements, random_qa, count

View file

@ -1,9 +1,4 @@
import json
import os
import re
import time
import traceback
from typing import List
from metagpt.logs import logger