mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-04-25 00:36:55 +02:00
Update HotpotQA's init round
This commit is contained in:
parent
23eec00b00
commit
d8c7174fc0
10 changed files with 166 additions and 0 deletions
|
|
@ -0,0 +1,32 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 22:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : Basic Graph Class
|
||||
|
||||
from typing import Literal
|
||||
import examples.aflow.scripts.optimized.HotpotQA.workflows.template.operator as operator
|
||||
import examples.aflow.scripts.optimized.HotpotQA.workflows.round_1.prompt as prompt_custom
|
||||
from metagpt.provider.llm_provider_registry import create_llm_instance
|
||||
from metagpt.utils.cost_manager import CostManager
|
||||
|
||||
# Benchmark datasets this workflow template can be instantiated for.
DatasetType = Literal["HumanEval", "MBPP", "GSM8K", "MATH", "HotpotQA", "DROP"]
|
||||
|
||||
class Workflow:
    """Round-1 HotpotQA workflow: one Custom operator call per problem."""

    def __init__(
        self,
        name: str,
        llm_config,
        dataset: DatasetType,
    ) -> None:
        self.name = name
        self.dataset = dataset
        # One LLM instance per workflow, with its own cost tracker so that
        # total_cost reflects only this workflow's usage.
        self.llm = create_llm_instance(llm_config)
        self.llm.cost_manager = CostManager()
        self.custom = operator.Custom(self.llm)

    async def __call__(self, problem: str):
        """Solve *problem*; return (answer text, accumulated LLM cost)."""
        result = await self.custom(input=problem, instruction="")
        answer = result["response"]
        return answer, self.llm.cost_manager.total_cost
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# XXX_PROMPT = """
|
||||
#
|
||||
# Solve it.
|
||||
#
|
||||
# """
|
||||
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# Prompt for the ScEnsemble operator: the LLM is shown lettered candidate
# solutions (filled into {solutions}) and must pick the most frequent one.
SC_ENSEMBLE_PROMPT = """
Several answers have been generated to a same question. They are as follows:
{solutions}

Identify the concise answer that appears most frequently across them. This consistency in answers is crucial for determining the most reliable solution.

In the "thought" field, provide a detailed explanation of your thought process. In the "solution_letter" field, output only the single letter ID (A, B, C, etc.) corresponding to the most consistent solution. Do not include any additional text or explanation in the "solution_letter" field.
"""

# Prompt for the AnswerGenerate operator: step-by-step reasoning in "thought",
# concise final result in "answer". {input} receives the raw question text.
ANSWER_GENERATION_PROMPT = """
Think step by step and solve the problem.
1. In the "thought" field, explain your thinking process in detail.
2. In the "answer" field, provide the final answer concisely and clearly. The answer should be a direct response to the question, without including explanations or reasoning.
Your task: {input}
"""
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
{
|
||||
"Custom": {
|
||||
"description": "Generates anything based on customized input and instruction.",
|
||||
"interface": "custom(input: str, instruction: str) -> dict with key 'response' of type str"
|
||||
},
|
||||
"ScEnsemble": {
|
||||
"description": "Uses self-consistency to select the solution that appears most frequently in the solution list, improving the selection to enhance the choice of the best solution.",
|
||||
"interface": "sc_ensemble(solutions: List[str]) -> dict with key 'response' of type str"
|
||||
},
|
||||
"AnswerGenerate": {
|
||||
"description": "Generate step by step based on the input. The step by step thought process is in the field of 'thought', and the final answer is in the field of 'answer'.",
|
||||
"interface": "answer_generate(input: str) -> dict with key 'thought' of type str, 'answer' of type str"
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 17:36 PM
|
||||
# @Author : didi
|
||||
# @Desc : operator demo of ags
|
||||
import ast
|
||||
import random
|
||||
import sys
|
||||
import traceback
|
||||
from collections import Counter
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
from tenacity import retry, stop_after_attempt, wait_fixed
|
||||
|
||||
from examples.aflow.scripts.optimized.HotpotQA.workflows.template.operator_an import *
|
||||
from examples.aflow.scripts.optimized.HotpotQA.workflows.template.op_prompt import *
|
||||
from metagpt.actions.action_node import ActionNode
|
||||
from metagpt.llm import LLM
|
||||
from metagpt.logs import logger
|
||||
import re
|
||||
|
||||
|
||||
class Operator:
    """Base class for workflow operators; subclasses implement ``__call__``."""

    def __init__(self, llm: LLM, name: str):
        self.name = name
        self.llm = llm

    def __call__(self, *args, **kwargs):
        # Concrete operators provide their own (usually async) call logic.
        raise NotImplementedError

    async def _fill_node(self, op_class, prompt, mode=None, **extra_kwargs):
        """Fill an ActionNode built from *op_class* and return its content dict.

        ``extra_kwargs`` intentionally take precedence over ``mode``.
        """
        fill_kwargs = {
            "context": prompt,
            "llm": self.llm,
            **({"mode": mode} if mode else {}),
            **extra_kwargs,
        }
        node = await ActionNode.from_pydantic(op_class).fill(**fill_kwargs)
        return node.instruct_content.model_dump()
|
||||
|
||||
|
||||
class Custom(Operator):
    """Free-form generation operator: instruction is prepended to the input."""

    def __init__(self, llm: LLM, name: str = "Custom"):
        super().__init__(llm, name)

    async def __call__(self, input, instruction):
        # The caller's instruction is concatenated verbatim before the input.
        return await self._fill_node(GenerateOp, instruction + input, mode="single_fill")
|
||||
|
||||
class AnswerGenerate(Operator):
    """Generate a step-by-step 'thought' and a final 'answer' for the input."""

    def __init__(self, llm: LLM, name: str = "AnswerGenerate"):
        super().__init__(llm, name)

    async def __call__(self, input: str, mode: str = None) -> Dict[str, str]:
        # Fix: the original annotation claimed Tuple[str, str], but _fill_node
        # returns model_dump(), i.e. a dict with 'thought' and 'answer' keys.
        # NOTE(review): `mode` is accepted for interface compatibility but is
        # ignored; the node is always filled in "context_fill" mode.
        prompt = ANSWER_GENERATION_PROMPT.format(input=input)
        response = await self._fill_node(AnswerGenerateOp, prompt, mode="context_fill")
        return response
|
||||
|
||||
class ScEnsemble(Operator):
    """
    Paper: Self-Consistency Improves Chain of Thought Reasoning in Language Models
    Link: https://arxiv.org/abs/2203.11171
    Paper: Universal Self-Consistency for Large Language Model Generation
    Link: https://arxiv.org/abs/2311.17311
    """

    def __init__(self, llm: LLM, name: str = "ScEnsemble"):
        super().__init__(llm, name)

    async def __call__(self, solutions: List[str]):
        """Ask the LLM to pick the most consistent of *solutions*; return it."""
        # Label each candidate A, B, C, ... and remember which index each
        # letter maps back to.
        letter_to_index = {}
        labelled_parts = []
        for idx, sol in enumerate(solutions):
            letter = chr(ord("A") + idx)
            letter_to_index[letter] = idx
            labelled_parts.append(f"{letter}: \n{str(sol)}\n\n\n")

        prompt = SC_ENSEMBLE_PROMPT.format(solutions="".join(labelled_parts))
        response = await self._fill_node(ScEnsembleOp, prompt, mode="context_fill")

        # Normalize the model's letter choice before looking it up.
        chosen = response.get("solution_letter", "").strip().upper()
        return {"response": solutions[letter_to_index[chosen]]}
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 19:46 PM
|
||||
# @Author : didi
|
||||
# @Desc : action nodes for operator
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class GenerateOp(BaseModel):
    """Output schema for the Custom operator: a single free-form response."""

    response: str = Field(default="", description="Your solution for this problem")
|
||||
|
||||
class ScEnsembleOp(BaseModel):
    """Output schema for ScEnsemble: reasoning plus the chosen solution letter."""

    thought: str = Field(default="", description="The thought of the most consistent solution.")
    solution_letter: str = Field(default="", description="The letter of most consistent solution.")
|
||||
|
||||
class AnswerGenerateOp(BaseModel):
    """Output schema for AnswerGenerate: step-by-step thought and final answer."""

    thought: str = Field(default="", description="The step by step thinking process")
    answer: str = Field(default="", description="The final answer to the question")
|
||||
Loading…
Add table
Add a link
Reference in a new issue