mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-05-05 05:42:37 +02:00
Update AGS
This commit is contained in:
parent
9f8f0a27fd
commit
8f1cf58af2
11 changed files with 918 additions and 0 deletions
101
examples/ags/demo/claude.py
Normal file
101
examples/ags/demo/claude.py
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
|
||||
from typing import Any, Dict, List, Callable
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
class LLM:
    """Placeholder LLM client; the demo around it treats `ask` as the only API."""

    def ask(self, text: str) -> str:
        """Send *text* to the model and return its reply.

        NOTE(review): this is a stub — it currently returns None, so the
        demo pipeline below will fail once a caller parses the reply.
        """
        # Implement LLM query logic here
        pass
|
||||
|
||||
class Operator(ABC):
    """Abstract callable unit wrapping an LLM invocation."""

    def __init__(self, llm: LLM):
        self.llm = llm

    @abstractmethod
    def forward(self, *args: Any, **kwargs: Any) -> Any:
        """Subclasses implement the operator's actual work here."""
        ...

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        # Make instances invocable like plain functions.
        return self.forward(*args, **kwargs)
|
||||
|
||||
class Generate(Operator):
    """Operator that prefixes a fixed prompt and queries the LLM."""

    def __init__(self, llm: LLM, prompt: str):
        super().__init__(llm)
        self.prompt = prompt

    def forward(self, input_problem: str) -> str:
        query = f"{self.prompt}\n{input_problem}"
        return self.llm.ask(query)
|
||||
|
||||
class Review(Operator):
    """Ask the LLM to score a solution against each configured criterion."""

    def __init__(self, llm: LLM, criteria: List[str]):
        super().__init__(llm)
        self.criteria = criteria

    def forward(self, solution: str) -> Dict[str, float]:
        """Return a {criterion: score} mapping parsed from the LLM's reply."""
        review_prompt = f"Review the following solution based on these criteria: {', '.join(self.criteria)}\n\nSolution: {solution}"
        review_result = self.llm.ask(review_prompt)
        scores: Dict[str, float] = {}
        for criterion in self.criteria:
            # BUG FIX: the original one-line comprehension raised
            # IndexError/ValueError (or AttributeError on a None reply)
            # whenever a criterion was missing or not followed by a number;
            # default such criteria to 0.0 instead of crashing.
            try:
                scores[criterion] = float(review_result.split(criterion)[1].split()[0])
            except (AttributeError, IndexError, ValueError):
                scores[criterion] = 0.0
        return scores
|
||||
|
||||
class Module:
    """Composable pipeline stage; subclasses override `forward`."""

    def __init__(self, llm: LLM):
        self.llm = llm

    def forward(self, x: Any) -> Any:
        raise NotImplementedError("Subclasses must implement forward method")

    def __call__(self, x: Any) -> Any:
        # Invoking the module runs its forward pass.
        return self.forward(x)
|
||||
|
||||
class CodeGenerationModule(Module):
    """Generate a code solution, then have the LLM review it."""

    def __init__(self, llm: LLM):
        super().__init__(llm)
        # Fixed generation prompt and review criteria for this demo module.
        self.generate = Generate(llm, "Generate a Python function for the following problem:")
        self.review = Review(llm, ["correctness", "efficiency", "readability"])

    def forward(self, problem: str) -> Dict[str, Any]:
        """Return {"solution": <generated code>, "review": <criterion scores>}."""
        solution = self.generate(problem)
        review = self.review(solution)
        return {"solution": solution, "review": review}
|
||||
|
||||
def optimize(module: Module, loss_fn: Callable[[Dict[str, Any]], float], iterations: int = 10):
    """Placeholder optimization loop; currently a no-op for *iterations* rounds."""
    for _ in range(iterations):
        # This is a placeholder for the optimization logic
        # In a real implementation, you would:
        # 1. Run the module on some input
        # 2. Compute the loss
        # 3. Use the loss to improve the module (e.g., by adjusting prompts or using LLM feedback)
        pass
|
||||
|
||||
# Usage
# NOTE(review): this demo runs at import time; since LLM.ask is a stub that
# returns None, Review.forward will fail when parsing the reply — the script
# is illustrative only.
llm = LLM()
code_gen = CodeGenerationModule(llm)

# Solve a problem
result = code_gen("Write a function to calculate the factorial of a number")
print(result)

# Define a loss function
def loss_function(output: Dict[str, Any]) -> float:
    """Loss = 1 - correctness score taken from the review dict."""
    # Implement your loss computation here
    # For example, you might use the review scores
    return 1.0 - output["review"].get("correctness", 0)

# Optimize the module
optimize(code_gen, loss_function, iterations=10)

# You can also create custom modules easily
class CustomModule(Module):
    """Example two-stage pipeline: generate with a custom prompt, then review."""

    def __init__(self, llm: LLM):
        super().__init__(llm)
        self.op1 = Generate(llm, "Custom prompt 1")
        self.op2 = Review(llm, ["custom_criteria"])

    def forward(self, x: str) -> Dict[str, Any]:
        intermediate = self.op1(x)
        final = self.op2(intermediate)
        return {"result": final}

custom_module = CustomModule(llm)
custom_result = custom_module("Custom input")
print(custom_result)
|
||||
82
examples/ags/demo/claude_2.py
Normal file
82
examples/ags/demo/claude_2.py
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
from metagpt import nn
|
||||
import metagpt.functional as F
|
||||
|
||||
class Generate(nn.Module):
    """Wrap an LLM and expose text generation as a module."""

    def __init__(self, model_name):
        # Python-3 zero-argument super(); the old super(Generate, self) form
        # is a Python-2 holdover.
        super().__init__()
        self.model = nn.LLM(model_name)

    def forward(self, prompt):
        return self.model.generate(prompt)
|
||||
|
||||
class Review(nn.Module):
    """Analyze generated code against a fixed list of criteria."""

    def __init__(self, criteria):
        super().__init__()  # modern zero-argument super()
        self.criteria = criteria

    def forward(self, generated_code):
        return F.analyze(generated_code, self.criteria)
|
||||
|
||||
class Revise(nn.Module):
    """Rewrite code according to review feedback using an LLM."""

    def __init__(self, model_name):
        super().__init__()  # modern zero-argument super()
        self.model = nn.LLM(model_name)

    def forward(self, original_code, review_feedback):
        prompt = f"Original code:\n{original_code}\n\nFeedback:\n{review_feedback}\n\nRevised code:"
        return self.model.generate(prompt)
|
||||
|
||||
class Ensemble(nn.Module):
    """Combine multiple candidate solutions with a configurable strategy."""

    def __init__(self, strategy='majority_vote'):
        super().__init__()  # modern zero-argument super()
        self.strategy = strategy

    def forward(self, solutions):
        return F.ensemble(solutions, strategy=self.strategy)
|
||||
|
||||
class LLMAgent(nn.Module):
    """Generate -> review -> revise pipeline, ensembled over several rounds."""

    def __init__(self, generate_model, review_criteria, revise_model):
        super().__init__()  # modern zero-argument super()
        self.generate = Generate(generate_model)
        self.review = Review(review_criteria)
        self.revise = Revise(revise_model)
        self.ensemble = Ensemble()

    def forward(self, problem_description, num_iterations=3):
        solutions = []
        for _ in range(num_iterations):
            # Generate an initial solution
            initial_solution = self.generate(problem_description)

            # Review the solution
            review_feedback = self.review(initial_solution)

            # Revise the solution based on the feedback
            revised_solution = self.revise(initial_solution, review_feedback)

            solutions.append(revised_solution)

        # Aggregate the candidate solutions
        final_solution = self.ensemble(solutions)
        return final_solution
|
||||
|
||||
# Example usage
problem = """
Human: Write a function that takes a list of numbers and returns the sum of the numbers at even indices.

Function Signature:
def sum_even_indices(numbers: List[int]) -> int:

Example:
>>> sum_even_indices([1, 2, 3, 4, 5])
9 # 1 + 3 + 5 = 9
"""

# NOTE(review): this runs at import time and needs the metagpt.nn runtime
# plus API access for the named models — confirm availability before use.
agent = LLMAgent(
    generate_model="gpt-3.5-turbo",
    review_criteria=["correctness", "efficiency", "readability"],
    revise_model="gpt-4"
)

solution = agent(problem)
print(solution)
|
||||
37
examples/ags/demo/graph.py
Normal file
37
examples/ags/demo/graph.py
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/26/2024 17:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : graph demo of ags
|
||||
|
||||
from examples.ags.demo.operator import Generate, GenerateCode, Review, Revise, Ensemble, LLM
|
||||
|
||||
class Graph:
    """Base class for operator graphs; subclasses implement __call__."""

    def __init__(self, name: str, llm: str) -> None:
        self.name = name
        self.model = llm  # TODO: abstract a mechanism so different operators can use different models

    def __call__(self, *args, **kwargs):
        # BUG FIX: the original signature was missing `self`, and the
        # NotImplementedError was instantiated but never raised.
        raise NotImplementedError("Subclasses must implement __call__ method")
|
||||
|
||||
|
||||
class HumanEvalGraph(Graph):
    """generate -> (review -> revise)* loop for HumanEval-style problems."""

    def __init__(self, name: str, llm: str, criteria: str) -> None:
        super().__init__(name, llm)
        self.criteria = criteria  # TODO: init params with positional arguments are derived from the operators' requirements
        self.generate_code = GenerateCode(llm=LLM(model=llm))
        self.review = Review(llm=LLM(model=llm), criteria=criteria)
        self.revise = Revise(llm=LLM(model=llm))
        self.ensemble = Ensemble(llm=LLM(model=llm))

    def __call__(self, problem):
        # TODO: first implementation, without Ensemble.
        # Operators return dicts; unwrap them so prompts receive plain text
        # instead of a dict repr.
        solution = self.generate_code(problem).get("code")
        # review & revise loop
        for _ in range(3):
            review_feedback = self.review(problem, solution)
            if review_feedback['result']:
                break
            # BUG FIX: Revise.__call__ takes (problem_description, solution,
            # feedback); the problem argument was missing, which raised a
            # TypeError on every revise step.
            revised = self.revise(problem, solution, review_feedback['feedback'])
            solution = revised.get("revised_solution")
        return solution
|
||||
|
||||
|
||||
109
examples/ags/demo/operator.py
Normal file
109
examples/ags/demo/operator.py
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/26/2024 17:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : operator demo of ags
|
||||
|
||||
import json
import os

from openai import OpenAI

from examples.ags.demo.prompt import GENERATE_PROMPT, GENERATE_CODE_PROMPT, REVIEW_PROMPT, REVISE_PROMPT, ENSEMBLE_PROMPT
|
||||
|
||||
class LLM():
    """Minimal synchronous client for an OpenAI-compatible chat-completions API."""

    def __init__(self, model: str = 'gpt-4-turbo', timeout: int = 60):
        self.model = model
        # NOTE(review): timeout is stored but not applied to requests yet —
        # consider client.with_options(timeout=...).
        self.timeout = timeout
        # SECURITY FIX: credentials must come from the environment, never from
        # source control. The previously hard-coded key is considered leaked
        # and must be revoked.
        self.api_key = os.environ.get('OPENAI_API_KEY', '')
        self.base_url = os.environ.get('OPENAI_BASE_URL', 'https://api.aigcbest.top/v1')
        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)
        self.system_prompt = None  # optional system message prepended to every request

    def ask(self, text: str, json_mode: bool = False, temperature: float = 0.7, retries: int = 5):
        """Send *text* to the model, retrying on failure.

        Returns the completion text, or the parsed dict when json_mode is
        True.  Raises RuntimeError when all retries fail (the original
        silently returned None).
        """
        response_type = "json_object" if json_mode else "text"
        # `is None`, not `== None`, for the singleton check.
        messages = [{"role": "user", "content": text}]
        if self.system_prompt is not None:
            messages.insert(0, {"role": "system", "content": self.system_prompt})
        last_error = None
        for attempt in range(1, retries + 1):
            try:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    temperature=temperature,
                    response_format={"type": response_type}
                )
                result = response.choices[0].message.content
                if json_mode:
                    result = json.loads(result)
                print(result)
                return result
            except Exception as e:
                last_error = e
                print(f"{__name__} attempt {attempt}/{retries} failed: {e}")
        raise RuntimeError(f"LLM.ask failed after {retries} retries") from last_error
|
||||
|
||||
|
||||
class Operator:
    """Named, LLM-backed callable; the smallest unit composed into a graph.

    Subclasses implement __call__ with operator-specific arguments.
    """

    def __init__(self, name, llm:LLM=None):
        self.name = name  # human-readable operator name
        self.llm = llm    # backing LLM client (may be None until assigned)

    def __call__(self, *args, **kwargs):
        raise NotImplementedError
|
||||
|
||||
class Generate(Operator):
    """Produce a free-form solution for a problem description.

    Generate code & Generate text should eventually be split (see GenerateCode).
    """

    def __init__(self, name: str = "Generator", llm: LLM = None):
        # BUG FIX: a default of `LLM()` in the signature is evaluated once at
        # import time and shared by every instance; create it lazily instead.
        super().__init__(name, llm if llm is not None else LLM())

    def __call__(self, problem_description):
        prompt = GENERATE_PROMPT.format(problem_description=problem_description)
        response = self.llm.ask(prompt, json_mode=True)
        return {"solution": response.get("solution")}
|
||||
|
||||
class GenerateCode(Operator):
    """Generate a code solution for a problem description."""

    def __init__(self, name: str = "Coder", llm: LLM = None):
        # BUG FIX: lazy default — `llm: LLM = LLM()` would be evaluated once
        # at import time and shared by all instances.
        super().__init__(name, llm if llm is not None else LLM())

    def __call__(self, problem_description):
        prompt = GENERATE_CODE_PROMPT.format(problem_description=problem_description)
        response = self.llm.ask(prompt, json_mode=True)
        return {"code": response.get("code")}
|
||||
|
||||
class Review(Operator):
    """Review a proposed solution; returns a pass/fail verdict plus feedback."""

    def __init__(self, criteria, name: str = "Reviewer", llm: LLM = None):
        self.criteria = criteria
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    def __call__(self, problem_description, solution):
        # TODO: REVIEW_PROMPT has no {criteria} placeholder yet, so the
        # configured criteria are currently not shown to the model.
        prompt = REVIEW_PROMPT.format(problem_description=problem_description, solution=solution)
        response = self.llm.ask(prompt, json_mode=True)
        if response.get("result") is True:
            return {"result": True}
        # BUG FIX: the prompt asks the model for a "comment" key, but this
        # code only read "feedback", so feedback was always None; accept
        # either key.
        return {"result": False, "feedback": response.get("feedback") or response.get("comment")}
|
||||
|
||||
class Revise(Operator):
    """Revise a solution according to review feedback."""

    def __init__(self, name: str = "Reviser", llm: LLM = None):
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    def __call__(self, problem_description, solution, feedback):
        # NOTE(review): REVISE_PROMPT must expose a {feedback} placeholder;
        # a {comment} placeholder would raise KeyError at format time.
        prompt = REVISE_PROMPT.format(problem_description=problem_description, solution=solution, feedback=feedback)
        response = self.llm.ask(prompt, json_mode=True)
        return {"revised_solution": response.get("revised_solution")}
|
||||
|
||||
class Ensemble(Operator):
    """Merge several candidate solutions into a single answer."""

    def __init__(self, name: str = "Ensembler", llm: LLM = None):
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    def __call__(self, solutions, problem_description):
        # BUG FIX: the old signature (*args, problem_description) made
        # problem_description keyword-only, so positional callers passing a
        # list of solutions broke; accept the list explicitly, matching the
        # w_action_node Ensemble. str.join is linear where += was quadratic.
        solution_text = "".join(f"{solution}\n" for solution in solutions)
        prompt = ENSEMBLE_PROMPT.format(solutions=solution_text, problem_description=problem_description)
        response = self.llm.ask(prompt, json_mode=True)
        return {"ensembled_solution": response.get("ensembled_solution")}
|
||||
|
||||
57
examples/ags/demo/prompt.py
Normal file
57
examples/ags/demo/prompt.py
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/26/2024 17:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : prompts of operators
|
||||
|
||||
|
||||
GENERATE_PROMPT = """
|
||||
Generate Solution for the following problem: {problem_description}
|
||||
|
||||
Please structure your response in JSON format as follows:
|
||||
{{
|
||||
"solution": "<your solution>"
|
||||
}}
|
||||
"""
|
||||
|
||||
GENERATE_CODE_PROMPT = """
|
||||
Generate Code Solution for the following problem: {problem_description}
|
||||
|
||||
Please structure your response in JSON format as follows:
|
||||
{{
|
||||
"code": "<your code>"
|
||||
}}
|
||||
"""
|
||||
|
||||
REVIEW_PROMPT = """
|
||||
For the question described as {problem_description},
|
||||
please review the following solution: {solution}, and provide a review result in boolean format.
|
||||
If you believe the solution is capable of resolving the issue, return True; otherwise, return False, and include your comments
|
||||
|
||||
Please structure your response in JSON format as follows:
|
||||
{{
|
||||
"result": <result>,
|
||||
"comment": "<if result is ture, don't response this>"
|
||||
}}
|
||||
"""
|
||||
|
||||
# Revise prompt.
# BUG FIX: the placeholder was {comment}, but Revise.__call__ in
# examples/ags/demo/operator.py formats this template with feedback=...,
# which raised KeyError at runtime.
REVISE_PROMPT = """
For the question described as {problem_description},
please evaluate and revise the solution provided: {solution}, taking into account the review comments: {feedback}."
Then output the revised solution.

Please structure your response in JSON format as follows:
{{
    "revised_solution": "<your revised solution>"
}}

"""
|
||||
|
||||
ENSEMBLE_PROMPT = """
|
||||
For the question described as {problem_description},
|
||||
please ensemble the following solutions: {solutions}, and provide an ensemble result.
|
||||
|
||||
Please structure your response in JSON format as follows:
|
||||
{{
|
||||
"ensembled_solution": "<your ensembled solution>"
|
||||
}}
|
||||
"""
|
||||
46
examples/ags/w_action_node/graph.py
Normal file
46
examples/ags/w_action_node/graph.py
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 22:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : graph & an instance - humanevalgraph
|
||||
|
||||
from metagpt.llm import LLM
|
||||
|
||||
from examples.ags.w_action_node.operator import Generate, GenerateCode, Review, Revise, Ensemble
|
||||
|
||||
class Graph:
    """Base class for operator graphs; subclasses implement __call__."""

    def __init__(self, name: str, llm: LLM) -> None:
        self.name = name
        # TODO: should each operator use a different model/graph?
        self.model = llm

    def __call__(self, *args, **kwargs):
        # BUG FIX: the original signature was missing `self`, and the
        # NotImplementedError was instantiated but never raised.
        raise NotImplementedError("Subclasses must implement __call__ method")
|
||||
|
||||
|
||||
class HumanEvalGraph(Graph):
    """Async solve/review/revise graph with a simple ensemble over candidates."""

    def __init__(self, name:str, llm: LLM, criteria:str) -> None:
        super().__init__(name, llm)
        self.criteria = criteria  # TODO: when auto-building graphs, the graph's init params derive from the external params its operators require
        self.generate_code = GenerateCode(llm=llm)
        self.review = Review(llm=llm, criteria=criteria)
        self.revise = Revise(llm=llm)
        self.ensemble = Ensemble(llm=llm)

    async def __call__(self, problem:str, ensemble_count:int = 2):
        """Produce *ensemble_count* candidate solutions, then ensemble them."""
        # TODO: Ensemble implementation
        solution_list = []
        for _ in range(ensemble_count):
            solution = await self.single_solve(problem, 3)
            # Each operator returns a model_dump() dict; extract the code text.
            solution = solution.get('code_solution')
            solution_list.append(solution)
        solution = await self.ensemble(solution_list, problem)
        return solution

    async def single_solve(self, problem:str, max_loop:int):
        """One generate -> (review -> revise)* pass; stops early on approval."""
        solution = await self.generate_code(problem)
        for _ in range(max_loop):
            # NOTE(review): `solution` is a dict here, so its repr is what
            # gets formatted into the review/revise prompts — confirm this
            # is intended rather than passing solution['code_solution'].
            review_feedback = await self.review(problem, solution)
            if review_feedback['review_result']:
                break
            solution = await self.revise(problem, solution, review_feedback['feedback'])
        return solution
|
||||
78
examples/ags/w_action_node/operator.py
Normal file
78
examples/ags/w_action_node/operator.py
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 17:36 PM
|
||||
# @Author : didi
|
||||
# @Desc : operator demo of ags
|
||||
|
||||
from typing import List
|
||||
|
||||
from metagpt.actions.action_node import ActionNode
|
||||
from metagpt.llm import LLM
|
||||
|
||||
from examples.ags.w_action_node.operator_an import GENERATE_NODE, GENERATE_CODE_NODE, REVIEW_NODE, REVISE_NODE, ENSEMBLE_NODE
|
||||
from examples.ags.w_action_node.prompt import GENERATE_PROMPT, GENERATE_CODE_PROMPT, REVIEW_PROMPT, REVISE_PROMPT, ENSEMBLE_PROMPT
|
||||
|
||||
class Operator:
    """Named, LLM-backed callable; the smallest unit composed into a graph.

    Subclasses implement an async __call__ with operator-specific arguments.
    """

    def __init__(self, name, llm:LLM=None):
        self.name = name  # human-readable operator name
        self.llm = llm    # backing LLM client (may be None until assigned)

    def __call__(self, *args, **kwargs):
        raise NotImplementedError
|
||||
|
||||
class Generate(Operator):
    """Generate a free-form solution via the GENERATE action node."""

    def __init__(self, name: str = "Generator", llm: LLM = None):
        # BUG FIX: a default of `LLM()` in the signature is evaluated once at
        # import time and shared by every instance; create it lazily instead.
        super().__init__(name, llm if llm is not None else LLM())

    async def __call__(self, problem_description):
        prompt = GENERATE_PROMPT.format(problem_description=problem_description)
        node = await GENERATE_NODE.fill(context=prompt, llm=self.llm)
        # model_dump() yields e.g. {"solution": "..."}.
        response = node.instruct_content.model_dump()
        return response
|
||||
|
||||
class GenerateCode(Operator):
    """Generate a code solution via the GENERATE_CODE action node."""

    def __init__(self, name: str = "Coder", llm: LLM = None):
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    async def __call__(self, problem_description):
        prompt = GENERATE_CODE_PROMPT.format(problem_description=problem_description)
        node = await GENERATE_CODE_NODE.fill(context=prompt, llm=self.llm)
        # model_dump() yields e.g. {"code_solution": "..."}.
        response = node.instruct_content.model_dump()
        return response
|
||||
|
||||
class Review(Operator):
    """Review a solution via the REVIEW action node; yields verdict + feedback."""

    def __init__(self, criteria, name: str = "Reviewer", llm: LLM = None):
        self.criteria = criteria
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    async def __call__(self, problem_description, solution):
        # NOTE(review): criteria is passed here, but REVIEW_PROMPT only uses
        # it if the template declares a {criteria} placeholder (str.format
        # silently ignores unused keyword arguments).
        prompt = REVIEW_PROMPT.format(problem_description=problem_description, solution=solution, criteria=self.criteria)
        node = await REVIEW_NODE.fill(context=prompt, llm=self.llm)
        # model_dump() yields {"review_result": bool, "feedback": str}.
        response = node.instruct_content.model_dump()
        return response
|
||||
|
||||
class Revise(Operator):
    """Revise a solution according to review feedback via the REVISE node."""

    def __init__(self, name: str = "Reviser", llm: LLM = None):
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    async def __call__(self, problem_description, solution, feedback):
        # NOTE(review): REVISE_PROMPT must expose a {feedback} placeholder;
        # a {comment} placeholder would raise KeyError at format time.
        prompt = REVISE_PROMPT.format(problem_description=problem_description, solution=solution, feedback=feedback)
        node = await REVISE_NODE.fill(context=prompt, llm=self.llm)
        response = node.instruct_content.model_dump()
        return response
|
||||
|
||||
class Ensemble(Operator):
    """Merge several candidate solutions via the ENSEMBLE action node."""

    def __init__(self, name: str = "Ensembler", llm: LLM = None):
        # BUG FIX: lazy default instead of a shared import-time LLM() instance.
        super().__init__(name, llm if llm is not None else LLM())

    async def __call__(self, solutions: List, problem_description):
        # str.join is linear where repeated += on a str is quadratic.
        solution_text = "".join(f"{solution}\n" for solution in solutions)
        prompt = ENSEMBLE_PROMPT.format(solutions=solution_text, problem_description=problem_description)
        node = await ENSEMBLE_NODE.fill(context=prompt, llm=self.llm)
        response = node.instruct_content.model_dump()
        return response
|
||||
65
examples/ags/w_action_node/operator_an.py
Normal file
65
examples/ags/w_action_node/operator_an.py
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/27/2024 19:46 PM
|
||||
# @Author : didi
|
||||
# @Desc : action nodes for operator
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from metagpt.actions.action_node import ActionNode
|
||||
|
||||
# Leaf ActionNodes: each declares one JSON key the LLM must fill in.
SOLUTION = ActionNode(
    key="solution",
    expected_type=str,
    instruction="Your Solution for this problem",
    example=""
)

CODE_SOLUTION = ActionNode(
    key="code_solution",
    expected_type=str,
    instruction="Your Code Solution for this problem",
    example=""
)

REVIEW_RESULT = ActionNode(
    key="review_result",
    expected_type=bool,
    instruction="The Review Result (Bool). If you think this solution looks good for you, return 'true'; If not, return 'false'",
    example=""
)

FEEDBACK = ActionNode(
    key="feedback",
    expected_type=str,
    instruction="Your FeedBack for this problem based on the criteria. If the review result is true, you can put it 'nothing here'.",
    example=""
)

# Composite nodes consumed by the operators in examples/ags/w_action_node/operator.py.
# NOTE(review): REVISE_NODE and ENSEMBLE_NODE reuse SOLUTION (key "solution"),
# while the pydantic models below use "revised_solution"/"final_solution" —
# confirm which key downstream readers expect.
GENERATE_NODE = ActionNode.from_children("Generate", [SOLUTION])
GENERATE_CODE_NODE = ActionNode.from_children("GenerateCode", [CODE_SOLUTION])
REVIEW_NODE = ActionNode.from_children("Review", [REVIEW_RESULT, FEEDBACK])
REVISE_NODE = ActionNode.from_children("Revise", [SOLUTION])
ENSEMBLE_NODE = ActionNode.from_children("Ensemble", [SOLUTION])
|
||||
|
||||
# Pydantic schemas mirroring the ActionNodes above (from_pydantic-style usage).
class Generate(BaseModel):
    # Free-form solution text.
    solution: str = Field(default="", description="Your Solution for this problem")

class GenerateCode(BaseModel):
    # Code-only solution text.
    code_solution: str = Field(default="", description="Your Code Solution for this problem")

class Review(BaseModel):
    # Boolean verdict plus textual feedback.
    review_result: bool = Field(default=False, description="The Review Result (Bool). If you think this solution looks good for you, return 'true'; If not, return 'false'")
    feedback: str = Field(default="", description="Your FeedBack for this problem based on the criteria. If the review result is true, you can put it 'nothing here'.")

class Revise(BaseModel):
    # NOTE(review): field name differs from REVISE_NODE, which reuses the
    # "solution" key — confirm which one consumers read.
    revised_solution: str = Field(default="", description="Revised solution for this problem")

class Ensemble(BaseModel):
    # NOTE(review): field name differs from ENSEMBLE_NODE, which reuses the
    # "solution" key — confirm which one consumers read.
    final_solution: str = Field(default="", description="Final ensemble solution for this problem")
|
||||
|
||||
|
||||
|
||||
|
||||
# Next, I will give you two code snippets; please rewrite them according to my requirements.
# The first snippet aggregates multiple Nodes from child ActionNodes via the from_children method.
# The second snippet implements a Node using Pydantic together with the from_pydantic method.
# Now, using the from_pydantic approach in the style of the second snippet, implement the five
# Nodes from the first snippet (GENERATE -> ENSEMBLE), each as a class like those in the second snippet.
|
||||
30
examples/ags/w_action_node/prompt.py
Normal file
30
examples/ags/w_action_node/prompt.py
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# @Date : 6/26/2024 17:07 PM
|
||||
# @Author : didi
|
||||
# @Desc : prompts of operators
|
||||
|
||||
|
||||
GENERATE_PROMPT = """
|
||||
Generate Solution for the following problem: {problem_description}
|
||||
"""
|
||||
|
||||
GENERATE_CODE_PROMPT = """
|
||||
Generate Code Solution for the following problem: {problem_description}
|
||||
"""
|
||||
|
||||
# Review prompt.
# FIX: Review.__call__ in examples/ags/w_action_node/operator.py already
# passes criteria=..., but the template had no {criteria} placeholder, so the
# criteria were silently dropped (str.format ignores unused kwargs).
REVIEW_PROMPT = """
For the question described as {problem_description},
please review the following solution: {solution} against the following criteria: {criteria},
and provide a review result in boolean format.
If you believe the solution is capable of resolving the issue, return True; otherwise, return False, and include your comments
"""
|
||||
|
||||
# Revise prompt.
# BUG FIX: the placeholder was {comment}, but Revise.__call__ in
# examples/ags/w_action_node/operator.py formats this template with
# feedback=..., which raised KeyError at runtime.
REVISE_PROMPT = """
For the question described as {problem_description},
please evaluate and revise the solution provided: {solution}, taking into account the review comments: {feedback}."
Then output the revised solution.
"""
|
||||
|
||||
ENSEMBLE_PROMPT = """
|
||||
For the question described as {problem_description},
|
||||
please ensemble the following solutions: {solutions}, and provide an ensemble solution.
|
||||
"""
|
||||
Loading…
Add table
Add a link
Reference in a new issue