mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-05-09 15:52:38 +02:00
format SPO code and add figure from paper
This commit is contained in:
parent
322003aad7
commit
852dc20a84
13 changed files with 156 additions and 148 deletions
|
|
@ -4,6 +4,10 @@ # SPO 🤖 | Self-Supervised Prompt Optimizer
|
|||
|
||||
A next-generation prompt engineering system implementing **Self-Supervised Prompt Optimization (SPO)**. Achieves state-of-the-art performance with 17.8-90.9× higher cost efficiency than conventional methods. 🚀
|
||||
|
||||
<p align="center">
|
||||
<a href=""><img src="../../docs/resources/spo/SPO-method.png" alt="Framework of AFlow" title="Framework of AFlow <sub>1</sub>" width="80%"></a>
|
||||
</p>
|
||||
|
||||
## ✨ Core Advantages
|
||||
|
||||
- 💸 **Ultra-Low Cost** - _$0.15 per task optimization_
|
||||
|
|
@ -11,6 +15,25 @@ ## ✨ Core Advantages
|
|||
- ⚡ **Universal Adaptation** - _Closed & open-ended tasks supported_
|
||||
- 🔄 **Self-Evolving** - _Auto-optimization via LLM-as-judge mechanism_
|
||||
|
||||
📖 Read our paper on arXiv (link coming soon)
|
||||
|
||||
## 📊 Experiment
|
||||
|
||||
### Closed Tasks
|
||||
<p align="center">
|
||||
<a href=""><img src="../../docs/resources/spo/SPO-closed_task_table.png" alt="Framework of AFlow" title="Framework of AFlow <sub>1</sub>" width="80%"></a>
|
||||
<a href=""><img src="../../docs/resources/spo/SPO-closed_task_figure.png" alt="Framework of AFlow" title="Framework of AFlow <sub>1</sub>" width="80%"></a>
|
||||
</p>
|
||||
|
||||
*SPO demonstrates superior cost efficiency, requiring only 1.1% to 5.6% of the cost of state-of-the-art methods while maintaining competitive performance.*
|
||||
|
||||
### Open-ended Tasks
|
||||
<p align="center">
|
||||
<a href=""><img src="../../docs/resources/spo/SPO-open-ended _task_figure.png" alt="Framework of AFlow" title="Framework of AFlow <sub>1</sub>" width="80%"></a>
|
||||
</p>
|
||||
|
||||
*SPO significantly improves model performance across all model configurations in open-ended tasks.*
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### 1. Configure Your API Key ⚙️
|
||||
|
|
|
|||
|
|
@ -1,38 +1,27 @@
|
|||
import argparse
|
||||
|
||||
from metagpt.ext.spo.components.optimizer import PromptOptimizer
|
||||
from metagpt.ext.spo.utils.llm_client import SPO_LLM
|
||||
|
||||
|
||||
def parse_args(argv=None):
    """Parse command-line arguments for the SPO PromptOptimizer CLI.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case argparse reads ``sys.argv[1:]`` — so existing
            callers are unaffected; passing a list makes the function
            directly testable.

    Returns:
        argparse.Namespace with the LLM and optimizer settings below.
    """
    parser = argparse.ArgumentParser(description="SPO PromptOptimizer CLI")

    # LLM parameters: separate model/temperature pairs for the three SPO
    # roles (optimization, evaluation, execution).
    parser.add_argument("--opt-model", type=str, default="claude-3-5-sonnet-20240620", help="Model for optimization")
    parser.add_argument("--opt-temp", type=float, default=0.7, help="Temperature for optimization")
    parser.add_argument("--eval-model", type=str, default="gpt-4o-mini", help="Model for evaluation")
    parser.add_argument("--eval-temp", type=float, default=0.3, help="Temperature for evaluation")
    parser.add_argument("--exec-model", type=str, default="gpt-4o-mini", help="Model for execution")
    # Temperature 0 keeps execution deterministic.
    parser.add_argument("--exec-temp", type=float, default=0, help="Temperature for execution")

    # PromptOptimizer parameters.
    parser.add_argument("--workspace", type=str, default="workspace", help="Path for optimized output")
    parser.add_argument("--initial-round", type=int, default=1, help="Initial round number")
    parser.add_argument("--max-rounds", type=int, default=10, help="Maximum number of rounds")
    parser.add_argument("--template", type=str, default="Poem.yaml", help="Template file name")
    parser.add_argument("--name", type=str, default="Poem", help="Project name")
    # store_false with dest="iteration": iteration defaults to True and the
    # flag turns it off.
    parser.add_argument("--no-iteration", action="store_false", dest="iteration", help="Disable iteration mode")

    return parser.parse_args(argv)
|
||||
|
||||
|
|
@ -41,18 +30,9 @@ def main():
|
|||
args = parse_args()
|
||||
|
||||
SPO_LLM.initialize(
|
||||
optimize_kwargs={
|
||||
"model": args.opt_model,
|
||||
"temperature": args.opt_temp
|
||||
},
|
||||
evaluate_kwargs={
|
||||
"model": args.eval_model,
|
||||
"temperature": args.eval_temp
|
||||
},
|
||||
execute_kwargs={
|
||||
"model": args.exec_model,
|
||||
"temperature": args.exec_temp
|
||||
}
|
||||
optimize_kwargs={"model": args.opt_model, "temperature": args.opt_temp},
|
||||
evaluate_kwargs={"model": args.eval_model, "temperature": args.eval_temp},
|
||||
execute_kwargs={"model": args.exec_model, "temperature": args.exec_temp},
|
||||
)
|
||||
|
||||
optimizer = PromptOptimizer(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue