From 1b933271e28d8747fd70fd9eda14e2c6e2b2a48a Mon Sep 17 00:00:00 2001 From: isaacJinyu <1376193973@qq.com> Date: Wed, 5 Feb 2025 19:09:49 +0800 Subject: [PATCH] Add README for SPO --- examples/spo/README.md | 97 ++++++++++++++++++++++++++++++ examples/spo/config2.example.yaml | 12 ++++ examples/spo/optimize.py | 71 +++++++++++++++++++++ metagpt/ext/spo/optimize.py | 2 +- 4 files changed, 181 insertions(+), 1 deletion(-) create mode 100644 examples/spo/README.md create mode 100644 examples/spo/config2.example.yaml create mode 100644 examples/spo/optimize.py diff --git a/examples/spo/README.md b/examples/spo/README.md new file mode 100644 index 000000000..60e029713 --- /dev/null +++ b/examples/spo/README.md @@ -0,0 +1,97 @@ +# SPO 🤖 | Self-Supervised Prompt Optimizer + +An automated prompt engineering tool for Large Language Models (LLMs), designed for universal domain adaptation. + +A next-generation prompt engineering system implementing **Self-Supervised Prompt Optimization (SPO)**. Achieves state-of-the-art performance with 17.8-90.9× higher cost efficiency than conventional methods. 🚀 + +## ✨ Core Advantages + +- 💸 **Ultra-Low Cost** - _$0.15 per task optimization_ +- 🏷️ **Zero Supervision** - _No ground truth/human feedback required_ +- ⚡ **Universal Adaptation** - _Closed & open-ended tasks supported_ +- 🔄 **Self-Evolving** - _Auto-optimization via LLM-as-judge mechanism_ +
+## 🚀 Quick Start + +### 1. Configure Your API Key ⚙️ + +Configure LLM parameters in `config/config2.yaml` (see `examples/spo/config2.example.yaml` for reference) +### 2. Define Your Iteration template 📝 + +Create an Iteration template file `metagpt/ext/spo/settings/task_name.yaml`: +```yaml +prompt: | + solve question. + +requirements: | + ... + +count: None + +faq: + - question: | + ... + answer: | + ... + + - question: | + ... + answer: | + ... +``` + +### 3. Implement the Optimizer 🔧 + +Use `metagpt/ext/spo/optimize.py` to execute: + +```python +from metagpt.ext.spo.scripts.optimizer import Optimizer +from metagpt.ext.spo.scripts.utils.llm_client import SPO_LLM + +if __name__ == "__main__": + # Initialize LLM settings + SPO_LLM.initialize( + optimize_kwargs={"model": "claude-3-5-sonnet-20240620", "temperature": 0.7}, + evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3}, + execute_kwargs={"model": "gpt-4o-mini", "temperature": 0} + ) + + # Create and run optimizer + optimizer = Optimizer( + optimized_path="workspace", # Output directory + initial_round=1, # Starting round + max_rounds=10, # Maximum optimization rounds + template="Poem.yaml", # Template file + name="Poem", # Project name + iteration=True, # Enable iteration mode + ) + + optimizer.optimize() +``` + +Or you can use the command line interface: + +```bash +python optimize.py [options] +``` + +Available command line options: +``` +--optimize-model Model for optimization (default: claude-3-5-sonnet-20240620) +--optimize-temperature Temperature for optimization (default: 0.7) +--evaluate-model Model for evaluation (default: gpt-4o-mini) +--evaluate-temperature Temperature for evaluation (default: 0.3) +--execute-model Model for execution (default: gpt-4o-mini) +--execute-temperature Temperature for execution (default: 0) +--workspace Output directory path (default: workspace) +--initial-round Initial round number (default: 1) +--max-rounds Maximum number of rounds (default: 10) +--template Template file name (default: Poem.yaml) +--name Project name (default: Poem) +--no-iteration Disable iteration mode (iteration enabled by default) +``` + +For help: +```bash +python optimize.py --help +``` \ No newline at end of file diff --git a/examples/spo/config2.example.yaml b/examples/spo/config2.example.yaml new file mode 100644 index 000000000..3afa5406b --- /dev/null +++ b/examples/spo/config2.example.yaml @@ -0,0 +1,12 @@ +models: + "": # model: "gpt-4-turbo" 
# or gpt-3.5-turbo + api_type: "openai" # or azure / ollama / groq etc. + base_url: "" + api_key: "" + temperature: 0 + "": + api_type: "openai" + base_url: "" + api_key: "" + temperature: 0 + diff --git a/examples/spo/optimize.py b/examples/spo/optimize.py new file mode 100644 index 000000000..796aa988d --- /dev/null +++ b/examples/spo/optimize.py @@ -0,0 +1,71 @@ +import argparse +from metagpt.ext.spo.scripts.optimizer import Optimizer +from metagpt.ext.spo.scripts.utils.llm_client import SPO_LLM + + +def parse_args(): + parser = argparse.ArgumentParser(description='SPO Optimizer CLI') + + # LLM parameter + parser.add_argument('--optimize-model', type=str, default='claude-3-5-sonnet-20240620', + help='Model for optimization') + parser.add_argument('--optimize-temperature', type=float, default=0.7, + help='Temperature for optimization') + parser.add_argument('--evaluate-model', type=str, default='gpt-4o-mini', + help='Model for evaluation') + parser.add_argument('--evaluate-temperature', type=float, default=0.3, + help='Temperature for evaluation') + parser.add_argument('--execute-model', type=str, default='gpt-4o-mini', + help='Model for execution') + parser.add_argument('--execute-temperature', type=float, default=0, + help='Temperature for execution') + + # Optimizer parameter + parser.add_argument('--workspace', type=str, default='workspace', + help='Path for optimized output') + parser.add_argument('--initial-round', type=int, default=1, + help='Initial round number') + parser.add_argument('--max-rounds', type=int, default=10, + help='Maximum number of rounds') + parser.add_argument('--template', type=str, default='Poem.yaml', + help='Template file name') + parser.add_argument('--name', type=str, default='Poem', + help='Project name') + parser.add_argument('--no-iteration', action='store_false', dest='iteration', + help='Disable iteration mode') + + return parser.parse_args() + + +def main(): + args = parse_args() + + SPO_LLM.initialize( + optimize_kwargs={ + 
"model": args.optimize_model, + "temperature": args.optimize_temperature + }, + evaluate_kwargs={ + "model": args.evaluate_model, + "temperature": args.evaluate_temperature + }, + execute_kwargs={ + "model": args.execute_model, + "temperature": args.execute_temperature + } + ) + + optimizer = Optimizer( + optimized_path=args.workspace, + initial_round=args.initial_round, + max_rounds=args.max_rounds, + template=args.template, + name=args.name, + iteration=args.iteration, + ) + + optimizer.optimize() + + +if __name__ == "__main__": + main() diff --git a/metagpt/ext/spo/optimize.py b/metagpt/ext/spo/optimize.py index 8f821d6c9..777374aaa 100644 --- a/metagpt/ext/spo/optimize.py +++ b/metagpt/ext/spo/optimize.py @@ -7,7 +7,7 @@ if __name__ == "__main__": SPO_LLM.initialize( optimize_kwargs={"model": "claude-3-5-sonnet-20240620", "temperature": 0.7}, evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3}, - execute_kwargs={"model": "gpt-4o-mini", "temperature": 0.3} + execute_kwargs={"model": "gpt-4o-mini", "temperature": 0} ) optimizer = Optimizer(