mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-04-25 00:36:55 +02:00
Add README for SPO
This commit is contained in:
parent
5c8e6da655
commit
1b933271e2
4 changed files with 185 additions and 1 deletions
101
examples/spo/README.md
Normal file
101
examples/spo/README.md
Normal file
|
|
@ -0,0 +1,101 @@
|
|||
# SPO 🤖 | Self-Supervised Prompt Optimizer
|
||||
|
||||
An automated prompt engineering tool for Large Language Models (LLMs), designed for universal domain adaptation.
|
||||
|
||||
A next-generation prompt engineering system implementing **Self-Supervised Prompt Optimization (SPO)**. Achieves state-of-the-art performance with 17.8-90.9× higher cost efficiency than conventional methods. 🚀
|
||||
|
||||
## ✨ Core Advantages
|
||||
|
||||
- 💸 **Ultra-Low Cost** - _$0.15 per task optimization_
|
||||
- 🏷️ **Zero Supervision** - _No ground truth/human feedback required_
|
||||
- ⚡ **Universal Adaptation** - _Closed & open-ended tasks supported_
|
||||
- 🔄 **Self-Evolving** - _Auto-optimization via LLM-as-judge mechanism_
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### 1. Configure Your API Key ⚙️
|
||||
|
||||
Configure LLM parameters in `config/config2.yaml` (see `examples/spo/config2.example.yaml` for reference)
|
||||
### 2. Define Your Iteration template 📝
|
||||
|
||||
Create an iteration template file `metagpt/ext/spo/settings/task_name.yaml`:
|
||||
```yaml
|
||||
prompt: |
|
||||
solve question.
|
||||
|
||||
requirements: |
|
||||
...
|
||||
|
||||
count: None
|
||||
|
||||
faq:
|
||||
- question: |
|
||||
...
|
||||
answer: |
|
||||
...
|
||||
|
||||
- question: |
|
||||
...
|
||||
answer: |
|
||||
...
|
||||
```
|
||||
|
||||
### 3. Implement the Optimizer 🔧
|
||||
|
||||
|
||||
|
||||
Use `metagpt/ext/spo/optimize.py` to execute:
|
||||
|
||||
```python
|
||||
from metagpt.ext.spo.scripts.optimizer import Optimizer
|
||||
from metagpt.ext.spo.scripts.utils.llm_client import SPO_LLM
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Initialize LLM settings
|
||||
SPO_LLM.initialize(
|
||||
optimize_kwargs={"model": "claude-3-5-sonnet-20240620", "temperature": 0.7},
|
||||
evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3},
|
||||
execute_kwargs={"model": "gpt-4o-mini", "temperature": 0}
|
||||
)
|
||||
|
||||
# Create and run optimizer
|
||||
optimizer = Optimizer(
|
||||
optimized_path="workspace", # Output directory
|
||||
initial_round=1, # Starting round
|
||||
max_rounds=10, # Maximum optimization rounds
|
||||
template="Poem.yaml", # Template file
|
||||
name="Poem", # Project name
|
||||
iteration=True, # Enable iteration mode
|
||||
)
|
||||
|
||||
optimizer.optimize()
|
||||
```
|
||||
|
||||
Or you can use command line interface:
|
||||
|
||||
```bash
|
||||
python optimize.py [options]
|
||||
```
|
||||
|
||||
Available command line options:
|
||||
```
|
||||
--optimize-model Model for optimization (default: claude-3-5-sonnet-20240620)
|
||||
--optimize-temperature Temperature for optimization (default: 0.7)
|
||||
--evaluate-model Model for evaluation (default: gpt-4o-mini)
|
||||
--evaluate-temperature Temperature for evaluation (default: 0.3)
|
||||
--execute-model Model for execution (default: gpt-4o-mini)
|
||||
--execute-temperature Temperature for execution (default: 0)
|
||||
--workspace Output directory path (default: workspace)
|
||||
--initial-round Initial round number (default: 1)
|
||||
--max-rounds Maximum number of rounds (default: 10)
|
||||
--template Template file name (default: Poem.yaml)
|
||||
--name Project name (default: Poem)
|
||||
--no-iteration Disable iteration mode (iteration enabled by default)
|
||||
```
|
||||
|
||||
For help:
|
||||
```bash
|
||||
python optimize.py --help
|
||||
```
|
||||
12
examples/spo/config2.example.yaml
Normal file
12
examples/spo/config2.example.yaml
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
models:
|
||||
"<model_name>": # model: "gpt-4-turbo" # or gpt-3.5-turbo
|
||||
api_type: "openai" # or azure / ollama / groq etc.
|
||||
base_url: "<your base url>"
|
||||
api_key: "<your api key>"
|
||||
temperature: 0
|
||||
"<another_model_name>":  # each model entry needs a unique key; duplicate YAML keys are invalid
|
||||
api_type: "openai"
|
||||
base_url: "<your base url>"
|
||||
api_key: "<your api key>"
|
||||
temperature: 0
|
||||
|
||||
71
examples/spo/optimize.py
Normal file
71
examples/spo/optimize.py
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
import argparse
|
||||
from metagpt.ext.spo.scripts.optimizer import Optimizer
|
||||
from metagpt.ext.spo.scripts.utils.llm_client import SPO_LLM
|
||||
|
||||
|
||||
def parse_args(argv=None):
    """Parse command-line options for the SPO optimizer CLI.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv`` exactly as before; an
            explicit list makes the parser unit-testable.

    Returns:
        argparse.Namespace carrying LLM settings (model/temperature for the
        optimize, evaluate, and execute phases) and optimizer settings
        (workspace, rounds, template, name, iteration flag).
    """
    parser = argparse.ArgumentParser(description='SPO Optimizer CLI')

    # LLM parameters — one model/temperature pair per optimization phase.
    parser.add_argument('--optimize-model', type=str, default='claude-3-5-sonnet-20240620',
                        help='Model for optimization')
    parser.add_argument('--optimize-temperature', type=float, default=0.7,
                        help='Temperature for optimization')
    parser.add_argument('--evaluate-model', type=str, default='gpt-4o-mini',
                        help='Model for evaluation')
    parser.add_argument('--evaluate-temperature', type=float, default=0.3,
                        help='Temperature for evaluation')
    parser.add_argument('--execute-model', type=str, default='gpt-4o-mini',
                        help='Model for execution')
    # default was the int 0; use 0.0 so the default matches type=float
    parser.add_argument('--execute-temperature', type=float, default=0.0,
                        help='Temperature for execution')

    # Optimizer parameters
    parser.add_argument('--workspace', type=str, default='workspace',
                        help='Path for optimized output')
    parser.add_argument('--initial-round', type=int, default=1,
                        help='Initial round number')
    parser.add_argument('--max-rounds', type=int, default=10,
                        help='Maximum number of rounds')
    parser.add_argument('--template', type=str, default='Poem.yaml',
                        help='Template file name')
    parser.add_argument('--name', type=str, default='Poem',
                        help='Project name')
    # store_false: passing the flag disables iteration; absent -> iteration=True
    parser.add_argument('--no-iteration', action='store_false', dest='iteration',
                        help='Disable iteration mode')

    return parser.parse_args(argv)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse options, configure the LLMs, run the optimizer."""
    args = parse_args()

    # Each phase (optimize / evaluate / execute) gets its own
    # model + temperature pair taken from the command line.
    SPO_LLM.initialize(
        optimize_kwargs={"model": args.optimize_model, "temperature": args.optimize_temperature},
        evaluate_kwargs={"model": args.evaluate_model, "temperature": args.evaluate_temperature},
        execute_kwargs={"model": args.execute_model, "temperature": args.execute_temperature},
    )

    optimizer = Optimizer(
        optimized_path=args.workspace,
        initial_round=args.initial_round,
        max_rounds=args.max_rounds,
        template=args.template,
        name=args.name,
        iteration=args.iteration,
    )
    optimizer.optimize()
|
||||
|
||||
|
||||
# Run the CLI only when this file is executed as a script, not on import.
if __name__ == "__main__":
    main()
|
||||
|
|
@ -7,7 +7,7 @@ if __name__ == "__main__":
|
|||
SPO_LLM.initialize(
|
||||
optimize_kwargs={"model": "claude-3-5-sonnet-20240620", "temperature": 0.7},
|
||||
evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3},
|
||||
execute_kwargs={"model": "gpt-4o-mini", "temperature": 0.3}
|
||||
execute_kwargs={"model": "gpt-4o-mini", "temperature": 0}
|
||||
)
|
||||
|
||||
optimizer = Optimizer(
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue