mirror of
https://github.com/katanemo/plano.git
synced 2026-05-09 07:42:43 +02:00
add DigitalOcean pricing, startup validation, and demo update
- MetricsSource::DigitalOceanPricing variant: fetch public DO Gen-AI pricing, normalize as lowercase(creator)/model_id, cost = input + output per million
- cost_metrics endpoint format updated to { "model": { "input_per_million": X, "output_per_million": Y } }
- Startup errors: prefer:cheapest requires cost source, prefer:fastest requires prometheus
- Startup warning: models with no pricing/latency data ranked last
- One-per-type enforcement: digitalocean_pricing; error if cost_metrics + digitalocean_pricing both configured
- cost_snapshot() / latency_snapshot() on ModelMetricsService for startup checks
- Demo config updated to v0.4.0 top-level routing_preferences with cheapest + fastest policies
- docker-compose.yaml + prometheus.yaml + metrics_server.py for demo latency metrics
- Schema and docs updated
This commit is contained in:
parent
76b1f37052
commit
bd7afd911e
10 changed files with 427 additions and 80 deletions
30
demos/llm_routing/model_routing_service/metrics_server.py
Normal file
30
demos/llm_routing/model_routing_service/metrics_server.py
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
"""
|
||||
Minimal Prometheus metrics server for demo purposes.
|
||||
Exposes mock P95 latency data for model routing.
|
||||
"""
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
|
||||
# Static Prometheus text-exposition payload served on every scrape:
# one mock P95 latency gauge sample per demo model.
_EXPOSITION_LINES = (
    "# HELP model_latency_p95_seconds P95 request latency in seconds per model",
    "# TYPE model_latency_p95_seconds gauge",
    'model_latency_p95_seconds{model_name="anthropic/claude-sonnet-4-20250514"} 0.85',
    'model_latency_p95_seconds{model_name="openai/gpt-4o"} 1.20',
    'model_latency_p95_seconds{model_name="openai/gpt-4o-mini"} 0.40',
)
# Pre-encode once; the exposition format requires a trailing newline.
METRICS = ("\n".join(_EXPOSITION_LINES) + "\n").encode()
|
||||
|
||||
|
||||
class MetricsHandler(BaseHTTPRequestHandler):
    """Answer every GET with the canned Prometheus metrics payload."""

    def log_message(self, _format, *_args):
        """Suppress the default per-request access log on stderr."""
        pass

    def do_GET(self):
        """Serve the pre-encoded metrics with the Prometheus content type."""
        self.send_response(200)
        # version=0.0.4 marks the classic Prometheus text exposition format.
        self.send_header("Content-Type", "text/plain; version=0.0.4; charset=utf-8")
        self.end_headers()
        self.wfile.write(METRICS)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Bind to all interfaces on port 8080 and serve until interrupted.
    httpd = HTTPServer(("", 8080), MetricsHandler)
    print("metrics server listening on :8080", flush=True)
    httpd.serve_forever()
|
||||
Loading…
Add table
Add a link
Reference in a new issue