mirror of
https://github.com/katanemo/plano.git
synced 2026-04-25 00:36:34 +02:00
Compare commits
35 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
473ec70b5c | ||
|
|
dafd245332 | ||
|
|
897fda2deb | ||
|
|
5a652eb666 | ||
|
|
b81eb7266c | ||
|
|
78dc4edad9 | ||
|
|
aa726b1bba | ||
|
|
c8079ac971 | ||
|
|
6701195a5d | ||
|
|
22f332f62d | ||
|
|
9812540602 | ||
|
|
c3c213b2fd | ||
|
|
78d8c90184 | ||
|
|
ffea891dba | ||
|
|
e7464b817a | ||
|
|
254d2b03bc | ||
|
|
95a7beaab3 | ||
|
|
37600fd07a | ||
|
|
0f67b2c806 | ||
|
|
1f701258cb | ||
|
|
711e4dd07d | ||
|
|
743d074184 | ||
|
|
d39d7ddd1c | ||
|
|
90b926c2ce | ||
|
|
980faef6be | ||
|
|
128059e7c1 | ||
|
|
8dedf0bec1 | ||
|
|
978b1ea722 | ||
|
|
9406af3a09 | ||
|
|
aa16a6dc4b | ||
|
|
7606c55b4b | ||
|
|
1d3f4d6c05 | ||
|
|
5d79e7a7d4 | ||
|
|
76ff353c1e | ||
|
|
39b430d74b |
205 changed files with 31681 additions and 13264 deletions
8
.github/workflows/ci.yml
vendored
8
.github/workflows/ci.yml
vendored
|
|
@ -133,13 +133,13 @@ jobs:
|
|||
load: true
|
||||
tags: |
|
||||
${{ env.PLANO_DOCKER_IMAGE }}
|
||||
${{ env.DOCKER_IMAGE }}:0.4.16
|
||||
${{ env.DOCKER_IMAGE }}:0.4.21
|
||||
${{ env.DOCKER_IMAGE }}:latest
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
||||
- name: Save image as artifact
|
||||
run: docker save ${{ env.PLANO_DOCKER_IMAGE }} ${{ env.DOCKER_IMAGE }}:0.4.16 ${{ env.DOCKER_IMAGE }}:latest -o /tmp/plano-image.tar
|
||||
run: docker save ${{ env.PLANO_DOCKER_IMAGE }} ${{ env.DOCKER_IMAGE }}:0.4.21 ${{ env.DOCKER_IMAGE }}:latest -o /tmp/plano-image.tar
|
||||
|
||||
- name: Upload image artifact
|
||||
uses: actions/upload-artifact@v6
|
||||
|
|
@ -477,7 +477,7 @@ jobs:
|
|||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
run: |
|
||||
source venv/bin/activate
|
||||
cd demos/shared/test_runner && sh run_demo_tests.sh llm_routing/preference_based_routing
|
||||
cd demos/shared/test_runner && bash run_demo_tests.sh llm_routing/preference_based_routing
|
||||
|
||||
# ──────────────────────────────────────────────
|
||||
# E2E: demo — currency conversion
|
||||
|
|
@ -527,4 +527,4 @@ jobs:
|
|||
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
|
||||
run: |
|
||||
source venv/bin/activate
|
||||
cd demos/shared/test_runner && sh run_demo_tests.sh advanced/currency_exchange
|
||||
cd demos/shared/test_runner && bash run_demo_tests.sh advanced/currency_exchange
|
||||
|
|
|
|||
80
.github/workflows/docker-push-release.yml
vendored
80
.github/workflows/docker-push-release.yml
vendored
|
|
@ -3,6 +3,8 @@ name: Publish docker image (release)
|
|||
env:
|
||||
DOCKER_IMAGE: katanemo/plano
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/plano
|
||||
DOCR_IMAGE: registry.digitalocean.com/genai-prod/plano
|
||||
DOCR_PREVIEW_IMAGE: registry.digitalocean.com/genai-preview/plano
|
||||
|
||||
on:
|
||||
release:
|
||||
|
|
@ -33,6 +35,14 @@ jobs:
|
|||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install doctl
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN }}
|
||||
|
||||
- name: Log in to DOCR (prod)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
|
|
@ -51,6 +61,21 @@ jobs:
|
|||
tags: |
|
||||
${{ steps.meta.outputs.tags }}-arm64
|
||||
${{ env.GHCR_IMAGE }}:${{ github.event.release.tag_name }}-arm64
|
||||
${{ env.DOCR_IMAGE }}:${{ github.event.release.tag_name }}-arm64
|
||||
|
||||
- name: Switch to DOCR preview
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN_PREVIEW }}
|
||||
|
||||
- name: Log in to DOCR (preview)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Push to DOCR Preview
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.DOCR_PREVIEW_IMAGE }}:${{ github.event.release.tag_name }}-arm64 \
|
||||
${{ steps.meta.outputs.tags }}-arm64
|
||||
|
||||
# Build AMD64 image on GitHub's AMD64 runner — push to both registries
|
||||
build-amd64:
|
||||
|
|
@ -72,6 +97,14 @@ jobs:
|
|||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install doctl
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN }}
|
||||
|
||||
- name: Log in to DOCR (prod)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
|
|
@ -90,6 +123,21 @@ jobs:
|
|||
tags: |
|
||||
${{ steps.meta.outputs.tags }}-amd64
|
||||
${{ env.GHCR_IMAGE }}:${{ github.event.release.tag_name }}-amd64
|
||||
${{ env.DOCR_IMAGE }}:${{ github.event.release.tag_name }}-amd64
|
||||
|
||||
- name: Switch to DOCR preview
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN_PREVIEW }}
|
||||
|
||||
- name: Log in to DOCR (preview)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Push to DOCR Preview
|
||||
run: |
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.DOCR_PREVIEW_IMAGE }}:${{ github.event.release.tag_name }}-amd64 \
|
||||
${{ steps.meta.outputs.tags }}-amd64
|
||||
|
||||
# Combine ARM64 and AMD64 images into multi-arch manifests for both registries
|
||||
create-manifest:
|
||||
|
|
@ -109,6 +157,14 @@ jobs:
|
|||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install doctl
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN }}
|
||||
|
||||
- name: Log in to DOCR (prod)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Extract metadata for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
|
|
@ -131,3 +187,27 @@ jobs:
|
|||
-t ${{ env.GHCR_IMAGE }}:${TAG} \
|
||||
${{ env.GHCR_IMAGE }}:${TAG}-arm64 \
|
||||
${{ env.GHCR_IMAGE }}:${TAG}-amd64
|
||||
|
||||
- name: Create DOCR Prod Multi-Arch Manifest
|
||||
run: |
|
||||
TAG=${{ github.event.release.tag_name }}
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.DOCR_IMAGE }}:${TAG} \
|
||||
${{ env.DOCR_IMAGE }}:${TAG}-arm64 \
|
||||
${{ env.DOCR_IMAGE }}:${TAG}-amd64
|
||||
|
||||
- name: Switch to DOCR preview
|
||||
uses: digitalocean/action-doctl@v2
|
||||
with:
|
||||
token: ${{ secrets.PLATFORM_DIGITALOCEAN_TOKEN_PREVIEW }}
|
||||
|
||||
- name: Log in to DOCR (preview)
|
||||
run: doctl registry login
|
||||
|
||||
- name: Create DOCR Preview Multi-Arch Manifest
|
||||
run: |
|
||||
TAG=${{ github.event.release.tag_name }}
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.DOCR_PREVIEW_IMAGE }}:${TAG} \
|
||||
${{ steps.meta.outputs.tags }}-arm64 \
|
||||
${{ steps.meta.outputs.tags }}-amd64
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ Plano pulls rote plumbing out of your framework so you can stay focused on what
|
|||
**Jump to our [docs](https://docs.planoai.dev)** to learn how you can use Plano to improve the speed, safety and obervability of your agentic applications.
|
||||
|
||||
> [!IMPORTANT]
|
||||
> Plano and the Arch family of LLMs (like Plano-Orchestrator-4B, Arch-Router, etc) are hosted free of charge in the US-central region to give you a great first-run developer experience of Plano. To scale and run in production, you can either run these LLMs locally or contact us on [Discord](https://discord.gg/pGZf2gcwEc) for API keys.
|
||||
> Plano and the Plano family of LLMs (like Plano-Orchestrator) are hosted free of charge in the US-central region to give you a great first-run developer experience of Plano. To scale and run in production, you can either run these LLMs locally or contact us on [Discord](https://discord.gg/pGZf2gcwEc) for API keys.
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@
|
|||
"clean": "rm -rf .next"
|
||||
},
|
||||
"dependencies": {
|
||||
"@heroicons/react": "^2.2.0",
|
||||
"@katanemo/shared-styles": "*",
|
||||
"@katanemo/ui": "*",
|
||||
"next": "^16.1.6",
|
||||
|
|
|
|||
|
|
@ -66,7 +66,9 @@ export default function RootLayout({
|
|||
}>) {
|
||||
return (
|
||||
<html lang="en">
|
||||
<body className={`${ibmPlexSans.variable} antialiased text-white`}>
|
||||
<body
|
||||
className={`${ibmPlexSans.variable} overflow-hidden antialiased text-white`}
|
||||
>
|
||||
{/* Google tag (gtag.js) */}
|
||||
<Script
|
||||
src="https://www.googletagmanager.com/gtag/js?id=G-RLD5BDNW5N"
|
||||
|
|
@ -80,7 +82,9 @@ export default function RootLayout({
|
|||
gtag('config', 'G-RLD5BDNW5N');
|
||||
`}
|
||||
</Script>
|
||||
<div className="min-h-screen">{children}</div>
|
||||
<div className="h-screen overflow-hidden">
|
||||
{children}
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
);
|
||||
|
|
|
|||
|
|
@ -1,11 +1,23 @@
|
|||
import Image from "next/image";
|
||||
import Link from "next/link";
|
||||
import LogoSlider from "../components/LogoSlider";
|
||||
import { ArrowRightIcon } from "@heroicons/react/16/solid";
|
||||
|
||||
export default function HomePage() {
|
||||
return (
|
||||
<main className="relative flex min-h-screen items-center justify-center overflow-hidden px-6 pt-12 pb-16 font-sans sm:pt-20 lg:items-start lg:justify-start lg:pt-24">
|
||||
<main className="relative flex h-full items-center justify-center overflow-hidden px-6 pt-12 pb-16 font-sans sm:pt-20 lg:items-start lg:justify-start lg:pt-24">
|
||||
<div className="relative mx-auto w-full max-w-6xl flex flex-col items-center justify-center text-left lg:items-start lg:justify-start">
|
||||
<Link
|
||||
href="https://digitalocean.com/blog/digitalocean-acquires-katanemo-labs-inc"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="mb-7 inline-flex max-w-[20rem] items-center gap-1 self-start rounded-full border border-[#22A875]/30 bg-[#22A875]/30 px-2.5 py-1 text-left text-[12px] leading-tight font-medium text-white transition-opacity hover:opacity-90 lg:hidden"
|
||||
>
|
||||
<span>
|
||||
DigitalOcean acquires Katanemo Labs, Inc.
|
||||
</span>
|
||||
<ArrowRightIcon aria-hidden className="h-3 w-3 shrink-0 text-white/90" />
|
||||
</Link>
|
||||
<div className="pointer-events-none mb-6 w-full self-start lg:hidden">
|
||||
<Image
|
||||
src="/KatanemoLogo.svg"
|
||||
|
|
@ -17,6 +29,20 @@ export default function HomePage() {
|
|||
/>
|
||||
</div>
|
||||
<div className="relative z-10 max-w-xl sm:max-w-2xl lg:max-w-2xl xl:max-w-8xl lg:pr-[26vw] xl:pr-[2vw] sm:right-0 md:right-0 lg:right-0 xl:right-20 2xl:right-50 sm:mt-36 mt-0">
|
||||
<Link
|
||||
href="https://digitalocean.com/blog/digitalocean-acquires-katanemo-labs-inc"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="mb-4 hidden max-w-full items-center gap-2 rounded-full border border-[#22A875]/70 bg-[#22A875]/50 px-4 py-1 text-left text-sm font-medium text-white transition-opacity hover:opacity-90 lg:inline-flex"
|
||||
>
|
||||
<span>
|
||||
DigitalOcean acquires Katanemo Labs, Inc.
|
||||
</span>
|
||||
<ArrowRightIcon
|
||||
aria-hidden
|
||||
className="h-4 w-4 shrink-0 text-white/90"
|
||||
/>
|
||||
</Link>
|
||||
<h1 className="text-3xl sm:text-4xl md:text-5xl lg:text-6xl font-sans font-medium leading-tight tracking-tight text-white">
|
||||
Forward-deployed AI infrastructure engineers.
|
||||
</h1>
|
||||
|
|
|
|||
|
|
@ -16,6 +16,7 @@
|
|||
"migrate:blogs": "tsx scripts/migrate-blogs.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@heroicons/react": "^2.2.0",
|
||||
"@katanemo/shared-styles": "*",
|
||||
"@katanemo/ui": "*",
|
||||
"@portabletext/react": "^5.0.0",
|
||||
|
|
@ -32,8 +33,8 @@
|
|||
"next": "^16.1.6",
|
||||
"next-sanity": "^11.6.9",
|
||||
"papaparse": "^5.5.3",
|
||||
"react": "19.2.0",
|
||||
"react-dom": "19.2.0",
|
||||
"react": "19.2.3",
|
||||
"react-dom": "19.2.3",
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-syntax-highlighter": "^16.1.0",
|
||||
"remark-gfm": "^4.0.1",
|
||||
|
|
|
|||
|
|
@ -39,6 +39,10 @@ function loadFont(fileName: string, baseUrl: string) {
|
|||
}
|
||||
|
||||
async function getBlogPost(slug: string) {
|
||||
if (!client) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && slug.current == $slug && published == true][0] {
|
||||
_id,
|
||||
title,
|
||||
|
|
@ -53,8 +57,13 @@ async function getBlogPost(slug: string) {
|
|||
}
|
||||
}`;
|
||||
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post;
|
||||
try {
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post;
|
||||
} catch (error) {
|
||||
console.error("Error fetching blog post for OG image:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function formatDate(dateString: string): string {
|
||||
|
|
|
|||
|
|
@ -17,6 +17,10 @@ interface BlogPost {
|
|||
}
|
||||
|
||||
async function getBlogPost(slug: string): Promise<BlogPost | null> {
|
||||
if (!client) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && slug.current == $slug && published == true][0] {
|
||||
_id,
|
||||
title,
|
||||
|
|
@ -26,8 +30,13 @@ async function getBlogPost(slug: string): Promise<BlogPost | null> {
|
|||
author
|
||||
}`;
|
||||
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post || null;
|
||||
try {
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post || null;
|
||||
} catch (error) {
|
||||
console.error("Error fetching blog post metadata:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export async function generateMetadata({
|
||||
|
|
|
|||
|
|
@ -25,6 +25,10 @@ interface BlogPost {
|
|||
}
|
||||
|
||||
async function getBlogPost(slug: string): Promise<BlogPost | null> {
|
||||
if (!client) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && slug.current == $slug && published == true][0] {
|
||||
_id,
|
||||
title,
|
||||
|
|
@ -51,17 +55,31 @@ async function getBlogPost(slug: string): Promise<BlogPost | null> {
|
|||
author
|
||||
}`;
|
||||
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post || null;
|
||||
try {
|
||||
const post = await client.fetch(query, { slug });
|
||||
return post || null;
|
||||
} catch (error) {
|
||||
console.error("Error fetching blog post:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
async function getAllBlogSlugs(): Promise<string[]> {
|
||||
if (!client) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && published == true] {
|
||||
"slug": slug.current
|
||||
}`;
|
||||
|
||||
const posts = await client.fetch(query);
|
||||
return posts.map((post: { slug: string }) => post.slug);
|
||||
try {
|
||||
const posts = await client.fetch(query);
|
||||
return posts.map((post: { slug: string }) => post.slug);
|
||||
} catch (error) {
|
||||
console.error("Error fetching blog slugs:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
export async function generateStaticParams() {
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import { BlogSectionHeader } from "@/components/BlogSectionHeader";
|
|||
import { pageMetadata } from "@/lib/metadata";
|
||||
|
||||
export const metadata: Metadata = pageMetadata.blog;
|
||||
export const dynamic = "force-dynamic";
|
||||
|
||||
interface BlogPost {
|
||||
_id: string;
|
||||
|
|
@ -44,6 +45,10 @@ function formatDate(dateString: string): string {
|
|||
}
|
||||
|
||||
async function getBlogPosts(): Promise<BlogPost[]> {
|
||||
if (!client) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && published == true] | order(publishedAt desc) {
|
||||
_id,
|
||||
title,
|
||||
|
|
@ -58,12 +63,48 @@ async function getBlogPosts(): Promise<BlogPost[]> {
|
|||
featured
|
||||
}`;
|
||||
|
||||
return await client.fetch(query);
|
||||
try {
|
||||
return await client.fetch(query);
|
||||
} catch (error) {
|
||||
console.error("Error fetching blog posts:", error);
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
async function getFeaturedBlogPost(): Promise<BlogPost | null> {
|
||||
if (!client) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && published == true && featured == true] | order(_updatedAt desc, publishedAt desc)[0] {
|
||||
_id,
|
||||
title,
|
||||
slug,
|
||||
summary,
|
||||
publishedAt,
|
||||
mainImage,
|
||||
mainImageUrl,
|
||||
thumbnailImage,
|
||||
thumbnailImageUrl,
|
||||
author,
|
||||
featured
|
||||
}`;
|
||||
|
||||
try {
|
||||
const post = await client.fetch(query);
|
||||
return post || null;
|
||||
} catch (error) {
|
||||
console.error("Error fetching featured blog post:", error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
export default async function BlogPage() {
|
||||
const posts = await getBlogPosts();
|
||||
const featuredPost = posts.find((post) => post.featured) || posts[0];
|
||||
const [posts, featuredCandidate] = await Promise.all([
|
||||
getBlogPosts(),
|
||||
getFeaturedBlogPost(),
|
||||
]);
|
||||
const featuredPost = featuredCandidate || posts[0];
|
||||
const recentPosts = posts
|
||||
.filter((post) => post._id !== featuredPost?._id)
|
||||
.slice(0, 3);
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
import type { Metadata } from "next";
|
||||
import { ArrowRightIcon } from "@heroicons/react/16/solid";
|
||||
import Link from "next/link";
|
||||
import Script from "next/script";
|
||||
import "@katanemo/shared-styles/globals.css";
|
||||
import { Analytics } from "@vercel/analytics/next";
|
||||
|
|
@ -35,6 +37,27 @@ export default function RootLayout({
|
|||
gtag('config', 'G-ML7B1X9HY2');
|
||||
`}
|
||||
</Script>
|
||||
<Link
|
||||
href="https://digitalocean.com/blog/digitalocean-acquires-katanemo-labs-inc"
|
||||
target="_blank"
|
||||
rel="noopener noreferrer"
|
||||
className="block w-full bg-[#7780D9] py-3 text-white transition-opacity"
|
||||
>
|
||||
<div className="mx-auto flex max-w-[85rem] items-center justify-center gap-4 px-6 text-center md:justify-between md:text-left lg:px-8">
|
||||
<span className="w-full text-xs font-medium leading-snug md:w-auto md:text-base flex items-center">
|
||||
DigitalOcean acquires Katanemo Labs, Inc. to accelerate AI
|
||||
development
|
||||
<ArrowRightIcon
|
||||
aria-hidden
|
||||
className="ml-1 inline-block h-3 w-3 align-[-1px] text-white/90 md:hidden"
|
||||
/>
|
||||
</span>
|
||||
<span className="hidden shrink-0 items-center gap-1 text-base font-medium tracking-[-0.989px] font-mono leading-snug opacity-70 transition-opacity hover:opacity-100 md:inline-flex">
|
||||
Read the announcement
|
||||
<ArrowRightIcon aria-hidden className="h-3.5 w-3.5 text-white/70" />
|
||||
</span>
|
||||
</div>
|
||||
</Link>
|
||||
<ConditionalLayout>{children}</ConditionalLayout>
|
||||
<Analytics />
|
||||
</body>
|
||||
|
|
|
|||
|
|
@ -10,6 +10,10 @@ interface BlogPost {
|
|||
}
|
||||
|
||||
async function getBlogPosts(): Promise<BlogPost[]> {
|
||||
if (!client) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const query = `*[_type == "blog" && published == true] | order(publishedAt desc) {
|
||||
slug,
|
||||
publishedAt,
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ export function Hero() {
|
|||
>
|
||||
<div className="inline-flex flex-wrap items-center gap-1.5 sm:gap-2 px-3 sm:px-4 py-1 rounded-full bg-[rgba(185,191,255,0.4)] border border-[var(--secondary)] shadow backdrop-blur hover:bg-[rgba(185,191,255,0.6)] transition-colors cursor-pointer">
|
||||
<span className="text-xs sm:text-sm font-medium text-black/65">
|
||||
v0.4.16
|
||||
v0.4.21
|
||||
</span>
|
||||
<span className="text-xs sm:text-sm font-medium text-black ">
|
||||
—
|
||||
|
|
|
|||
|
|
@ -2,19 +2,33 @@ import { createClient } from "@sanity/client";
|
|||
import imageUrlBuilder from "@sanity/image-url";
|
||||
import type { SanityImageSource } from "@sanity/image-url/lib/types/types";
|
||||
|
||||
const projectId = process.env.NEXT_PUBLIC_SANITY_PROJECT_ID;
|
||||
const dataset = process.env.NEXT_PUBLIC_SANITY_DATASET;
|
||||
const apiVersion = process.env.NEXT_PUBLIC_SANITY_API_VERSION;
|
||||
const projectId =
|
||||
process.env.NEXT_PUBLIC_SANITY_PROJECT_ID ||
|
||||
"71ny25bn";
|
||||
const dataset =
|
||||
process.env.NEXT_PUBLIC_SANITY_DATASET ||
|
||||
"production";
|
||||
const apiVersion =
|
||||
process.env.NEXT_PUBLIC_SANITY_API_VERSION ||
|
||||
"2025-01-01";
|
||||
|
||||
export const client = createClient({
|
||||
projectId,
|
||||
dataset,
|
||||
apiVersion,
|
||||
useCdn: true, // Set to false if statically generating pages, using ISR or using the on-demand revalidation API
|
||||
});
|
||||
export const hasSanityConfig = Boolean(projectId && dataset && apiVersion);
|
||||
|
||||
const builder = imageUrlBuilder(client);
|
||||
export const client = hasSanityConfig
|
||||
? createClient({
|
||||
projectId,
|
||||
dataset,
|
||||
apiVersion,
|
||||
// Keep blog/admin updates visible immediately after publishing.
|
||||
useCdn: false,
|
||||
})
|
||||
: null;
|
||||
|
||||
const builder = client ? imageUrlBuilder(client) : null;
|
||||
|
||||
export function urlFor(source: SanityImageSource) {
|
||||
if (!builder) {
|
||||
throw new Error("Sanity client is not configured.");
|
||||
}
|
||||
return builder.image(source);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1 +1 @@
|
|||
docker build -f Dockerfile . -t katanemo/plano -t katanemo/plano:0.4.16
|
||||
docker build -f Dockerfile . -t katanemo/plano -t katanemo/plano:0.4.21
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
"""Plano CLI - Intelligent Prompt Gateway."""
|
||||
|
||||
__version__ = "0.4.16"
|
||||
__version__ = "0.4.21"
|
||||
|
|
|
|||
290
cli/planoai/chatgpt_auth.py
Normal file
290
cli/planoai/chatgpt_auth.py
Normal file
|
|
@ -0,0 +1,290 @@
|
|||
"""
|
||||
ChatGPT subscription OAuth device-flow authentication.
|
||||
|
||||
Implements the device code flow used by OpenAI Codex CLI to authenticate
|
||||
with a ChatGPT Plus/Pro subscription. Tokens are stored locally in
|
||||
~/.plano/chatgpt/auth.json and auto-refreshed when expired.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
|
||||
import requests
|
||||
|
||||
from planoai.consts import PLANO_HOME
|
||||
|
||||
# OAuth + API constants (derived from openai/codex)
|
||||
CHATGPT_AUTH_BASE = "https://auth.openai.com"
|
||||
CHATGPT_DEVICE_CODE_URL = f"{CHATGPT_AUTH_BASE}/api/accounts/deviceauth/usercode"
|
||||
CHATGPT_DEVICE_TOKEN_URL = f"{CHATGPT_AUTH_BASE}/api/accounts/deviceauth/token"
|
||||
CHATGPT_OAUTH_TOKEN_URL = f"{CHATGPT_AUTH_BASE}/oauth/token"
|
||||
CHATGPT_DEVICE_VERIFY_URL = f"{CHATGPT_AUTH_BASE}/codex/device"
|
||||
CHATGPT_API_BASE = "https://chatgpt.com/backend-api/codex"
|
||||
CHATGPT_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"
|
||||
|
||||
# Local storage
|
||||
CHATGPT_AUTH_DIR = os.path.join(PLANO_HOME, "chatgpt")
|
||||
CHATGPT_AUTH_FILE = os.path.join(CHATGPT_AUTH_DIR, "auth.json")
|
||||
|
||||
# Timeouts
|
||||
TOKEN_EXPIRY_SKEW_SECONDS = 60
|
||||
DEVICE_CODE_TIMEOUT_SECONDS = 15 * 60
|
||||
DEVICE_CODE_POLL_SECONDS = 5
|
||||
|
||||
|
||||
def _ensure_auth_dir():
|
||||
os.makedirs(CHATGPT_AUTH_DIR, exist_ok=True)
|
||||
|
||||
|
||||
def load_auth() -> Optional[Dict[str, Any]]:
|
||||
"""Load auth data from disk."""
|
||||
try:
|
||||
with open(CHATGPT_AUTH_FILE, "r") as f:
|
||||
return json.load(f)
|
||||
except (IOError, json.JSONDecodeError):
|
||||
return None
|
||||
|
||||
|
||||
def save_auth(data: Dict[str, Any]):
|
||||
"""Save auth data to disk."""
|
||||
_ensure_auth_dir()
|
||||
fd = os.open(CHATGPT_AUTH_FILE, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
|
||||
with os.fdopen(fd, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
|
||||
|
||||
def delete_auth():
|
||||
"""Remove stored credentials."""
|
||||
try:
|
||||
os.remove(CHATGPT_AUTH_FILE)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
|
||||
def _decode_jwt_claims(token: str) -> Dict[str, Any]:
|
||||
"""Decode JWT payload without verification."""
|
||||
try:
|
||||
parts = token.split(".")
|
||||
if len(parts) < 2:
|
||||
return {}
|
||||
payload_b64 = parts[1]
|
||||
payload_b64 += "=" * (-len(payload_b64) % 4)
|
||||
return json.loads(base64.urlsafe_b64decode(payload_b64).decode("utf-8"))
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
|
||||
def _get_expires_at(token: str) -> Optional[int]:
|
||||
"""Extract expiration time from JWT."""
|
||||
claims = _decode_jwt_claims(token)
|
||||
exp = claims.get("exp")
|
||||
return int(exp) if isinstance(exp, (int, float)) else None
|
||||
|
||||
|
||||
def _extract_account_id(token: Optional[str]) -> Optional[str]:
|
||||
"""Extract ChatGPT account ID from JWT claims."""
|
||||
if not token:
|
||||
return None
|
||||
claims = _decode_jwt_claims(token)
|
||||
auth_claims = claims.get("https://api.openai.com/auth")
|
||||
if isinstance(auth_claims, dict):
|
||||
account_id = auth_claims.get("chatgpt_account_id")
|
||||
if isinstance(account_id, str) and account_id:
|
||||
return account_id
|
||||
return None
|
||||
|
||||
|
||||
def _is_token_expired(auth_data: Dict[str, Any]) -> bool:
|
||||
"""Check if the access token is expired."""
|
||||
expires_at = auth_data.get("expires_at")
|
||||
if expires_at is None:
|
||||
access_token = auth_data.get("access_token")
|
||||
if access_token:
|
||||
expires_at = _get_expires_at(access_token)
|
||||
if expires_at:
|
||||
auth_data["expires_at"] = expires_at
|
||||
save_auth(auth_data)
|
||||
if expires_at is None:
|
||||
return True
|
||||
return time.time() >= float(expires_at) - TOKEN_EXPIRY_SKEW_SECONDS
|
||||
|
||||
|
||||
def _refresh_tokens(refresh_token: str) -> Dict[str, str]:
|
||||
"""Refresh the access token using the refresh token."""
|
||||
resp = requests.post(
|
||||
CHATGPT_OAUTH_TOKEN_URL,
|
||||
json={
|
||||
"client_id": CHATGPT_CLIENT_ID,
|
||||
"grant_type": "refresh_token",
|
||||
"refresh_token": refresh_token,
|
||||
"scope": "openid profile email",
|
||||
},
|
||||
)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
access_token = data.get("access_token")
|
||||
id_token = data.get("id_token")
|
||||
if not access_token or not id_token:
|
||||
raise RuntimeError(f"Refresh response missing fields: {data}")
|
||||
|
||||
return {
|
||||
"access_token": access_token,
|
||||
"refresh_token": data.get("refresh_token", refresh_token),
|
||||
"id_token": id_token,
|
||||
}
|
||||
|
||||
|
||||
def _build_auth_record(tokens: Dict[str, str]) -> Dict[str, Any]:
|
||||
"""Build the auth record to persist."""
|
||||
access_token = tokens.get("access_token")
|
||||
id_token = tokens.get("id_token")
|
||||
expires_at = _get_expires_at(access_token) if access_token else None
|
||||
account_id = _extract_account_id(id_token or access_token)
|
||||
return {
|
||||
"access_token": access_token,
|
||||
"refresh_token": tokens.get("refresh_token"),
|
||||
"id_token": id_token,
|
||||
"expires_at": expires_at,
|
||||
"account_id": account_id,
|
||||
}
|
||||
|
||||
|
||||
def request_device_code() -> Dict[str, str]:
|
||||
"""Request a device code from OpenAI's device auth endpoint."""
|
||||
resp = requests.post(
|
||||
CHATGPT_DEVICE_CODE_URL,
|
||||
json={"client_id": CHATGPT_CLIENT_ID},
|
||||
)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
device_auth_id = data.get("device_auth_id")
|
||||
user_code = data.get("user_code") or data.get("usercode")
|
||||
interval = data.get("interval")
|
||||
if not device_auth_id or not user_code:
|
||||
raise RuntimeError(f"Device code response missing fields: {data}")
|
||||
|
||||
return {
|
||||
"device_auth_id": device_auth_id,
|
||||
"user_code": user_code,
|
||||
"interval": str(interval or "5"),
|
||||
}
|
||||
|
||||
|
||||
def poll_for_authorization(device_code: Dict[str, str]) -> Dict[str, str]:
|
||||
"""Poll until the user completes authorization. Returns code_data."""
|
||||
interval = int(device_code.get("interval", "5"))
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time < DEVICE_CODE_TIMEOUT_SECONDS:
|
||||
try:
|
||||
resp = requests.post(
|
||||
CHATGPT_DEVICE_TOKEN_URL,
|
||||
json={
|
||||
"device_auth_id": device_code["device_auth_id"],
|
||||
"user_code": device_code["user_code"],
|
||||
},
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
data = resp.json()
|
||||
if all(
|
||||
key in data
|
||||
for key in ("authorization_code", "code_challenge", "code_verifier")
|
||||
):
|
||||
return data
|
||||
if resp.status_code in (403, 404):
|
||||
time.sleep(max(interval, DEVICE_CODE_POLL_SECONDS))
|
||||
continue
|
||||
resp.raise_for_status()
|
||||
except requests.HTTPError as exc:
|
||||
if exc.response is not None and exc.response.status_code in (403, 404):
|
||||
time.sleep(max(interval, DEVICE_CODE_POLL_SECONDS))
|
||||
continue
|
||||
raise RuntimeError(f"Polling failed: {exc}") from exc
|
||||
|
||||
time.sleep(max(interval, DEVICE_CODE_POLL_SECONDS))
|
||||
|
||||
raise RuntimeError("Timed out waiting for device authorization")
|
||||
|
||||
|
||||
def exchange_code_for_tokens(code_data: Dict[str, str]) -> Dict[str, str]:
|
||||
"""Exchange the authorization code for access/refresh/id tokens."""
|
||||
redirect_uri = f"{CHATGPT_AUTH_BASE}/deviceauth/callback"
|
||||
body = (
|
||||
"grant_type=authorization_code"
|
||||
f"&code={code_data['authorization_code']}"
|
||||
f"&redirect_uri={redirect_uri}"
|
||||
f"&client_id={CHATGPT_CLIENT_ID}"
|
||||
f"&code_verifier={code_data['code_verifier']}"
|
||||
)
|
||||
resp = requests.post(
|
||||
CHATGPT_OAUTH_TOKEN_URL,
|
||||
headers={"Content-Type": "application/x-www-form-urlencoded"},
|
||||
data=body,
|
||||
)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
|
||||
if not all(key in data for key in ("access_token", "refresh_token", "id_token")):
|
||||
raise RuntimeError(f"Token exchange response missing fields: {data}")
|
||||
|
||||
return {
|
||||
"access_token": data["access_token"],
|
||||
"refresh_token": data["refresh_token"],
|
||||
"id_token": data["id_token"],
|
||||
}
|
||||
|
||||
|
||||
def login() -> Dict[str, Any]:
|
||||
"""Run the full device code login flow. Returns the auth record."""
|
||||
device_code = request_device_code()
|
||||
auth_record = _build_auth_record({})
|
||||
auth_record["device_code_requested_at"] = time.time()
|
||||
save_auth(auth_record)
|
||||
|
||||
print(
|
||||
"\nSign in with your ChatGPT account:\n"
|
||||
f" 1) Visit: {CHATGPT_DEVICE_VERIFY_URL}\n"
|
||||
f" 2) Enter code: {device_code['user_code']}\n\n"
|
||||
"Device codes are a common phishing target. Never share this code.\n",
|
||||
flush=True,
|
||||
)
|
||||
|
||||
code_data = poll_for_authorization(device_code)
|
||||
tokens = exchange_code_for_tokens(code_data)
|
||||
auth_record = _build_auth_record(tokens)
|
||||
save_auth(auth_record)
|
||||
return auth_record
|
||||
|
||||
|
||||
def get_access_token() -> Tuple[str, Optional[str]]:
|
||||
"""
|
||||
Get a valid access token and account ID.
|
||||
Refreshes automatically if expired. Raises if no auth data exists.
|
||||
Returns (access_token, account_id).
|
||||
"""
|
||||
auth_data = load_auth()
|
||||
if not auth_data:
|
||||
raise RuntimeError(
|
||||
"No ChatGPT credentials found. Run 'planoai chatgpt login' first."
|
||||
)
|
||||
|
||||
access_token = auth_data.get("access_token")
|
||||
if access_token and not _is_token_expired(auth_data):
|
||||
return access_token, auth_data.get("account_id")
|
||||
|
||||
# Try refresh
|
||||
refresh_token = auth_data.get("refresh_token")
|
||||
if refresh_token:
|
||||
tokens = _refresh_tokens(refresh_token)
|
||||
auth_record = _build_auth_record(tokens)
|
||||
save_auth(auth_record)
|
||||
return auth_record["access_token"], auth_record.get("account_id")
|
||||
|
||||
raise RuntimeError(
|
||||
"ChatGPT token expired and refresh failed. Run 'planoai chatgpt login' again."
|
||||
)
|
||||
86
cli/planoai/chatgpt_cmd.py
Normal file
86
cli/planoai/chatgpt_cmd.py
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
"""
|
||||
CLI commands for ChatGPT subscription management.
|
||||
|
||||
Usage:
|
||||
planoai chatgpt login - Authenticate with ChatGPT via device code flow
|
||||
planoai chatgpt status - Check authentication status
|
||||
planoai chatgpt logout - Remove stored credentials
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
import click
|
||||
from rich.console import Console
|
||||
|
||||
from planoai import chatgpt_auth
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
@click.group()
|
||||
def chatgpt():
|
||||
"""ChatGPT subscription management."""
|
||||
pass
|
||||
|
||||
|
||||
@chatgpt.command()
|
||||
def login():
|
||||
"""Authenticate with your ChatGPT subscription using device code flow."""
|
||||
try:
|
||||
auth_record = chatgpt_auth.login()
|
||||
account_id = auth_record.get("account_id", "unknown")
|
||||
console.print(
|
||||
f"\n[green]Successfully authenticated with ChatGPT![/green]"
|
||||
f"\nAccount ID: {account_id}"
|
||||
f"\nCredentials saved to: {chatgpt_auth.CHATGPT_AUTH_FILE}"
|
||||
)
|
||||
except Exception as e:
|
||||
console.print(f"\n[red]Authentication failed:[/red] {e}")
|
||||
raise SystemExit(1)
|
||||
|
||||
|
||||
@chatgpt.command()
|
||||
def status():
|
||||
"""Check ChatGPT authentication status."""
|
||||
auth_data = chatgpt_auth.load_auth()
|
||||
if not auth_data or not auth_data.get("access_token"):
|
||||
console.print(
|
||||
"[yellow]Not authenticated.[/yellow] Run 'planoai chatgpt login'."
|
||||
)
|
||||
return
|
||||
|
||||
account_id = auth_data.get("account_id", "unknown")
|
||||
expires_at = auth_data.get("expires_at")
|
||||
|
||||
if expires_at:
|
||||
expiry_time = datetime.datetime.fromtimestamp(
|
||||
expires_at, tz=datetime.timezone.utc
|
||||
)
|
||||
now = datetime.datetime.now(tz=datetime.timezone.utc)
|
||||
if expiry_time > now:
|
||||
remaining = expiry_time - now
|
||||
console.print(
|
||||
f"[green]Authenticated[/green]"
|
||||
f"\n Account ID: {account_id}"
|
||||
f"\n Token expires: {expiry_time.strftime('%Y-%m-%d %H:%M:%S UTC')}"
|
||||
f" ({remaining.seconds // 60}m remaining)"
|
||||
)
|
||||
else:
|
||||
console.print(
|
||||
f"[yellow]Token expired[/yellow]"
|
||||
f"\n Account ID: {account_id}"
|
||||
f"\n Expired at: {expiry_time.strftime('%Y-%m-%d %H:%M:%S UTC')}"
|
||||
f"\n Will auto-refresh on next use, or run 'planoai chatgpt login'."
|
||||
)
|
||||
else:
|
||||
console.print(
|
||||
f"[green]Authenticated[/green] (no expiry info)"
|
||||
f"\n Account ID: {account_id}"
|
||||
)
|
||||
|
||||
|
||||
@chatgpt.command()
|
||||
def logout():
|
||||
"""Remove stored ChatGPT credentials."""
|
||||
chatgpt_auth.delete_auth()
|
||||
console.print("[green]ChatGPT credentials removed.[/green]")
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import json
|
||||
import os
|
||||
import uuid
|
||||
from planoai.utils import convert_legacy_listeners
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
import yaml
|
||||
|
|
@ -21,14 +22,23 @@ SUPPORTED_PROVIDERS_WITHOUT_BASE_URL = [
|
|||
"groq",
|
||||
"mistral",
|
||||
"openai",
|
||||
"xiaomi",
|
||||
"gemini",
|
||||
"anthropic",
|
||||
"together_ai",
|
||||
"xai",
|
||||
"moonshotai",
|
||||
"zhipu",
|
||||
"chatgpt",
|
||||
"digitalocean",
|
||||
"vercel",
|
||||
"openrouter",
|
||||
]
|
||||
|
||||
CHATGPT_API_BASE = "https://chatgpt.com/backend-api/codex"
|
||||
CHATGPT_DEFAULT_ORIGINATOR = "codex_cli_rs"
|
||||
CHATGPT_DEFAULT_USER_AGENT = "codex_cli_rs/0.0.0 (Unknown 0; unknown) unknown"
|
||||
|
||||
SUPPORTED_PROVIDERS = (
|
||||
SUPPORTED_PROVIDERS_WITHOUT_BASE_URL + SUPPORTED_PROVIDERS_WITH_BASE_URL
|
||||
)
|
||||
|
|
@ -48,6 +58,110 @@ def get_endpoint_and_port(endpoint, protocol):
|
|||
return endpoint, port
|
||||
|
||||
|
||||
def migrate_inline_routing_preferences(config_yaml):
|
||||
"""Lift v0.3.0-style inline ``routing_preferences`` under each
|
||||
``model_providers`` entry to the v0.4.0 top-level ``routing_preferences``
|
||||
list with ``models: [...]``.
|
||||
|
||||
This function is a no-op for configs whose ``version`` is already
|
||||
``v0.4.0`` or newer — those are assumed to be on the canonical
|
||||
top-level shape and are passed through untouched.
|
||||
|
||||
For older configs, the version is bumped to ``v0.4.0`` up front so
|
||||
brightstaff's v0.4.0 gate for top-level ``routing_preferences``
|
||||
accepts the rendered config, then inline preferences under each
|
||||
provider are lifted into the top-level list. Preferences with the
|
||||
same ``name`` across multiple providers are merged into a single
|
||||
top-level entry whose ``models`` list contains every provider's
|
||||
full ``<provider>/<model>`` string in declaration order. The first
|
||||
``description`` encountered wins; conflicts are warned, not errored,
|
||||
so existing v0.3.0 configs keep compiling. Any top-level preference
|
||||
already defined by the user is preserved as-is.
|
||||
"""
|
||||
current_version = str(config_yaml.get("version", ""))
|
||||
if _version_tuple(current_version) >= (0, 4, 0):
|
||||
return
|
||||
|
||||
config_yaml["version"] = "v0.4.0"
|
||||
|
||||
model_providers = config_yaml.get("model_providers") or []
|
||||
if not model_providers:
|
||||
return
|
||||
|
||||
migrated = {}
|
||||
for model_provider in model_providers:
|
||||
inline_prefs = model_provider.get("routing_preferences")
|
||||
if not inline_prefs:
|
||||
continue
|
||||
|
||||
full_model_name = model_provider.get("model")
|
||||
if not full_model_name:
|
||||
continue
|
||||
|
||||
if "/" in full_model_name and full_model_name.split("/")[-1].strip() == "*":
|
||||
raise Exception(
|
||||
f"Model {full_model_name} has routing_preferences but uses wildcard (*). Models with routing preferences cannot be wildcards."
|
||||
)
|
||||
|
||||
for pref in inline_prefs:
|
||||
name = pref.get("name")
|
||||
description = pref.get("description", "")
|
||||
if not name:
|
||||
continue
|
||||
if name in migrated:
|
||||
entry = migrated[name]
|
||||
if description and description != entry["description"]:
|
||||
print(
|
||||
f"WARNING: routing preference '{name}' has conflicting descriptions across providers; keeping the first one."
|
||||
)
|
||||
if full_model_name not in entry["models"]:
|
||||
entry["models"].append(full_model_name)
|
||||
else:
|
||||
migrated[name] = {
|
||||
"name": name,
|
||||
"description": description,
|
||||
"models": [full_model_name],
|
||||
}
|
||||
|
||||
if not migrated:
|
||||
return
|
||||
|
||||
for model_provider in model_providers:
|
||||
if "routing_preferences" in model_provider:
|
||||
del model_provider["routing_preferences"]
|
||||
|
||||
existing_top_level = config_yaml.get("routing_preferences") or []
|
||||
existing_names = {entry.get("name") for entry in existing_top_level}
|
||||
merged = list(existing_top_level)
|
||||
for name, entry in migrated.items():
|
||||
if name in existing_names:
|
||||
continue
|
||||
merged.append(entry)
|
||||
config_yaml["routing_preferences"] = merged
|
||||
|
||||
print(
|
||||
"WARNING: inline routing_preferences under model_providers is deprecated "
|
||||
"and has been auto-migrated to top-level routing_preferences. Update your "
|
||||
"config to v0.4.0 top-level form. See docs/routing-api.md"
|
||||
)
|
||||
|
||||
|
||||
def _version_tuple(version_string):
|
||||
stripped = version_string.strip().lstrip("vV")
|
||||
if not stripped:
|
||||
return (0, 0, 0)
|
||||
parts = stripped.split("-", 1)[0].split(".")
|
||||
out = []
|
||||
for part in parts[:3]:
|
||||
try:
|
||||
out.append(int(part))
|
||||
except ValueError:
|
||||
out.append(0)
|
||||
while len(out) < 3:
|
||||
out.append(0)
|
||||
return tuple(out)
|
||||
|
||||
|
||||
def validate_and_render_schema():
|
||||
ENVOY_CONFIG_TEMPLATE_FILE = os.getenv(
|
||||
"ENVOY_CONFIG_TEMPLATE_FILE", "envoy.template.yaml"
|
||||
|
|
@ -91,6 +205,8 @@ def validate_and_render_schema():
|
|||
config_yaml["model_providers"] = config_yaml["llm_providers"]
|
||||
del config_yaml["llm_providers"]
|
||||
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
listeners, llm_gateway, prompt_gateway = convert_legacy_listeners(
|
||||
config_yaml.get("listeners"), config_yaml.get("model_providers")
|
||||
)
|
||||
|
|
@ -190,7 +306,16 @@ def validate_and_render_schema():
|
|||
model_provider_name_set = set()
|
||||
llms_with_usage = []
|
||||
model_name_keys = set()
|
||||
model_usage_name_keys = set()
|
||||
|
||||
top_level_preferences = config_yaml.get("routing_preferences") or []
|
||||
seen_pref_names = set()
|
||||
for pref in top_level_preferences:
|
||||
pref_name = pref.get("name")
|
||||
if pref_name in seen_pref_names:
|
||||
raise Exception(
|
||||
f'Duplicate routing preference name "{pref_name}", please provide unique name for each routing preference'
|
||||
)
|
||||
seen_pref_names.add(pref_name)
|
||||
|
||||
print("listeners: ", listeners)
|
||||
|
||||
|
|
@ -249,10 +374,6 @@ def validate_and_render_schema():
|
|||
raise Exception(
|
||||
f"Model {model_name} is configured as default but uses wildcard (*). Default models cannot be wildcards."
|
||||
)
|
||||
if model_provider.get("routing_preferences"):
|
||||
raise Exception(
|
||||
f"Model {model_name} has routing_preferences but uses wildcard (*). Models with routing preferences cannot be wildcards."
|
||||
)
|
||||
|
||||
# Validate azure_openai and ollama provider requires base_url
|
||||
if (provider in SUPPORTED_PROVIDERS_WITH_BASE_URL) and model_provider.get(
|
||||
|
|
@ -301,13 +422,6 @@ def validate_and_render_schema():
|
|||
)
|
||||
model_name_keys.add(model_id)
|
||||
|
||||
for routing_preference in model_provider.get("routing_preferences", []):
|
||||
if routing_preference.get("name") in model_usage_name_keys:
|
||||
raise Exception(
|
||||
f'Duplicate routing preference name "{routing_preference.get("name")}", please provide unique name for each routing preference'
|
||||
)
|
||||
model_usage_name_keys.add(routing_preference.get("name"))
|
||||
|
||||
# Warn if both passthrough_auth and access_key are configured
|
||||
if model_provider.get("passthrough_auth") and model_provider.get(
|
||||
"access_key"
|
||||
|
|
@ -330,6 +444,25 @@ def validate_and_render_schema():
|
|||
provider = model_provider["provider"]
|
||||
model_provider["provider_interface"] = provider
|
||||
del model_provider["provider"]
|
||||
|
||||
# Auto-wire ChatGPT provider: inject base_url, passthrough_auth, and extra headers
|
||||
if provider == "chatgpt":
|
||||
if not model_provider.get("base_url"):
|
||||
model_provider["base_url"] = CHATGPT_API_BASE
|
||||
if not model_provider.get("access_key") and not model_provider.get(
|
||||
"passthrough_auth"
|
||||
):
|
||||
model_provider["passthrough_auth"] = True
|
||||
headers = model_provider.get("headers", {})
|
||||
headers.setdefault(
|
||||
"ChatGPT-Account-Id",
|
||||
os.environ.get("CHATGPT_ACCOUNT_ID", ""),
|
||||
)
|
||||
headers.setdefault("originator", CHATGPT_DEFAULT_ORIGINATOR)
|
||||
headers.setdefault("user-agent", CHATGPT_DEFAULT_USER_AGENT)
|
||||
headers.setdefault("session_id", str(uuid.uuid4()))
|
||||
model_provider["headers"] = headers
|
||||
|
||||
updated_model_providers.append(model_provider)
|
||||
|
||||
if model_provider.get("base_url", None):
|
||||
|
|
@ -371,16 +504,15 @@ def validate_and_render_schema():
|
|||
# Build lookup of model names (already prefix-stripped by config processing)
|
||||
model_name_set = {mp.get("model") for mp in updated_model_providers}
|
||||
|
||||
# Auto-add arch-router provider if routing preferences exist and no provider matches the router model
|
||||
router_model = overrides_config.get("llm_routing_model", "Arch-Router")
|
||||
# Strip provider prefix for comparison since config processing strips prefixes from model names
|
||||
# Auto-add plano-orchestrator provider if routing preferences exist and no provider matches the routing model
|
||||
router_model = overrides_config.get("llm_routing_model", "Plano-Orchestrator")
|
||||
router_model_id = (
|
||||
router_model.split("/", 1)[1] if "/" in router_model else router_model
|
||||
)
|
||||
if len(model_usage_name_keys) > 0 and router_model_id not in model_name_set:
|
||||
if len(seen_pref_names) > 0 and router_model_id not in model_name_set:
|
||||
updated_model_providers.append(
|
||||
{
|
||||
"name": "arch-router",
|
||||
"name": "plano-orchestrator",
|
||||
"provider_interface": "plano",
|
||||
"model": router_model_id,
|
||||
"internal": True,
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ PLANO_COLOR = "#969FF4"
|
|||
|
||||
SERVICE_NAME_ARCHGW = "plano"
|
||||
PLANO_DOCKER_NAME = "plano"
|
||||
PLANO_DOCKER_IMAGE = os.getenv("PLANO_DOCKER_IMAGE", "katanemo/plano:0.4.16")
|
||||
PLANO_DOCKER_IMAGE = os.getenv("PLANO_DOCKER_IMAGE", "katanemo/plano:0.4.21")
|
||||
DEFAULT_OTEL_TRACING_GRPC_ENDPOINT = "http://localhost:4317"
|
||||
|
||||
# Native mode constants
|
||||
|
|
|
|||
178
cli/planoai/defaults.py
Normal file
178
cli/planoai/defaults.py
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
"""Default config synthesizer for zero-config ``planoai up``.
|
||||
|
||||
When the user runs ``planoai up`` in a directory with no ``config.yaml`` /
|
||||
``plano_config.yaml``, we synthesize a pass-through config that covers the
|
||||
common LLM providers and auto-wires OTel export to ``localhost:4317`` so
|
||||
``planoai obs`` works out of the box.
|
||||
|
||||
Auth handling:
|
||||
- If the provider's env var is set, bind ``access_key: $ENV_VAR``.
|
||||
- Otherwise set ``passthrough_auth: true`` so the client's own Authorization
|
||||
header is forwarded. No env var is required to start the proxy.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
|
||||
DEFAULT_LLM_LISTENER_PORT = 12000
|
||||
# plano_config validation requires an http:// scheme on the OTLP endpoint.
|
||||
DEFAULT_OTLP_ENDPOINT = "http://localhost:4317"
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ProviderDefault:
|
||||
name: str
|
||||
env_var: str
|
||||
base_url: str
|
||||
model_pattern: str
|
||||
# Only set for providers whose prefix in the model pattern is NOT one of the
|
||||
# built-in SUPPORTED_PROVIDERS in cli/planoai/config_generator.py. For
|
||||
# built-ins, the validator infers the interface from the model prefix and
|
||||
# rejects configs that set this field explicitly.
|
||||
provider_interface: str | None = None
|
||||
|
||||
|
||||
# Keep ordering stable so synthesized configs diff cleanly across runs.
|
||||
PROVIDER_DEFAULTS: list[ProviderDefault] = [
|
||||
ProviderDefault(
|
||||
name="openai",
|
||||
env_var="OPENAI_API_KEY",
|
||||
base_url="https://api.openai.com/v1",
|
||||
model_pattern="openai/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="anthropic",
|
||||
env_var="ANTHROPIC_API_KEY",
|
||||
base_url="https://api.anthropic.com/v1",
|
||||
model_pattern="anthropic/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="gemini",
|
||||
env_var="GEMINI_API_KEY",
|
||||
base_url="https://generativelanguage.googleapis.com/v1beta",
|
||||
model_pattern="gemini/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="groq",
|
||||
env_var="GROQ_API_KEY",
|
||||
base_url="https://api.groq.com/openai/v1",
|
||||
model_pattern="groq/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="deepseek",
|
||||
env_var="DEEPSEEK_API_KEY",
|
||||
base_url="https://api.deepseek.com/v1",
|
||||
model_pattern="deepseek/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="mistral",
|
||||
env_var="MISTRAL_API_KEY",
|
||||
base_url="https://api.mistral.ai/v1",
|
||||
model_pattern="mistral/*",
|
||||
),
|
||||
# DigitalOcean Gradient is a first-class provider post-#889 — the
|
||||
# `digitalocean/` model prefix routes to the built-in Envoy cluster, no
|
||||
# base_url needed at runtime.
|
||||
ProviderDefault(
|
||||
name="digitalocean",
|
||||
env_var="DO_API_KEY",
|
||||
base_url="https://inference.do-ai.run/v1",
|
||||
model_pattern="digitalocean/*",
|
||||
),
|
||||
ProviderDefault(
|
||||
name="vercel",
|
||||
env_var="AI_GATEWAY_API_KEY",
|
||||
base_url="https://ai-gateway.vercel.sh/v1",
|
||||
model_pattern="vercel/*",
|
||||
),
|
||||
# OpenRouter is a first-class provider — the `openrouter/` model prefix is
|
||||
# accepted by the schema and brightstaff's ProviderId parser, so no
|
||||
# provider_interface override is needed.
|
||||
ProviderDefault(
|
||||
name="openrouter",
|
||||
env_var="OPENROUTER_API_KEY",
|
||||
base_url="https://openrouter.ai/api/v1",
|
||||
model_pattern="openrouter/*",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
@dataclass
|
||||
class DetectionResult:
|
||||
with_keys: list[ProviderDefault]
|
||||
passthrough: list[ProviderDefault]
|
||||
|
||||
@property
|
||||
def summary(self) -> str:
|
||||
parts = []
|
||||
if self.with_keys:
|
||||
parts.append("env-keyed: " + ", ".join(p.name for p in self.with_keys))
|
||||
if self.passthrough:
|
||||
parts.append("pass-through: " + ", ".join(p.name for p in self.passthrough))
|
||||
return " | ".join(parts) if parts else "no providers"
|
||||
|
||||
|
||||
def detect_providers(env: dict[str, str] | None = None) -> DetectionResult:
|
||||
env = env if env is not None else dict(os.environ)
|
||||
with_keys: list[ProviderDefault] = []
|
||||
passthrough: list[ProviderDefault] = []
|
||||
for p in PROVIDER_DEFAULTS:
|
||||
val = env.get(p.env_var)
|
||||
if val:
|
||||
with_keys.append(p)
|
||||
else:
|
||||
passthrough.append(p)
|
||||
return DetectionResult(with_keys=with_keys, passthrough=passthrough)
|
||||
|
||||
|
||||
def synthesize_default_config(
|
||||
env: dict[str, str] | None = None,
|
||||
*,
|
||||
listener_port: int = DEFAULT_LLM_LISTENER_PORT,
|
||||
otel_endpoint: str = DEFAULT_OTLP_ENDPOINT,
|
||||
) -> dict:
|
||||
"""Build a pass-through config dict suitable for validation + envoy rendering.
|
||||
|
||||
The returned dict can be dumped to YAML and handed to the existing `planoai up`
|
||||
pipeline unchanged.
|
||||
"""
|
||||
detection = detect_providers(env)
|
||||
|
||||
def _entry(p: ProviderDefault, base: dict) -> dict:
|
||||
row: dict = {"name": p.name, "model": p.model_pattern, "base_url": p.base_url}
|
||||
if p.provider_interface is not None:
|
||||
row["provider_interface"] = p.provider_interface
|
||||
row.update(base)
|
||||
return row
|
||||
|
||||
model_providers: list[dict] = []
|
||||
for p in detection.with_keys:
|
||||
model_providers.append(_entry(p, {"access_key": f"${p.env_var}"}))
|
||||
for p in detection.passthrough:
|
||||
model_providers.append(_entry(p, {"passthrough_auth": True}))
|
||||
|
||||
# No explicit `default: true` entry is synthesized: the plano config
|
||||
# validator rejects wildcard models as defaults, and brightstaff already
|
||||
# registers bare model names as lookup keys during wildcard expansion
|
||||
# (crates/common/src/llm_providers.rs), so `{"model": "gpt-4o-mini"}`
|
||||
# without a prefix resolves via the openai wildcard without needing
|
||||
# `default: true`. See discussion on #890.
|
||||
|
||||
return {
|
||||
"version": "v0.4.0",
|
||||
"listeners": [
|
||||
{
|
||||
"name": "llm",
|
||||
"type": "model",
|
||||
"port": listener_port,
|
||||
"address": "0.0.0.0",
|
||||
}
|
||||
],
|
||||
"model_providers": model_providers,
|
||||
"tracing": {
|
||||
"random_sampling": 100,
|
||||
"opentracing_grpc_endpoint": otel_endpoint,
|
||||
},
|
||||
}
|
||||
|
|
@ -6,7 +6,13 @@ import sys
|
|||
import contextlib
|
||||
import logging
|
||||
import rich_click as click
|
||||
import yaml
|
||||
from planoai import targets
|
||||
from planoai.defaults import (
|
||||
DEFAULT_LLM_LISTENER_PORT,
|
||||
detect_providers,
|
||||
synthesize_default_config,
|
||||
)
|
||||
|
||||
# Brand color - Plano purple
|
||||
PLANO_COLOR = "#969FF4"
|
||||
|
|
@ -31,6 +37,8 @@ from planoai.core import (
|
|||
)
|
||||
from planoai.init_cmd import init as init_cmd
|
||||
from planoai.trace_cmd import trace as trace_cmd, start_trace_listener_background
|
||||
from planoai.chatgpt_cmd import chatgpt as chatgpt_cmd
|
||||
from planoai.obs_cmd import obs as obs_cmd
|
||||
from planoai.consts import (
|
||||
DEFAULT_OTEL_TRACING_GRPC_ENDPOINT,
|
||||
DEFAULT_NATIVE_OTEL_TRACING_GRPC_ENDPOINT,
|
||||
|
|
@ -118,6 +126,28 @@ def _temporary_cli_log_level(level: str | None):
|
|||
set_log_level(current_level)
|
||||
|
||||
|
||||
def _inject_chatgpt_tokens_if_needed(config, env, console):
|
||||
"""If config uses chatgpt providers, resolve tokens from ~/.plano/chatgpt/auth.json."""
|
||||
providers = config.get("model_providers") or config.get("llm_providers") or []
|
||||
has_chatgpt = any(str(p.get("model", "")).startswith("chatgpt/") for p in providers)
|
||||
if not has_chatgpt:
|
||||
return
|
||||
|
||||
try:
|
||||
from planoai.chatgpt_auth import get_access_token
|
||||
|
||||
access_token, account_id = get_access_token()
|
||||
env["CHATGPT_ACCESS_TOKEN"] = access_token
|
||||
if account_id:
|
||||
env["CHATGPT_ACCOUNT_ID"] = account_id
|
||||
except Exception as e:
|
||||
console.print(
|
||||
f"\n[red]ChatGPT auth error:[/red] {e}\n"
|
||||
f"[dim]Run 'planoai chatgpt login' to authenticate.[/dim]\n"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def _print_missing_keys(console, missing_keys: list[str]) -> None:
|
||||
console.print(f"\n[red]✗[/red] [red]Missing API keys![/red]\n")
|
||||
for key in missing_keys:
|
||||
|
|
@ -317,7 +347,23 @@ def build(docker):
|
|||
help="Show detailed startup logs with timestamps.",
|
||||
is_flag=True,
|
||||
)
|
||||
def up(file, path, foreground, with_tracing, tracing_port, docker, verbose):
|
||||
@click.option(
|
||||
"--listener-port",
|
||||
default=DEFAULT_LLM_LISTENER_PORT,
|
||||
type=int,
|
||||
show_default=True,
|
||||
help="Override the LLM listener port when running without a config file. Ignored when a config file is present.",
|
||||
)
|
||||
def up(
|
||||
file,
|
||||
path,
|
||||
foreground,
|
||||
with_tracing,
|
||||
tracing_port,
|
||||
docker,
|
||||
verbose,
|
||||
listener_port,
|
||||
):
|
||||
"""Starts Plano."""
|
||||
from rich.status import Status
|
||||
|
||||
|
|
@ -328,12 +374,23 @@ def up(file, path, foreground, with_tracing, tracing_port, docker, verbose):
|
|||
# Use the utility function to find config file
|
||||
plano_config_file = find_config_file(path, file)
|
||||
|
||||
# Check if the file exists
|
||||
# Zero-config fallback: when no user config is present, synthesize a
|
||||
# pass-through config that covers the common LLM providers and
|
||||
# auto-wires OTel export to ``planoai obs``. See cli/planoai/defaults.py.
|
||||
if not os.path.exists(plano_config_file):
|
||||
detection = detect_providers()
|
||||
cfg_dict = synthesize_default_config(listener_port=listener_port)
|
||||
|
||||
default_dir = os.path.expanduser("~/.plano")
|
||||
os.makedirs(default_dir, exist_ok=True)
|
||||
synthesized_path = os.path.join(default_dir, "default_config.yaml")
|
||||
with open(synthesized_path, "w") as fh:
|
||||
yaml.safe_dump(cfg_dict, fh, sort_keys=False)
|
||||
plano_config_file = synthesized_path
|
||||
console.print(
|
||||
f"[red]✗[/red] Config file not found: [dim]{plano_config_file}[/dim]"
|
||||
f"[dim]No plano config found; using defaults ({detection.summary}). "
|
||||
f"Listening on :{listener_port}, tracing -> http://localhost:4317.[/dim]"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
if not docker:
|
||||
from planoai.native_runner import native_validate_config
|
||||
|
|
@ -384,6 +441,14 @@ def up(file, path, foreground, with_tracing, tracing_port, docker, verbose):
|
|||
env = os.environ.copy()
|
||||
env.pop("PATH", None)
|
||||
|
||||
import yaml
|
||||
|
||||
with open(plano_config_file, "r") as f:
|
||||
plano_config = yaml.safe_load(f)
|
||||
|
||||
# Inject ChatGPT tokens from ~/.plano/chatgpt/auth.json if any provider needs them
|
||||
_inject_chatgpt_tokens_if_needed(plano_config, env, console)
|
||||
|
||||
# Check access keys
|
||||
access_keys = get_llm_provider_access_keys(plano_config_file=plano_config_file)
|
||||
access_keys = set(access_keys)
|
||||
|
|
@ -681,6 +746,8 @@ main.add_command(cli_agent)
|
|||
main.add_command(generate_prompt_targets)
|
||||
main.add_command(init_cmd, name="init")
|
||||
main.add_command(trace_cmd, name="trace")
|
||||
main.add_command(chatgpt_cmd, name="chatgpt")
|
||||
main.add_command(obs_cmd, name="obs")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -253,6 +253,7 @@ def start_native(
|
|||
log.info("Plano is running (native mode)")
|
||||
for port in gateway_ports:
|
||||
log.info(f" http://localhost:{port}")
|
||||
|
||||
break
|
||||
|
||||
# Check if processes are still alive
|
||||
|
|
@ -367,8 +368,11 @@ def _kill_pid(pid):
|
|||
pass
|
||||
|
||||
|
||||
def stop_native():
|
||||
"""Stop natively-running Envoy and brightstaff processes.
|
||||
def stop_native(skip_pids: set | None = None):
|
||||
"""Stop natively-running Envoy, brightstaff, and watchdog processes.
|
||||
|
||||
Args:
|
||||
skip_pids: Set of PIDs to skip (used by the watchdog to avoid self-termination).
|
||||
|
||||
Returns:
|
||||
bool: True if at least one process was running and received a stop signal,
|
||||
|
|
@ -385,7 +389,12 @@ def stop_native():
|
|||
brightstaff_pid = pids.get("brightstaff_pid")
|
||||
|
||||
had_running_process = False
|
||||
for name, pid in [("envoy", envoy_pid), ("brightstaff", brightstaff_pid)]:
|
||||
for name, pid in [
|
||||
("envoy", envoy_pid),
|
||||
("brightstaff", brightstaff_pid),
|
||||
]:
|
||||
if skip_pids and pid in skip_pids:
|
||||
continue
|
||||
if pid is None:
|
||||
continue
|
||||
try:
|
||||
|
|
|
|||
6
cli/planoai/obs/__init__.py
Normal file
6
cli/planoai/obs/__init__.py
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
"""Plano observability console: in-memory live view of LLM traffic."""
|
||||
|
||||
from planoai.obs.collector import LLMCall, LLMCallStore, ObsCollector
|
||||
from planoai.obs.pricing import PricingCatalog
|
||||
|
||||
__all__ = ["LLMCall", "LLMCallStore", "ObsCollector", "PricingCatalog"]
|
||||
266
cli/planoai/obs/collector.py
Normal file
266
cli/planoai/obs/collector.py
Normal file
|
|
@ -0,0 +1,266 @@
|
|||
"""In-memory collector for LLM calls, fed by OTLP/gRPC spans from brightstaff."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
from collections import deque
|
||||
from concurrent import futures
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any, Iterable
|
||||
|
||||
import grpc
|
||||
from opentelemetry.proto.collector.trace.v1 import (
|
||||
trace_service_pb2,
|
||||
trace_service_pb2_grpc,
|
||||
)
|
||||
|
||||
DEFAULT_GRPC_PORT = 4317
|
||||
DEFAULT_CAPACITY = 1000
|
||||
|
||||
|
||||
@dataclass
|
||||
class LLMCall:
|
||||
"""One LLM call as reconstructed from a brightstaff LLM span.
|
||||
|
||||
Fields default to ``None`` when the underlying span attribute was absent.
|
||||
"""
|
||||
|
||||
request_id: str
|
||||
timestamp: datetime
|
||||
model: str
|
||||
provider: str | None = None
|
||||
request_model: str | None = None
|
||||
session_id: str | None = None
|
||||
route_name: str | None = None
|
||||
is_streaming: bool | None = None
|
||||
status_code: int | None = None
|
||||
prompt_tokens: int | None = None
|
||||
completion_tokens: int | None = None
|
||||
total_tokens: int | None = None
|
||||
cached_input_tokens: int | None = None
|
||||
cache_creation_tokens: int | None = None
|
||||
reasoning_tokens: int | None = None
|
||||
ttft_ms: float | None = None
|
||||
duration_ms: float | None = None
|
||||
routing_strategy: str | None = None
|
||||
routing_reason: str | None = None
|
||||
cost_usd: float | None = None
|
||||
|
||||
@property
|
||||
def tpt_ms(self) -> float | None:
|
||||
if self.duration_ms is None or self.completion_tokens in (None, 0):
|
||||
return None
|
||||
ttft = self.ttft_ms or 0.0
|
||||
generate_ms = max(0.0, self.duration_ms - ttft)
|
||||
if generate_ms <= 0:
|
||||
return None
|
||||
return generate_ms / self.completion_tokens
|
||||
|
||||
@property
|
||||
def tokens_per_sec(self) -> float | None:
|
||||
tpt = self.tpt_ms
|
||||
if tpt is None or tpt <= 0:
|
||||
return None
|
||||
return 1000.0 / tpt
|
||||
|
||||
|
||||
class LLMCallStore:
|
||||
"""Thread-safe ring buffer of recent LLM calls."""
|
||||
|
||||
def __init__(self, capacity: int = DEFAULT_CAPACITY) -> None:
|
||||
self._capacity = capacity
|
||||
self._calls: deque[LLMCall] = deque(maxlen=capacity)
|
||||
self._lock = threading.Lock()
|
||||
|
||||
@property
|
||||
def capacity(self) -> int:
|
||||
return self._capacity
|
||||
|
||||
def add(self, call: LLMCall) -> None:
|
||||
with self._lock:
|
||||
self._calls.append(call)
|
||||
|
||||
def clear(self) -> None:
|
||||
with self._lock:
|
||||
self._calls.clear()
|
||||
|
||||
def snapshot(self) -> list[LLMCall]:
|
||||
with self._lock:
|
||||
return list(self._calls)
|
||||
|
||||
def __len__(self) -> int:
|
||||
with self._lock:
|
||||
return len(self._calls)
|
||||
|
||||
|
||||
# Span attribute keys used below are the canonical OTel / Plano keys emitted by
|
||||
# brightstaff — see crates/brightstaff/src/tracing/constants.rs for the source
|
||||
# of truth.
|
||||
|
||||
|
||||
def _anyvalue_to_python(value: Any) -> Any: # AnyValue from OTLP
|
||||
kind = value.WhichOneof("value")
|
||||
if kind == "string_value":
|
||||
return value.string_value
|
||||
if kind == "bool_value":
|
||||
return value.bool_value
|
||||
if kind == "int_value":
|
||||
return value.int_value
|
||||
if kind == "double_value":
|
||||
return value.double_value
|
||||
return None
|
||||
|
||||
|
||||
def _attrs_to_dict(attrs: Iterable[Any]) -> dict[str, Any]:
|
||||
out: dict[str, Any] = {}
|
||||
for kv in attrs:
|
||||
py = _anyvalue_to_python(kv.value)
|
||||
if py is not None:
|
||||
out[kv.key] = py
|
||||
return out
|
||||
|
||||
|
||||
def _maybe_int(value: Any) -> int | None:
|
||||
if value is None:
|
||||
return None
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def _maybe_float(value: Any) -> float | None:
|
||||
if value is None:
|
||||
return None
|
||||
try:
|
||||
return float(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def span_to_llm_call(
|
||||
span: Any, service_name: str, pricing: Any | None = None
|
||||
) -> LLMCall | None:
|
||||
"""Convert an OTLP span into an LLMCall, or return None if it isn't one.
|
||||
|
||||
A span is considered an LLM call iff it carries the ``llm.model`` attribute.
|
||||
"""
|
||||
attrs = _attrs_to_dict(span.attributes)
|
||||
model = attrs.get("llm.model")
|
||||
if not model:
|
||||
return None
|
||||
|
||||
# Prefer explicit span attributes; fall back to likely aliases.
|
||||
request_id = next(
|
||||
(
|
||||
str(attrs[key])
|
||||
for key in ("request_id", "http.request_id")
|
||||
if key in attrs and attrs[key] is not None
|
||||
),
|
||||
span.span_id.hex() if span.span_id else "",
|
||||
)
|
||||
start_ns = span.start_time_unix_nano or 0
|
||||
ts = (
|
||||
datetime.fromtimestamp(start_ns / 1_000_000_000, tz=timezone.utc).astimezone()
|
||||
if start_ns
|
||||
else datetime.now().astimezone()
|
||||
)
|
||||
|
||||
call = LLMCall(
|
||||
request_id=str(request_id),
|
||||
timestamp=ts,
|
||||
model=str(model),
|
||||
provider=(
|
||||
str(attrs["llm.provider"]) if "llm.provider" in attrs else service_name
|
||||
),
|
||||
request_model=(
|
||||
str(attrs["model.requested"]) if "model.requested" in attrs else None
|
||||
),
|
||||
session_id=(
|
||||
str(attrs["plano.session_id"]) if "plano.session_id" in attrs else None
|
||||
),
|
||||
route_name=(
|
||||
str(attrs["plano.route.name"]) if "plano.route.name" in attrs else None
|
||||
),
|
||||
is_streaming=(
|
||||
bool(attrs["llm.is_streaming"]) if "llm.is_streaming" in attrs else None
|
||||
),
|
||||
status_code=_maybe_int(attrs.get("http.status_code")),
|
||||
prompt_tokens=_maybe_int(attrs.get("llm.usage.prompt_tokens")),
|
||||
completion_tokens=_maybe_int(attrs.get("llm.usage.completion_tokens")),
|
||||
total_tokens=_maybe_int(attrs.get("llm.usage.total_tokens")),
|
||||
cached_input_tokens=_maybe_int(attrs.get("llm.usage.cached_input_tokens")),
|
||||
cache_creation_tokens=_maybe_int(attrs.get("llm.usage.cache_creation_tokens")),
|
||||
reasoning_tokens=_maybe_int(attrs.get("llm.usage.reasoning_tokens")),
|
||||
ttft_ms=_maybe_float(attrs.get("llm.time_to_first_token")),
|
||||
duration_ms=_maybe_float(attrs.get("llm.duration_ms")),
|
||||
routing_strategy=(
|
||||
str(attrs["routing.strategy"]) if "routing.strategy" in attrs else None
|
||||
),
|
||||
routing_reason=(
|
||||
str(attrs["routing.selection_reason"])
|
||||
if "routing.selection_reason" in attrs
|
||||
else None
|
||||
),
|
||||
)
|
||||
|
||||
if pricing is not None:
|
||||
call.cost_usd = pricing.cost_for_call(call)
|
||||
|
||||
return call
|
||||
|
||||
|
||||
class _ObsServicer(trace_service_pb2_grpc.TraceServiceServicer):
    """OTLP trace receiver that converts exported spans into `LLMCall`s.

    Each parsed call is appended to the shared store; spans that are not
    LLM calls (``span_to_llm_call`` returns None) are dropped.
    """

    def __init__(self, store: LLMCallStore, pricing: Any | None) -> None:
        self._store = store
        self._pricing = pricing

    def Export(self, request, context):  # noqa: N802 — gRPC generated name
        for resource_spans in request.resource_spans:
            # Pull `service.name` from the resource attributes; stop at the
            # first occurrence even when its value is unusable.
            service_name = "unknown"
            for attr in resource_spans.resource.attributes:
                if attr.key != "service.name":
                    continue
                val = _anyvalue_to_python(attr.value)
                if val is not None:
                    service_name = str(val)
                break
            # Flatten all spans across every instrumentation scope.
            all_spans = (
                span
                for scope_spans in resource_spans.scope_spans
                for span in scope_spans.spans
            )
            for span in all_spans:
                call = span_to_llm_call(span, service_name, self._pricing)
                if call is not None:
                    self._store.add(call)
        return trace_service_pb2.ExportTraceServiceResponse()
|
||||
|
||||
|
||||
@dataclass
class ObsCollector:
    """Owns the OTLP/gRPC server and the in-memory LLMCall ring buffer."""

    # Ring buffer of parsed LLM calls, shared with the servicer.
    store: LLMCallStore = field(default_factory=LLMCallStore)
    # Optional pricing catalog used to attach cost_usd to each call.
    pricing: Any | None = None
    # Listen address for the OTLP/gRPC endpoint.
    host: str = "0.0.0.0"
    port: int = DEFAULT_GRPC_PORT
    # Running gRPC server; None while stopped. Not part of init/repr.
    _server: grpc.Server | None = field(default=None, init=False, repr=False)

    def start(self) -> None:
        """Start the OTLP listener; idempotent (no-op when already running).

        Raises OSError when the port cannot be bound (already in use).
        """
        if self._server is not None:
            return
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
        trace_service_pb2_grpc.add_TraceServiceServicer_to_server(
            _ObsServicer(self.store, self.pricing), server
        )
        address = f"{self.host}:{self.port}"
        bound = server.add_insecure_port(address)
        # grpc returns 0 when the bind failed (e.g. port already taken).
        if bound == 0:
            raise OSError(
                f"Failed to bind OTLP listener on {address}: port already in use. "
                "Stop tracing via `planoai trace down` or pick another port with --port."
            )
        server.start()
        # Only record the server once it is actually serving, so a failed
        # start leaves the collector in the "stopped" state.
        self._server = server

    def stop(self, grace: float = 2.0) -> None:
        """Stop the server, allowing `grace` seconds for in-flight RPCs."""
        if self._server is not None:
            self._server.stop(grace)
            self._server = None
|
||||
321
cli/planoai/obs/pricing.py
Normal file
321
cli/planoai/obs/pricing.py
Normal file
|
|
@ -0,0 +1,321 @@
|
|||
"""DigitalOcean Gradient pricing catalog for the obs console.
|
||||
|
||||
Ported loosely from ``crates/brightstaff/src/router/model_metrics.rs::fetch_do_pricing``.
|
||||
Single-source: one fetch at startup, cached for the life of the process.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import threading
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
DEFAULT_PRICING_URL = "https://api.digitalocean.com/v2/gen-ai/models/catalog"
|
||||
FETCH_TIMEOUT_SECS = 5.0
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ModelPrice:
    """Input/output $/token rates. Token counts are multiplied by these."""

    # USD charged per fresh (non-cached) input token.
    input_per_token_usd: float
    # USD charged per generated output token.
    output_per_token_usd: float
    # USD per cached input token; None when the catalog publishes no cached rate.
    cached_input_per_token_usd: float | None = None
|
||||
|
||||
|
||||
class PricingCatalog:
    """In-memory pricing lookup keyed by model id.

    DO's catalog uses ids like ``openai-gpt-5.4``; Plano's resolved model names
    may arrive as ``do/openai-gpt-5.4`` or bare ``openai-gpt-5.4``. We strip the
    leading provider prefix when looking up.
    """

    def __init__(self, prices: dict[str, ModelPrice] | None = None) -> None:
        # Alias -> price map; guarded by a lock since the gRPC servicer may
        # call price_for from worker threads.
        self._prices: dict[str, ModelPrice] = prices or {}
        self._lock = threading.Lock()

    def __len__(self) -> int:
        with self._lock:
            return len(self._prices)

    def sample_models(self, n: int = 5) -> list[str]:
        # First n catalog keys, mainly for diagnostics/logging.
        with self._lock:
            return list(self._prices.keys())[:n]

    @classmethod
    def fetch(cls, url: str = DEFAULT_PRICING_URL) -> "PricingCatalog":
        """Fetch pricing from DO's catalog endpoint. On failure, returns an
        empty catalog (cost column will be blank).

        The catalog endpoint is public — no auth required, no signup — so
        ``planoai obs`` gets cost data on first run out of the box.
        """
        try:
            resp = requests.get(url, timeout=FETCH_TIMEOUT_SECS)
            resp.raise_for_status()
            data = resp.json()
        except Exception as exc:  # noqa: BLE001 — best-effort; never fatal
            logger.warning(
                "DO pricing fetch failed: %s; cost column will be blank.",
                exc,
            )
            return cls()

        prices = _parse_do_pricing(data)
        if not prices:
            # Dump the first entry's raw shape so we can see which fields DO
            # actually returned — helps when the catalog adds new fields or
            # the response doesn't match our parser.
            import json as _json

            sample_items = _coerce_items(data)
            sample = sample_items[0] if sample_items else data
            logger.warning(
                "DO pricing response had no parseable entries; cost column "
                "will be blank. Sample entry: %s",
                _json.dumps(sample, default=str)[:400],
            )
        return cls(prices)

    def price_for(self, model_name: str | None) -> ModelPrice | None:
        """Look up the price for a (possibly prefixed/dated) model name."""
        if not model_name:
            return None
        with self._lock:
            # Try the full name first, then stripped prefix, then lowercased variants.
            for candidate in _model_key_candidates(model_name):
                hit = self._prices.get(candidate)
                if hit is not None:
                    return hit
            return None

    def cost_for_call(self, call: Any) -> float | None:
        """Compute USD cost for an LLMCall. Returns None when pricing is unknown."""
        # Prefer the resolved model, falling back to the requested model.
        price = self.price_for(getattr(call, "model", None)) or self.price_for(
            getattr(call, "request_model", None)
        )
        if price is None:
            return None
        prompt = int(getattr(call, "prompt_tokens", 0) or 0)
        completion = int(getattr(call, "completion_tokens", 0) or 0)
        cached = int(getattr(call, "cached_input_tokens", 0) or 0)

        # Cached input tokens are priced separately at the cached rate when known;
        # otherwise they're already counted in prompt tokens at the regular rate.
        fresh_prompt = prompt
        if price.cached_input_per_token_usd is not None and cached:
            fresh_prompt = max(0, prompt - cached)
            cost_cached = cached * price.cached_input_per_token_usd
        else:
            cost_cached = 0.0

        cost = (
            fresh_prompt * price.input_per_token_usd
            + completion * price.output_per_token_usd
            + cost_cached
        )
        # Round to micro-dollar precision for display.
        return round(cost, 6)
|
||||
|
||||
|
||||
# Trailing release-date suffix on model names, e.g. ``claude-haiku-4-5-20251001``.
_DATE_SUFFIX_RE = re.compile(r"-\d{8}$")
# Provider prefixes that may lead a DO catalog id (``anthropic-claude-…``).
_PROVIDER_PREFIXES = ("anthropic", "openai", "google", "meta", "cohere", "mistral")
# Anthropic family words used to generate family↔version word-order aliases.
_ANTHROPIC_FAMILIES = {"opus", "sonnet", "haiku"}
|
||||
|
||||
|
||||
def _model_key_candidates(model_name: str) -> list[str]:
|
||||
"""Lookup-side variants of a Plano-emitted model name.
|
||||
|
||||
Plano resolves names like ``claude-haiku-4-5-20251001``; the catalog stores
|
||||
them as ``anthropic-claude-haiku-4.5``. We strip the date suffix and the
|
||||
``provider/`` prefix here; the catalog itself registers the dash/dot and
|
||||
family-order aliases at parse time (see :func:`_expand_aliases`).
|
||||
"""
|
||||
base = model_name.strip()
|
||||
out = [base]
|
||||
if "/" in base:
|
||||
out.append(base.split("/", 1)[1])
|
||||
for k in list(out):
|
||||
stripped = _DATE_SUFFIX_RE.sub("", k)
|
||||
if stripped != k:
|
||||
out.append(stripped)
|
||||
out.extend([v.lower() for v in list(out)])
|
||||
seen: set[str] = set()
|
||||
uniq = []
|
||||
for key in out:
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
uniq.append(key)
|
||||
return uniq
|
||||
|
||||
|
||||
def _expand_aliases(model_id: str) -> set[str]:
    """Catalog-side variants of a DO model id.

    DO publishes Anthropic models under ids like ``anthropic-claude-opus-4.7``
    or ``anthropic-claude-4.6-sonnet`` while Plano emits ``claude-opus-4-7`` /
    ``claude-sonnet-4-6``. Generate a set covering provider-prefix stripping,
    dash↔dot in version segments, and family↔version word order so a single
    catalog entry matches every name shape we'll see at lookup.
    """
    out: set[str] = set()

    def register(candidate: str) -> None:
        # Record both the exact spelling and its lowercased twin.
        if candidate:
            out.add(candidate)
            out.add(candidate.lower())

    register(model_id)

    # Strip a recognized provider prefix: ``anthropic-claude-…`` → ``claude-…``.
    base = model_id
    prefix, _, remainder = base.partition("-")
    if remainder and prefix.lower() in _PROVIDER_PREFIXES:
        register(remainder)
        base = remainder

    # Dashed variants of any dotted-version alias (``4.7`` → ``4-7``).
    for alias in tuple(out):
        if "." in alias:
            register(alias.replace(".", "-"))

    # Anthropic word-order aliases: claude-<ver>-<family> ↔ claude-<family>-<ver>.
    segments = base.split("-")
    if len(segments) >= 3 and segments[0].lower() == "claude":
        tail = segments[1:]
        for i, segment in enumerate(tail):
            if segment.lower() not in _ANTHROPIC_FAMILIES:
                continue
            version_bits = tail[:i] + tail[i + 1 :]
            if version_bits:
                version = "-".join(version_bits)
                for reordered in (
                    "claude-" + version + "-" + segment,
                    "claude-" + segment + "-" + version,
                ):
                    register(reordered)
                    register(reordered.replace(".", "-"))
            break

    return out
|
||||
|
||||
|
||||
def _parse_do_pricing(data: Any) -> dict[str, ModelPrice]:
    """Parse DO catalog response into a ModelPrice map keyed by model id.

    DO's shape (as of 2026-04):
        {
          "data": [
            {"model_id": "openai-gpt-5.4",
             "pricing": {"input_price_per_million": 5.0,
                         "output_price_per_million": 15.0}},
            ...
          ]
        }

    Older/alternate shapes are also accepted (flat top-level fields, or the
    ``id``/``model``/``name`` key).
    """
    prices: dict[str, ModelPrice] = {}
    items = _coerce_items(data)
    for item in items:
        # Accept several id field names for alternate response shapes.
        model_id = (
            item.get("model_id")
            or item.get("id")
            or item.get("model")
            or item.get("name")
        )
        if not model_id:
            continue

        # DO nests rates under `pricing`; try that first, then fall back to
        # top-level fields for alternate response shapes.
        sources = [item]
        if isinstance(item.get("pricing"), dict):
            sources.insert(0, item["pricing"])

        input_rate = _extract_rate_from_sources(
            sources,
            ["input_per_token", "input_token_price", "price_input"],
            ["input_price_per_million", "input_per_million", "input_per_mtok"],
        )
        output_rate = _extract_rate_from_sources(
            sources,
            ["output_per_token", "output_token_price", "price_output"],
            ["output_price_per_million", "output_per_million", "output_per_mtok"],
        )
        cached_rate = _extract_rate_from_sources(
            sources,
            [
                "cached_input_per_token",
                "cached_input_token_price",
                "prompt_cache_read_per_token",
            ],
            [
                "cached_input_price_per_million",
                "cached_input_per_million",
                "cached_input_per_mtok",
            ],
        )

        # Both directions are required to price a call at all.
        if input_rate is None or output_rate is None:
            continue
        # Treat 0-rate entries as "unknown" so cost falls back to `—` rather
        # than showing a misleading $0.0000. DO's catalog sometimes omits
        # rates for promo/open-weight models.
        if input_rate == 0 and output_rate == 0:
            continue
        price = ModelPrice(
            input_per_token_usd=input_rate,
            output_per_token_usd=output_rate,
            cached_input_per_token_usd=cached_rate,
        )
        # setdefault: on alias collisions the first catalog entry wins.
        for alias in _expand_aliases(str(model_id)):
            prices.setdefault(alias, price)
    return prices
|
||||
|
||||
|
||||
def _coerce_items(data: Any) -> list[dict]:
|
||||
if isinstance(data, list):
|
||||
return [x for x in data if isinstance(x, dict)]
|
||||
if isinstance(data, dict):
|
||||
for key in ("data", "models", "pricing", "items"):
|
||||
val = data.get(key)
|
||||
if isinstance(val, list):
|
||||
return [x for x in val if isinstance(x, dict)]
|
||||
return []
|
||||
|
||||
|
||||
def _extract_rate_from_sources(
|
||||
sources: list[dict],
|
||||
per_token_keys: list[str],
|
||||
per_million_keys: list[str],
|
||||
) -> float | None:
|
||||
"""Return a per-token rate in USD, or None if unknown.
|
||||
|
||||
Some DO catalog responses put per-token values under a field whose name
|
||||
says ``_per_million`` (e.g. ``input_price_per_million: 5E-8`` — that's
|
||||
$5e-8 per token, not per million). Heuristic: values < 1 are already
|
||||
per-token (real per-million rates are ~0.1 to ~100); values >= 1 are
|
||||
treated as per-million and divided by 1,000,000.
|
||||
"""
|
||||
for src in sources:
|
||||
for key in per_token_keys:
|
||||
if key in src and src[key] is not None:
|
||||
try:
|
||||
return float(src[key])
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
for key in per_million_keys:
|
||||
if key in src and src[key] is not None:
|
||||
try:
|
||||
v = float(src[key])
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
if v >= 1:
|
||||
return v / 1_000_000
|
||||
return v
|
||||
return None
|
||||
634
cli/planoai/obs/render.py
Normal file
634
cli/planoai/obs/render.py
Normal file
|
|
@ -0,0 +1,634 @@
|
|||
"""Rich TUI renderer for the observability console."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import Counter
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime
|
||||
from http import HTTPStatus
|
||||
|
||||
from rich.align import Align
|
||||
from rich.box import SIMPLE, SIMPLE_HEAVY
|
||||
from rich.console import Group
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
|
||||
MAX_WIDTH = 160
|
||||
|
||||
from planoai.obs.collector import LLMCall
|
||||
|
||||
|
||||
@dataclass
class AggregateStats:
    """Lifetime roll-up across every observed LLM call (summary panel data)."""

    count: int  # total calls seen
    total_cost_usd: float  # summed per-call cost; unknown costs contribute 0.0
    total_input_tokens: int
    total_output_tokens: int
    distinct_sessions: int  # number of unique non-empty session ids
    current_session: str | None  # most recent call's session id, if any
    # Latency percentiles over calls that reported duration_ms.
    p50_latency_ms: float | None = None
    p95_latency_ms: float | None = None
    p99_latency_ms: float | None = None
    # Time-to-first-token percentiles over calls that reported ttft_ms.
    p50_ttft_ms: float | None = None
    p95_ttft_ms: float | None = None
    p99_ttft_ms: float | None = None
    error_count: int = 0  # errors_4xx + errors_5xx
    errors_4xx: int = 0
    errors_5xx: int = 0
    # True when at least one call carried a known cost (controls "—" display).
    has_cost: bool = False
|
||||
|
||||
|
||||
@dataclass
class ModelRollup:
    """Per-model aggregate row for the "by model" table."""

    model: str
    requests: int
    input_tokens: int
    output_tokens: int
    cache_write: int  # summed cache_creation_tokens
    cache_read: int  # summed cached_input_tokens
    cost_usd: float  # summed cost; unknown costs contribute 0.0
    has_cost: bool = False  # True when any call for this model had a known cost
    avg_tokens_per_sec: float | None = None  # mean over calls reporting tokens/sec
|
||||
|
||||
|
||||
def _percentile(values: list[float], pct: float) -> float | None:
|
||||
if not values:
|
||||
return None
|
||||
s = sorted(values)
|
||||
k = max(0, min(len(s) - 1, int(round((pct / 100.0) * (len(s) - 1)))))
|
||||
return s[k]
|
||||
|
||||
|
||||
def aggregates(calls: list[LLMCall]) -> AggregateStats:
    """Fold a list of calls into lifetime summary stats for the header panel."""
    sessions = {c.session_id for c in calls if c.session_id}
    # Session id of the most recent call that carried one.
    latest_session = next(
        (c.session_id for c in reversed(calls) if c.session_id is not None), None
    )
    latencies = [c.duration_ms for c in calls if c.duration_ms is not None]
    first_token_times = [c.ttft_ms for c in calls if c.ttft_ms is not None]
    client_errors = sum(
        1 for c in calls if c.status_code is not None and 400 <= c.status_code < 500
    )
    server_errors = sum(
        1 for c in calls if c.status_code is not None and c.status_code >= 500
    )
    return AggregateStats(
        count=len(calls),
        total_cost_usd=sum((c.cost_usd or 0.0) for c in calls),
        total_input_tokens=sum(int(c.prompt_tokens or 0) for c in calls),
        total_output_tokens=sum(int(c.completion_tokens or 0) for c in calls),
        distinct_sessions=len(sessions),
        current_session=latest_session,
        p50_latency_ms=_percentile(latencies, 50),
        p95_latency_ms=_percentile(latencies, 95),
        p99_latency_ms=_percentile(latencies, 99),
        p50_ttft_ms=_percentile(first_token_times, 50),
        p95_ttft_ms=_percentile(first_token_times, 95),
        p99_ttft_ms=_percentile(first_token_times, 99),
        error_count=client_errors + server_errors,
        errors_4xx=client_errors,
        errors_5xx=server_errors,
        has_cost=any(c.cost_usd is not None for c in calls),
    )
|
||||
|
||||
|
||||
def model_rollups(calls: list[LLMCall]) -> list[ModelRollup]:
    """Aggregate calls per model, sorted by cost then request count (desc)."""
    # Group calls by model name, preserving first-seen model order.
    grouped: dict[str, list[LLMCall]] = {}
    for call in calls:
        grouped.setdefault(call.model, []).append(call)

    rollups: list[ModelRollup] = []
    for model, group in grouped.items():
        speeds = [c.tokens_per_sec for c in group if c.tokens_per_sec is not None]
        rollups.append(
            ModelRollup(
                model=model,
                requests=len(group),
                input_tokens=sum(int(c.prompt_tokens or 0) for c in group),
                output_tokens=sum(int(c.completion_tokens or 0) for c in group),
                cache_write=sum(int(c.cache_creation_tokens or 0) for c in group),
                cache_read=sum(int(c.cached_input_tokens or 0) for c in group),
                cost_usd=sum((c.cost_usd or 0.0) for c in group),
                # "—" is shown instead of $0 when no call had a known cost.
                has_cost=any(c.cost_usd is not None for c in group),
                avg_tokens_per_sec=(sum(speeds) / len(speeds)) if speeds else None,
            )
        )
    rollups.sort(key=lambda r: (r.cost_usd, r.requests), reverse=True)
    return rollups
|
||||
|
||||
|
||||
@dataclass
class RouteHit:
    """Per-route row for the "route share" table."""

    route: str
    hits: int
    pct: float  # share of all routed calls, 0-100
    p95_latency_ms: float | None  # None when no call on this route reported latency
    error_count: int  # calls with status >= 400
|
||||
|
||||
|
||||
def route_hits(calls: list[LLMCall]) -> list[RouteHit]:
    """Per-route hit counts with traffic share, p95 latency, and error totals.

    Calls without a route name are ignored; returns [] when nothing routed.
    """
    tally: Counter[str] = Counter()
    latencies: dict[str, list[float]] = {}
    errors: dict[str, int] = {}
    for call in calls:
        route = call.route_name
        if not route:
            continue
        tally[route] += 1
        if call.duration_ms is not None:
            latencies.setdefault(route, []).append(call.duration_ms)
        if call.status_code is not None and call.status_code >= 400:
            errors[route] = errors.get(route, 0) + 1

    total = sum(tally.values())
    if total == 0:
        return []
    # most_common(): descending by hits, insertion order on ties.
    return [
        RouteHit(
            route=route,
            hits=n,
            pct=(n / total) * 100.0,
            p95_latency_ms=_percentile(latencies.get(route, []), 95),
            error_count=errors.get(route, 0),
        )
        for route, n in tally.most_common()
    ]
|
||||
|
||||
|
||||
def _fmt_cost(v: float | None, *, zero: str = "—") -> str:
|
||||
if v is None:
|
||||
return "—"
|
||||
if v == 0:
|
||||
return zero
|
||||
if abs(v) < 0.0001:
|
||||
return f"${v:.8f}".rstrip("0").rstrip(".")
|
||||
if abs(v) < 0.01:
|
||||
return f"${v:.6f}".rstrip("0").rstrip(".")
|
||||
if abs(v) < 1:
|
||||
return f"${v:.4f}"
|
||||
return f"${v:,.2f}"
|
||||
|
||||
|
||||
def _fmt_ms(v: float | None) -> str:
|
||||
if v is None:
|
||||
return "—"
|
||||
if v >= 1000:
|
||||
return f"{v / 1000:.1f}s"
|
||||
return f"{v:.0f}ms"
|
||||
|
||||
|
||||
def _fmt_int(v: int | None) -> str:
|
||||
if v is None or v == 0:
|
||||
return "—"
|
||||
return f"{v:,}"
|
||||
|
||||
|
||||
def _fmt_tokens(v: int | None) -> str:
|
||||
if v is None:
|
||||
return "—"
|
||||
return f"{v:,}"
|
||||
|
||||
|
||||
def _fmt_tps(v: float | None) -> str:
|
||||
if v is None or v <= 0:
|
||||
return "—"
|
||||
if v >= 100:
|
||||
return f"{v:.0f}/s"
|
||||
return f"{v:.1f}/s"
|
||||
|
||||
|
||||
def _latency_style(v: float | None) -> str:
|
||||
if v is None:
|
||||
return "dim"
|
||||
if v < 500:
|
||||
return "green"
|
||||
if v < 2000:
|
||||
return "yellow"
|
||||
return "red"
|
||||
|
||||
|
||||
def _ttft_style(v: float | None) -> str:
|
||||
if v is None:
|
||||
return "dim"
|
||||
if v < 300:
|
||||
return "green"
|
||||
if v < 1000:
|
||||
return "yellow"
|
||||
return "red"
|
||||
|
||||
|
||||
def _truncate_model(name: str, limit: int = 32) -> str:
|
||||
if len(name) <= limit:
|
||||
return name
|
||||
return name[: limit - 1] + "…"
|
||||
|
||||
|
||||
def _status_text(code: int | None) -> Text:
    """Colored status bullet: "● ok" for 2xx, yellow 3xx, bold yellow 4xx,
    bold red otherwise; dim em dash when the status is unknown."""
    if code is None:
        return Text("—", style="dim")
    if 200 <= code < 300:
        return Text("● ok", style="green")
    if 300 <= code < 400:
        style = "yellow"
    elif 400 <= code < 500:
        style = "yellow bold"
    else:
        # 5xx and anything outside the usual ranges.
        style = "red bold"
    return Text(f"● {code}", style=style)
|
||||
|
||||
|
||||
def _summary_panel(last: LLMCall | None, stats: AggregateStats) -> Panel:
    """Build the top "live LLM traffic" panel.

    Two side-by-side grids: left shows a snapshot of the most recent call
    (or a waiting placeholder), right shows lifetime totals from ``stats``.
    """
    # Content-sized columns with a fixed gutter keep the two blocks close
    # together instead of stretching across the full terminal on wide screens.
    grid = Table.grid(padding=(0, 4))
    grid.add_column(no_wrap=True)
    grid.add_column(no_wrap=True)

    # Left: latest request snapshot.
    left = Table.grid(padding=(0, 1))
    left.add_column(style="dim", no_wrap=True)
    left.add_column(no_wrap=True)
    if last is None:
        left.add_row("latest", Text("waiting for spans…", style="dim italic"))
    else:
        model_text = Text(_truncate_model(last.model, 48), style="bold cyan")
        if last.is_streaming:
            model_text.append(" ⟳ stream", style="dim")
        left.add_row("model", model_text)
        # Only show the requested model when routing changed it.
        if last.request_model and last.request_model != last.model:
            left.add_row(
                "requested", Text(_truncate_model(last.request_model, 48), style="cyan")
            )
        if last.route_name:
            left.add_row("route", Text(last.route_name, style="yellow"))
        left.add_row("status", _status_text(last.status_code))
        # Token line: "<in> in · <out> out [· <cached> cached]".
        tokens = Text()
        tokens.append(_fmt_tokens(last.prompt_tokens))
        tokens.append(" in", style="dim")
        tokens.append(" · ", style="dim")
        tokens.append(_fmt_tokens(last.completion_tokens), style="green")
        tokens.append(" out", style="dim")
        if last.cached_input_tokens:
            tokens.append(" · ", style="dim")
            tokens.append(_fmt_tokens(last.cached_input_tokens), style="yellow")
            tokens.append(" cached", style="dim")
        left.add_row("tokens", tokens)
        # Timing line: TTFT and total latency, plus throughput when known.
        timing = Text()
        timing.append("TTFT ", style="dim")
        timing.append(_fmt_ms(last.ttft_ms), style=_ttft_style(last.ttft_ms))
        timing.append(" · ", style="dim")
        timing.append("lat ", style="dim")
        timing.append(_fmt_ms(last.duration_ms), style=_latency_style(last.duration_ms))
        tps = last.tokens_per_sec
        if tps:
            timing.append(" · ", style="dim")
            timing.append(_fmt_tps(tps), style="green")
        left.add_row("timing", timing)
        left.add_row("cost", Text(_fmt_cost(last.cost_usd), style="green bold"))

    # Right: lifetime totals.
    right = Table.grid(padding=(0, 1))
    right.add_column(style="dim", no_wrap=True)
    right.add_column(no_wrap=True)
    right.add_row(
        "requests",
        Text(f"{stats.count:,}", style="bold"),
    )
    # Errors row only appears once something has failed.
    if stats.error_count:
        err_text = Text()
        err_text.append(f"{stats.error_count:,}", style="red bold")
        parts: list[str] = []
        if stats.errors_4xx:
            parts.append(f"{stats.errors_4xx} 4xx")
        if stats.errors_5xx:
            parts.append(f"{stats.errors_5xx} 5xx")
        if parts:
            err_text.append(f" ({' · '.join(parts)})", style="dim")
        right.add_row("errors", err_text)
    # "—" when no call ever carried pricing information.
    cost_str = _fmt_cost(stats.total_cost_usd) if stats.has_cost else "—"
    right.add_row("total cost", Text(cost_str, style="green bold"))
    tokens_total = Text()
    tokens_total.append(_fmt_tokens(stats.total_input_tokens))
    tokens_total.append(" in", style="dim")
    tokens_total.append(" · ", style="dim")
    tokens_total.append(_fmt_tokens(stats.total_output_tokens), style="green")
    tokens_total.append(" out", style="dim")
    right.add_row("tokens", tokens_total)
    # Latency percentiles, each colored by its own threshold bucket.
    lat_text = Text()
    lat_text.append("p50 ", style="dim")
    lat_text.append(
        _fmt_ms(stats.p50_latency_ms), style=_latency_style(stats.p50_latency_ms)
    )
    lat_text.append(" · ", style="dim")
    lat_text.append("p95 ", style="dim")
    lat_text.append(
        _fmt_ms(stats.p95_latency_ms), style=_latency_style(stats.p95_latency_ms)
    )
    lat_text.append(" · ", style="dim")
    lat_text.append("p99 ", style="dim")
    lat_text.append(
        _fmt_ms(stats.p99_latency_ms), style=_latency_style(stats.p99_latency_ms)
    )
    right.add_row("latency", lat_text)
    # Same layout for time-to-first-token percentiles.
    ttft_text = Text()
    ttft_text.append("p50 ", style="dim")
    ttft_text.append(_fmt_ms(stats.p50_ttft_ms), style=_ttft_style(stats.p50_ttft_ms))
    ttft_text.append(" · ", style="dim")
    ttft_text.append("p95 ", style="dim")
    ttft_text.append(_fmt_ms(stats.p95_ttft_ms), style=_ttft_style(stats.p95_ttft_ms))
    ttft_text.append(" · ", style="dim")
    ttft_text.append("p99 ", style="dim")
    ttft_text.append(_fmt_ms(stats.p99_ttft_ms), style=_ttft_style(stats.p99_ttft_ms))
    right.add_row("TTFT", ttft_text)
    sess = Text()
    sess.append(f"{stats.distinct_sessions}")
    if stats.current_session:
        sess.append(" · current ", style="dim")
        sess.append(stats.current_session, style="magenta")
    right.add_row("sessions", sess)

    grid.add_row(left, right)
    return Panel(
        grid,
        title="[bold]live LLM traffic[/]",
        border_style="cyan",
        box=SIMPLE_HEAVY,
        padding=(0, 1),
    )
|
||||
|
||||
|
||||
def _model_rollup_table(rollups: list[ModelRollup]) -> Table:
    """Render the "by model" aggregate table.

    Shows a single dim placeholder row before any traffic has arrived.
    """
    table = Table(
        title="by model",
        title_justify="left",
        title_style="bold dim",
        caption="cost via DigitalOcean Gradient catalog",
        caption_justify="left",
        caption_style="dim italic",
        box=SIMPLE,
        header_style="bold",
        pad_edge=False,
        padding=(0, 1),
    )
    table.add_column("model", style="cyan", no_wrap=True)
    table.add_column("req", justify="right")
    table.add_column("input", justify="right")
    table.add_column("output", justify="right", style="green")
    table.add_column("cache wr", justify="right", style="yellow")
    table.add_column("cache rd", justify="right", style="yellow")
    table.add_column("tok/s", justify="right")
    table.add_column("cost", justify="right", style="green")
    if not rollups:
        # Placeholder: dim message plus em dashes for the 7 data columns.
        table.add_row(
            Text("no requests yet", style="dim italic"),
            *(["—"] * 7),
        )
        return table
    for r in rollups:
        # "—" rather than $0 when the model never had a known price.
        cost_cell = _fmt_cost(r.cost_usd) if r.has_cost else "—"
        table.add_row(
            _truncate_model(r.model),
            f"{r.requests:,}",
            _fmt_tokens(r.input_tokens),
            _fmt_tokens(r.output_tokens),
            _fmt_int(r.cache_write),
            _fmt_int(r.cache_read),
            _fmt_tps(r.avg_tokens_per_sec),
            cost_cell,
        )
    return table
|
||||
|
||||
|
||||
def _route_hit_table(hits: list[RouteHit]) -> Table:
    """Render the "route share" table: hits, traffic %, p95 latency, errors."""
    table = Table(
        title="route share",
        title_justify="left",
        title_style="bold dim",
        box=SIMPLE,
        header_style="bold",
        pad_edge=False,
        padding=(0, 1),
    )
    table.add_column("route", style="cyan")
    table.add_column("hits", justify="right")
    table.add_column("%", justify="right")
    table.add_column("p95", justify="right")
    table.add_column("err", justify="right")
    for h in hits:
        # Highlight non-zero error counts; em dash otherwise.
        err_cell = (
            Text(f"{h.error_count:,}", style="red bold") if h.error_count else "—"
        )
        table.add_row(
            h.route,
            f"{h.hits:,}",
            f"{h.pct:5.1f}%",
            Text(_fmt_ms(h.p95_latency_ms), style=_latency_style(h.p95_latency_ms)),
            err_cell,
        )
    return table
|
||||
|
||||
|
||||
def _recent_table(calls: list[LLMCall], limit: int = 15) -> Table:
    """Render the "recent" table of the last ``limit`` calls, newest first.

    Optional columns (route, cached-input, reasoning tokens) are only added
    when at least one call in the window has data for them, keeping the
    table narrow in the common case.
    """
    show_route = any(c.route_name for c in calls)
    show_cache = any((c.cached_input_tokens or 0) > 0 for c in calls)
    show_rsn = any((c.reasoning_tokens or 0) > 0 for c in calls)

    # Caption explains only the columns that are actually shown.
    caption_parts = ["in·new = fresh prompt tokens"]
    if show_cache:
        caption_parts.append("in·cache = cached read")
    if show_rsn:
        caption_parts.append("rsn = reasoning")
    caption_parts.append("lat = total latency")

    table = Table(
        title=f"recent · last {min(limit, len(calls)) if calls else 0}",
        title_justify="left",
        title_style="bold dim",
        caption=" · ".join(caption_parts),
        caption_justify="left",
        caption_style="dim italic",
        box=SIMPLE,
        header_style="bold",
        pad_edge=False,
        padding=(0, 1),
    )
    table.add_column("time", no_wrap=True)
    table.add_column("model", style="cyan", no_wrap=True)
    if show_route:
        table.add_column("route", style="yellow", no_wrap=True)
    table.add_column("in·new", justify="right")
    if show_cache:
        table.add_column("in·cache", justify="right", style="yellow")
    table.add_column("out", justify="right", style="green")
    if show_rsn:
        table.add_column("rsn", justify="right")
    table.add_column("tok/s", justify="right")
    table.add_column("TTFT", justify="right")
    table.add_column("lat", justify="right")
    table.add_column("cost", justify="right", style="green")
    table.add_column("status")

    if not calls:
        # Placeholder row sized to however many columns ended up enabled.
        cols = len(table.columns)
        table.add_row(
            Text("waiting for spans…", style="dim italic"),
            *(["—"] * (cols - 1)),
        )
        return table

    # Newest first; the very newest row gets a bolder style.
    recent = list(reversed(calls))[:limit]
    for idx, c in enumerate(recent):
        is_newest = idx == 0
        time_style = "bold white" if is_newest else None
        model_style = "bold cyan" if is_newest else "cyan"
        # Build the row in column order, mirroring the add_column calls above.
        row: list[object] = [
            (
                Text(c.timestamp.strftime("%H:%M:%S"), style=time_style)
                if time_style
                else c.timestamp.strftime("%H:%M:%S")
            ),
            Text(_truncate_model(c.model), style=model_style),
        ]
        if show_route:
            row.append(c.route_name or "—")
        row.append(_fmt_tokens(c.prompt_tokens))
        if show_cache:
            row.append(_fmt_int(c.cached_input_tokens))
        row.append(_fmt_tokens(c.completion_tokens))
        if show_rsn:
            row.append(_fmt_int(c.reasoning_tokens))
        row.extend(
            [
                _fmt_tps(c.tokens_per_sec),
                Text(_fmt_ms(c.ttft_ms), style=_ttft_style(c.ttft_ms)),
                Text(_fmt_ms(c.duration_ms), style=_latency_style(c.duration_ms)),
                _fmt_cost(c.cost_usd),
                _status_text(c.status_code),
            ]
        )
        table.add_row(*row)
    return table
|
||||
|
||||
|
||||
def _last_error(calls: list[LLMCall]) -> LLMCall | None:
|
||||
for c in reversed(calls):
|
||||
if c.status_code is not None and c.status_code >= 400:
|
||||
return c
|
||||
return None
|
||||
|
||||
|
||||
def _http_reason(code: int) -> str:
|
||||
try:
|
||||
return HTTPStatus(code).phrase
|
||||
except ValueError:
|
||||
return ""
|
||||
|
||||
|
||||
def _fmt_ago(ts: datetime) -> str:
|
||||
# `ts` is produced in collector.py via datetime.now(tz=...), but fall back
|
||||
# gracefully if a naive timestamp ever sneaks in.
|
||||
now = datetime.now(tz=ts.tzinfo) if ts.tzinfo else datetime.now()
|
||||
delta = (now - ts).total_seconds()
|
||||
if delta < 0:
|
||||
delta = 0
|
||||
if delta < 60:
|
||||
return f"{int(delta)}s ago"
|
||||
if delta < 3600:
|
||||
return f"{int(delta // 60)}m ago"
|
||||
return f"{int(delta // 3600)}h ago"
|
||||
|
||||
|
||||
def _error_banner(call: LLMCall) -> Panel:
|
||||
code = call.status_code or 0
|
||||
border = "red" if code >= 500 else "yellow"
|
||||
header = Text()
|
||||
header.append(f"● {code}", style=f"{border} bold")
|
||||
reason = _http_reason(code)
|
||||
if reason:
|
||||
header.append(f" {reason}", style=border)
|
||||
header.append(" · ", style="dim")
|
||||
header.append(_truncate_model(call.model, 48), style="cyan")
|
||||
if call.route_name:
|
||||
header.append(" · ", style="dim")
|
||||
header.append(call.route_name, style="yellow")
|
||||
header.append(" · ", style="dim")
|
||||
header.append(_fmt_ago(call.timestamp), style="dim")
|
||||
if call.request_id:
|
||||
header.append(" · req ", style="dim")
|
||||
header.append(call.request_id, style="magenta")
|
||||
return Panel(
|
||||
header,
|
||||
title="[bold]last error[/]",
|
||||
title_align="left",
|
||||
border_style=border,
|
||||
box=SIMPLE,
|
||||
padding=(0, 1),
|
||||
)
|
||||
|
||||
|
||||
def _footer(stats: AggregateStats) -> Text:
|
||||
waiting = stats.count == 0
|
||||
text = Text()
|
||||
text.append("Ctrl-C ", style="bold")
|
||||
text.append("exit", style="dim")
|
||||
text.append(" · OTLP :4317", style="dim")
|
||||
text.append(" · pricing: DigitalOcean ", style="dim")
|
||||
if waiting:
|
||||
text.append("waiting for spans", style="yellow")
|
||||
text.append(
|
||||
" — set tracing.opentracing_grpc_endpoint=localhost:4317", style="dim"
|
||||
)
|
||||
else:
|
||||
text.append(f"receiving · {stats.count:,} call(s) buffered", style="green")
|
||||
return text
|
||||
|
||||
|
||||
def render(calls: list[LLMCall]) -> Align:
|
||||
last = calls[-1] if calls else None
|
||||
stats = aggregates(calls)
|
||||
rollups = model_rollups(calls)
|
||||
hits = route_hits(calls)
|
||||
|
||||
parts: list[object] = [_summary_panel(last, stats)]
|
||||
err = _last_error(calls)
|
||||
if err is not None:
|
||||
parts.append(_error_banner(err))
|
||||
if hits:
|
||||
split = Table.grid(padding=(0, 2))
|
||||
split.add_column(no_wrap=False)
|
||||
split.add_column(no_wrap=False)
|
||||
split.add_row(_model_rollup_table(rollups), _route_hit_table(hits))
|
||||
parts.append(split)
|
||||
else:
|
||||
parts.append(_model_rollup_table(rollups))
|
||||
parts.append(_recent_table(calls))
|
||||
parts.append(_footer(stats))
|
||||
# Cap overall width so wide terminals don't stretch the layout into a
|
||||
# mostly-whitespace gap between columns.
|
||||
return Align.left(Group(*parts), width=MAX_WIDTH)
|
||||
99
cli/planoai/obs_cmd.py
Normal file
99
cli/planoai/obs_cmd.py
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
"""`planoai obs` — live observability TUI."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
|
||||
import rich_click as click
|
||||
from rich.console import Console
|
||||
from rich.live import Live
|
||||
|
||||
from planoai.consts import PLANO_COLOR
|
||||
from planoai.obs.collector import (
|
||||
DEFAULT_CAPACITY,
|
||||
DEFAULT_GRPC_PORT,
|
||||
LLMCallStore,
|
||||
ObsCollector,
|
||||
)
|
||||
from planoai.obs.pricing import PricingCatalog
|
||||
from planoai.obs.render import render
|
||||
|
||||
|
||||
@click.command(name="obs", help="Live observability console for Plano LLM traffic.")
|
||||
@click.option(
|
||||
"--port",
|
||||
type=int,
|
||||
default=DEFAULT_GRPC_PORT,
|
||||
show_default=True,
|
||||
help="OTLP/gRPC port to listen on. Must match the brightstaff tracing endpoint.",
|
||||
)
|
||||
@click.option(
|
||||
"--host",
|
||||
type=str,
|
||||
default="0.0.0.0",
|
||||
show_default=True,
|
||||
help="Host to bind the OTLP listener.",
|
||||
)
|
||||
@click.option(
|
||||
"--capacity",
|
||||
type=int,
|
||||
default=DEFAULT_CAPACITY,
|
||||
show_default=True,
|
||||
help="Max LLM calls kept in memory; older calls evicted FIFO.",
|
||||
)
|
||||
@click.option(
|
||||
"--refresh-ms",
|
||||
type=int,
|
||||
default=500,
|
||||
show_default=True,
|
||||
help="TUI refresh interval.",
|
||||
)
|
||||
def obs(port: int, host: str, capacity: int, refresh_ms: int) -> None:
|
||||
console = Console()
|
||||
console.print(
|
||||
f"[bold {PLANO_COLOR}]planoai obs[/] — loading DO pricing catalog...",
|
||||
end="",
|
||||
)
|
||||
pricing = PricingCatalog.fetch()
|
||||
if len(pricing):
|
||||
sample = ", ".join(pricing.sample_models(3))
|
||||
console.print(
|
||||
f" [green]{len(pricing)} models loaded[/] [dim]({sample}, ...)[/]"
|
||||
)
|
||||
else:
|
||||
console.print(
|
||||
" [yellow]no pricing loaded[/] — "
|
||||
"[dim]cost column will be blank (DO catalog unreachable)[/]"
|
||||
)
|
||||
|
||||
store = LLMCallStore(capacity=capacity)
|
||||
collector = ObsCollector(store=store, pricing=pricing, host=host, port=port)
|
||||
try:
|
||||
collector.start()
|
||||
except OSError as exc:
|
||||
console.print(f"[red]{exc}[/]")
|
||||
raise SystemExit(1)
|
||||
|
||||
console.print(
|
||||
f"Listening for OTLP spans on [bold]{host}:{port}[/]. "
|
||||
"Ensure plano config has [cyan]tracing.opentracing_grpc_endpoint: http://localhost:4317[/] "
|
||||
"and [cyan]tracing.random_sampling: 100[/] (or run [bold]planoai up[/] "
|
||||
"with no config — it wires this automatically)."
|
||||
)
|
||||
console.print("Press [bold]Ctrl-C[/] to exit.\n")
|
||||
|
||||
refresh = max(0.05, refresh_ms / 1000.0)
|
||||
try:
|
||||
with Live(
|
||||
render(store.snapshot()),
|
||||
console=console,
|
||||
refresh_per_second=1.0 / refresh,
|
||||
screen=False,
|
||||
) as live:
|
||||
while True:
|
||||
time.sleep(refresh)
|
||||
live.update(render(store.snapshot()))
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[dim]obs stopped[/]")
|
||||
finally:
|
||||
collector.stop()
|
||||
|
|
@ -61,7 +61,7 @@ def configure_rich_click(plano_color: str) -> None:
|
|||
},
|
||||
{
|
||||
"name": "Observability",
|
||||
"commands": ["trace"],
|
||||
"commands": ["trace", "obs"],
|
||||
},
|
||||
{
|
||||
"name": "Utilities",
|
||||
|
|
|
|||
|
|
@ -91,7 +91,12 @@ def convert_legacy_listeners(
|
|||
"type": "model",
|
||||
"port": 12000,
|
||||
"address": "0.0.0.0",
|
||||
"timeout": "30s",
|
||||
# LLM streaming responses routinely exceed 30s (extended thinking,
|
||||
# long tool reasoning, large completions). Match the 300s ceiling
|
||||
# used by the direct upstream-provider routes so Envoy doesn't
|
||||
# abort streams with UT mid-response. Users can override via their
|
||||
# plano_config.yaml `listeners.timeout` field.
|
||||
"timeout": "300s",
|
||||
"model_providers": model_providers or [],
|
||||
}
|
||||
|
||||
|
|
@ -100,7 +105,7 @@ def convert_legacy_listeners(
|
|||
"type": "prompt",
|
||||
"port": 10000,
|
||||
"address": "0.0.0.0",
|
||||
"timeout": "30s",
|
||||
"timeout": "300s",
|
||||
}
|
||||
|
||||
# Handle None case
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
[project]
|
||||
name = "planoai"
|
||||
version = "0.4.16"
|
||||
version = "0.4.21"
|
||||
description = "Python-based CLI tool to manage Plano."
|
||||
authors = [{name = "Katanemo Labs, Inc."}]
|
||||
readme = "README.md"
|
||||
|
|
@ -13,7 +13,7 @@ dependencies = [
|
|||
"opentelemetry-proto>=1.20.0",
|
||||
"questionary>=2.1.1,<3.0.0",
|
||||
"pyyaml>=6.0.2,<7.0.0",
|
||||
"requests>=2.31.0,<3.0.0",
|
||||
"requests>=2.33.0,<3.0.0",
|
||||
"urllib3>=2.6.3",
|
||||
"rich>=14.2.0",
|
||||
"rich-click>=1.9.5",
|
||||
|
|
|
|||
|
|
@ -1,7 +1,11 @@
|
|||
import json
|
||||
import pytest
|
||||
import yaml
|
||||
from unittest import mock
|
||||
from planoai.config_generator import validate_and_render_schema
|
||||
from planoai.config_generator import (
|
||||
validate_and_render_schema,
|
||||
migrate_inline_routing_preferences,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
|
|
@ -253,38 +257,72 @@ llm_providers:
|
|||
base_url: "http://custom.com/api/v2"
|
||||
provider_interface: openai
|
||||
|
||||
""",
|
||||
},
|
||||
{
|
||||
"id": "vercel_is_supported_provider",
|
||||
"expected_error": None,
|
||||
"plano_config": """
|
||||
version: v0.4.0
|
||||
|
||||
listeners:
|
||||
- name: llm
|
||||
type: model
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: vercel/*
|
||||
base_url: https://ai-gateway.vercel.sh/v1
|
||||
passthrough_auth: true
|
||||
|
||||
""",
|
||||
},
|
||||
{
|
||||
"id": "openrouter_is_supported_provider",
|
||||
"expected_error": None,
|
||||
"plano_config": """
|
||||
version: v0.4.0
|
||||
|
||||
listeners:
|
||||
- name: llm
|
||||
type: model
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openrouter/*
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
passthrough_auth: true
|
||||
|
||||
""",
|
||||
},
|
||||
{
|
||||
"id": "duplicate_routeing_preference_name",
|
||||
"expected_error": "Duplicate routing preference name",
|
||||
"plano_config": """
|
||||
version: v0.1.0
|
||||
version: v0.4.0
|
||||
|
||||
listeners:
|
||||
egress_traffic:
|
||||
address: 0.0.0.0
|
||||
- name: llm
|
||||
type: model
|
||||
port: 12000
|
||||
message_format: openai
|
||||
timeout: 30s
|
||||
|
||||
llm_providers:
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o-mini
|
||||
access_key: $OPENAI_API_KEY
|
||||
default: true
|
||||
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code understanding
|
||||
description: understand and explain existing code snippets, functions, or libraries
|
||||
|
||||
- model: openai/gpt-4.1
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code understanding
|
||||
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
|
||||
routing_preferences:
|
||||
- name: code understanding
|
||||
description: understand and explain existing code snippets, functions, or libraries
|
||||
models:
|
||||
- openai/gpt-4o
|
||||
- name: code understanding
|
||||
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
|
||||
models:
|
||||
- openai/gpt-4o-mini
|
||||
|
||||
tracing:
|
||||
random_sampling: 100
|
||||
|
|
@ -465,3 +503,238 @@ def test_convert_legacy_llm_providers_no_prompt_gateway():
|
|||
"port": 12000,
|
||||
"timeout": "30s",
|
||||
}
|
||||
|
||||
|
||||
def test_inline_routing_preferences_migrated_to_top_level():
|
||||
plano_config = """
|
||||
version: v0.3.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o-mini
|
||||
access_key: $OPENAI_API_KEY
|
||||
default: true
|
||||
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code understanding
|
||||
description: understand and explain existing code snippets, functions, or libraries
|
||||
|
||||
- model: anthropic/claude-sonnet-4-20250514
|
||||
access_key: $ANTHROPIC_API_KEY
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
assert config_yaml["version"] == "v0.4.0"
|
||||
for provider in config_yaml["model_providers"]:
|
||||
assert "routing_preferences" not in provider
|
||||
|
||||
top_level = config_yaml["routing_preferences"]
|
||||
by_name = {entry["name"]: entry for entry in top_level}
|
||||
assert set(by_name) == {"code understanding", "code generation"}
|
||||
assert by_name["code understanding"]["models"] == ["openai/gpt-4o"]
|
||||
assert by_name["code generation"]["models"] == [
|
||||
"anthropic/claude-sonnet-4-20250514"
|
||||
]
|
||||
assert (
|
||||
by_name["code understanding"]["description"]
|
||||
== "understand and explain existing code snippets, functions, or libraries"
|
||||
)
|
||||
|
||||
|
||||
def test_inline_same_name_across_providers_merges_models():
|
||||
plano_config = """
|
||||
version: v0.3.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
|
||||
|
||||
- model: anthropic/claude-sonnet-4-20250514
|
||||
access_key: $ANTHROPIC_API_KEY
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating new code snippets, functions, or boilerplate based on user prompts or requirements
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
top_level = config_yaml["routing_preferences"]
|
||||
assert len(top_level) == 1
|
||||
entry = top_level[0]
|
||||
assert entry["name"] == "code generation"
|
||||
assert entry["models"] == [
|
||||
"openai/gpt-4o",
|
||||
"anthropic/claude-sonnet-4-20250514",
|
||||
]
|
||||
assert config_yaml["version"] == "v0.4.0"
|
||||
|
||||
|
||||
def test_existing_top_level_routing_preferences_preserved():
|
||||
plano_config = """
|
||||
version: v0.4.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
- model: anthropic/claude-sonnet-4-20250514
|
||||
access_key: $ANTHROPIC_API_KEY
|
||||
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating new code snippets or boilerplate
|
||||
models:
|
||||
- openai/gpt-4o
|
||||
- anthropic/claude-sonnet-4-20250514
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
before = yaml.safe_dump(config_yaml, sort_keys=True)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
after = yaml.safe_dump(config_yaml, sort_keys=True)
|
||||
|
||||
assert before == after
|
||||
|
||||
|
||||
def test_existing_top_level_wins_over_inline_migration():
|
||||
plano_config = """
|
||||
version: v0.3.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: inline description should lose
|
||||
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: user-defined top-level description wins
|
||||
models:
|
||||
- openai/gpt-4o
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
top_level = config_yaml["routing_preferences"]
|
||||
assert len(top_level) == 1
|
||||
entry = top_level[0]
|
||||
assert entry["description"] == "user-defined top-level description wins"
|
||||
assert entry["models"] == ["openai/gpt-4o"]
|
||||
|
||||
|
||||
def test_wildcard_with_inline_routing_preferences_errors():
|
||||
plano_config = """
|
||||
version: v0.3.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openrouter/*
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
passthrough_auth: true
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating code
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
assert "wildcard" in str(excinfo.value).lower()
|
||||
|
||||
|
||||
def test_migration_bumps_version_even_without_inline_preferences():
|
||||
plano_config = """
|
||||
version: v0.3.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
assert "routing_preferences" not in config_yaml
|
||||
assert config_yaml["version"] == "v0.4.0"
|
||||
|
||||
|
||||
def test_migration_is_noop_on_v040_config_with_stray_inline_preferences():
|
||||
# v0.4.0 configs are assumed to be on the canonical top-level shape.
|
||||
# The migration intentionally does not rescue stray inline preferences
|
||||
# at v0.4.0+ so that the deprecation boundary is a clean version gate.
|
||||
plano_config = """
|
||||
version: v0.4.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
routing_preferences:
|
||||
- name: code generation
|
||||
description: generating new code
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
assert config_yaml["version"] == "v0.4.0"
|
||||
assert "routing_preferences" not in config_yaml
|
||||
assert config_yaml["model_providers"][0]["routing_preferences"] == [
|
||||
{"name": "code generation", "description": "generating new code"}
|
||||
]
|
||||
|
||||
|
||||
def test_migration_does_not_downgrade_newer_versions():
|
||||
plano_config = """
|
||||
version: v0.5.0
|
||||
|
||||
listeners:
|
||||
- type: model
|
||||
name: model_listener
|
||||
port: 12000
|
||||
|
||||
model_providers:
|
||||
- model: openai/gpt-4o
|
||||
access_key: $OPENAI_API_KEY
|
||||
"""
|
||||
config_yaml = yaml.safe_load(plano_config)
|
||||
migrate_inline_routing_preferences(config_yaml)
|
||||
|
||||
assert config_yaml["version"] == "v0.5.0"
|
||||
|
|
|
|||
111
cli/test/test_defaults.py
Normal file
111
cli/test/test_defaults.py
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
from pathlib import Path
|
||||
|
||||
import jsonschema
|
||||
import yaml
|
||||
|
||||
from planoai.defaults import (
|
||||
PROVIDER_DEFAULTS,
|
||||
detect_providers,
|
||||
synthesize_default_config,
|
||||
)
|
||||
|
||||
_SCHEMA_PATH = Path(__file__).parents[2] / "config" / "plano_config_schema.yaml"
|
||||
|
||||
|
||||
def _schema() -> dict:
|
||||
return yaml.safe_load(_SCHEMA_PATH.read_text())
|
||||
|
||||
|
||||
def test_zero_env_vars_produces_pure_passthrough():
|
||||
cfg = synthesize_default_config(env={})
|
||||
assert cfg["version"] == "v0.4.0"
|
||||
assert cfg["listeners"][0]["port"] == 12000
|
||||
for provider in cfg["model_providers"]:
|
||||
assert provider.get("passthrough_auth") is True
|
||||
assert "access_key" not in provider
|
||||
# No provider should be marked default in pure pass-through mode.
|
||||
assert provider.get("default") is not True
|
||||
# All known providers should be listed.
|
||||
names = {p["name"] for p in cfg["model_providers"]}
|
||||
assert "digitalocean" in names
|
||||
assert "vercel" in names
|
||||
assert "openrouter" in names
|
||||
assert "openai" in names
|
||||
assert "anthropic" in names
|
||||
|
||||
|
||||
def test_env_keys_promote_providers_to_env_keyed():
|
||||
cfg = synthesize_default_config(
|
||||
env={"OPENAI_API_KEY": "sk-1", "DO_API_KEY": "do-1"}
|
||||
)
|
||||
by_name = {p["name"]: p for p in cfg["model_providers"]}
|
||||
assert by_name["openai"].get("access_key") == "$OPENAI_API_KEY"
|
||||
assert by_name["openai"].get("passthrough_auth") is None
|
||||
assert by_name["digitalocean"].get("access_key") == "$DO_API_KEY"
|
||||
# Unset env keys remain pass-through.
|
||||
assert by_name["anthropic"].get("passthrough_auth") is True
|
||||
|
||||
|
||||
def test_no_default_is_synthesized():
|
||||
# Bare model names resolve via brightstaff's wildcard expansion registering
|
||||
# bare keys, so the synthesizer intentionally never sets `default: true`.
|
||||
cfg = synthesize_default_config(
|
||||
env={"OPENAI_API_KEY": "sk-1", "ANTHROPIC_API_KEY": "a-1"}
|
||||
)
|
||||
assert not any(p.get("default") is True for p in cfg["model_providers"])
|
||||
|
||||
|
||||
def test_listener_port_is_configurable():
|
||||
cfg = synthesize_default_config(env={}, listener_port=11000)
|
||||
assert cfg["listeners"][0]["port"] == 11000
|
||||
|
||||
|
||||
def test_detection_summary_strings():
|
||||
det = detect_providers(env={"OPENAI_API_KEY": "sk", "DO_API_KEY": "d"})
|
||||
summary = det.summary
|
||||
assert "env-keyed" in summary and "openai" in summary and "digitalocean" in summary
|
||||
assert "pass-through" in summary
|
||||
|
||||
|
||||
def test_tracing_block_points_at_local_console():
|
||||
cfg = synthesize_default_config(env={})
|
||||
tracing = cfg["tracing"]
|
||||
assert tracing["opentracing_grpc_endpoint"] == "http://localhost:4317"
|
||||
# random_sampling is a percentage in the plano config — 100 = every span.
|
||||
assert tracing["random_sampling"] == 100
|
||||
|
||||
|
||||
def test_synthesized_config_validates_against_schema():
|
||||
cfg = synthesize_default_config(env={"OPENAI_API_KEY": "sk"})
|
||||
jsonschema.validate(cfg, _schema())
|
||||
|
||||
|
||||
def test_provider_defaults_digitalocean_is_configured():
|
||||
by_name = {p.name: p for p in PROVIDER_DEFAULTS}
|
||||
assert "digitalocean" in by_name
|
||||
assert by_name["digitalocean"].env_var == "DO_API_KEY"
|
||||
assert by_name["digitalocean"].base_url == "https://inference.do-ai.run/v1"
|
||||
assert by_name["digitalocean"].model_pattern == "digitalocean/*"
|
||||
|
||||
|
||||
def test_provider_defaults_vercel_is_configured():
|
||||
by_name = {p.name: p for p in PROVIDER_DEFAULTS}
|
||||
assert "vercel" in by_name
|
||||
assert by_name["vercel"].env_var == "AI_GATEWAY_API_KEY"
|
||||
assert by_name["vercel"].base_url == "https://ai-gateway.vercel.sh/v1"
|
||||
assert by_name["vercel"].model_pattern == "vercel/*"
|
||||
|
||||
|
||||
def test_provider_defaults_openrouter_is_configured():
|
||||
by_name = {p.name: p for p in PROVIDER_DEFAULTS}
|
||||
assert "openrouter" in by_name
|
||||
assert by_name["openrouter"].env_var == "OPENROUTER_API_KEY"
|
||||
assert by_name["openrouter"].base_url == "https://openrouter.ai/api/v1"
|
||||
assert by_name["openrouter"].model_pattern == "openrouter/*"
|
||||
|
||||
|
||||
def test_openrouter_env_key_promotes_to_env_keyed():
|
||||
cfg = synthesize_default_config(env={"OPENROUTER_API_KEY": "or-1"})
|
||||
by_name = {p["name"]: p for p in cfg["model_providers"]}
|
||||
assert by_name["openrouter"].get("access_key") == "$OPENROUTER_API_KEY"
|
||||
assert by_name["openrouter"].get("passthrough_auth") is None
|
||||
145
cli/test/test_obs_collector.py
Normal file
145
cli/test/test_obs_collector.py
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
import time
|
||||
from datetime import datetime, timezone
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from planoai.obs.collector import LLMCall, LLMCallStore, span_to_llm_call
|
||||
|
||||
|
||||
def _mk_attr(key: str, value):
|
||||
v = MagicMock()
|
||||
if isinstance(value, bool):
|
||||
v.WhichOneof.return_value = "bool_value"
|
||||
v.bool_value = value
|
||||
elif isinstance(value, int):
|
||||
v.WhichOneof.return_value = "int_value"
|
||||
v.int_value = value
|
||||
elif isinstance(value, float):
|
||||
v.WhichOneof.return_value = "double_value"
|
||||
v.double_value = value
|
||||
else:
|
||||
v.WhichOneof.return_value = "string_value"
|
||||
v.string_value = str(value)
|
||||
kv = MagicMock()
|
||||
kv.key = key
|
||||
kv.value = v
|
||||
return kv
|
||||
|
||||
|
||||
def _mk_span(
|
||||
attrs: dict, start_ns: int | None = None, span_id_hex: str = "ab"
|
||||
) -> MagicMock:
|
||||
span = MagicMock()
|
||||
span.attributes = [_mk_attr(k, v) for k, v in attrs.items()]
|
||||
span.start_time_unix_nano = start_ns or int(time.time() * 1_000_000_000)
|
||||
span.span_id.hex.return_value = span_id_hex
|
||||
return span
|
||||
|
||||
|
||||
def test_span_without_llm_model_is_ignored():
|
||||
span = _mk_span({"http.method": "POST"})
|
||||
assert span_to_llm_call(span, "plano(llm)") is None
|
||||
|
||||
|
||||
def test_span_with_full_llm_attrs_produces_call():
|
||||
span = _mk_span(
|
||||
{
|
||||
"llm.model": "openai-gpt-5.4",
|
||||
"model.requested": "router:software-engineering",
|
||||
"plano.session_id": "sess-abc",
|
||||
"plano.route.name": "software-engineering",
|
||||
"llm.is_streaming": False,
|
||||
"llm.duration_ms": 1234,
|
||||
"llm.time_to_first_token": 210,
|
||||
"llm.usage.prompt_tokens": 100,
|
||||
"llm.usage.completion_tokens": 50,
|
||||
"llm.usage.total_tokens": 150,
|
||||
"llm.usage.cached_input_tokens": 30,
|
||||
"llm.usage.cache_creation_tokens": 5,
|
||||
"llm.usage.reasoning_tokens": 200,
|
||||
"http.status_code": 200,
|
||||
"request_id": "req-42",
|
||||
}
|
||||
)
|
||||
call = span_to_llm_call(span, "plano(llm)")
|
||||
assert call is not None
|
||||
assert call.request_id == "req-42"
|
||||
assert call.model == "openai-gpt-5.4"
|
||||
assert call.request_model == "router:software-engineering"
|
||||
assert call.session_id == "sess-abc"
|
||||
assert call.route_name == "software-engineering"
|
||||
assert call.is_streaming is False
|
||||
assert call.duration_ms == 1234.0
|
||||
assert call.ttft_ms == 210.0
|
||||
assert call.prompt_tokens == 100
|
||||
assert call.completion_tokens == 50
|
||||
assert call.total_tokens == 150
|
||||
assert call.cached_input_tokens == 30
|
||||
assert call.cache_creation_tokens == 5
|
||||
assert call.reasoning_tokens == 200
|
||||
assert call.status_code == 200
|
||||
|
||||
|
||||
def test_pricing_lookup_attaches_cost():
|
||||
class StubPricing:
|
||||
def cost_for_call(self, call):
|
||||
# Simple: 2 * prompt + 3 * completion, in cents
|
||||
return 0.02 * (call.prompt_tokens or 0) + 0.03 * (
|
||||
call.completion_tokens or 0
|
||||
)
|
||||
|
||||
span = _mk_span(
|
||||
{
|
||||
"llm.model": "do/openai-gpt-5.4",
|
||||
"llm.usage.prompt_tokens": 10,
|
||||
"llm.usage.completion_tokens": 2,
|
||||
}
|
||||
)
|
||||
call = span_to_llm_call(span, "plano(llm)", pricing=StubPricing())
|
||||
assert call is not None
|
||||
assert call.cost_usd == pytest.approx(0.26)
|
||||
|
||||
|
||||
def test_tpt_and_tokens_per_sec_derived():
|
||||
call = LLMCall(
|
||||
request_id="x",
|
||||
timestamp=datetime.now(tz=timezone.utc),
|
||||
model="m",
|
||||
duration_ms=1000,
|
||||
ttft_ms=200,
|
||||
completion_tokens=80,
|
||||
)
|
||||
# (1000 - 200) / 80 = 10ms per token => 100 tokens/sec
|
||||
assert call.tpt_ms == 10.0
|
||||
assert call.tokens_per_sec == 100.0
|
||||
|
||||
|
||||
def test_tpt_returns_none_when_no_completion_tokens():
|
||||
call = LLMCall(
|
||||
request_id="x",
|
||||
timestamp=datetime.now(tz=timezone.utc),
|
||||
model="m",
|
||||
duration_ms=1000,
|
||||
ttft_ms=200,
|
||||
completion_tokens=0,
|
||||
)
|
||||
assert call.tpt_ms is None
|
||||
assert call.tokens_per_sec is None
|
||||
|
||||
|
||||
def test_store_evicts_fifo_at_capacity():
|
||||
store = LLMCallStore(capacity=3)
|
||||
now = datetime.now(tz=timezone.utc)
|
||||
for i in range(5):
|
||||
store.add(
|
||||
LLMCall(
|
||||
request_id=f"r{i}",
|
||||
timestamp=now,
|
||||
model="m",
|
||||
)
|
||||
)
|
||||
snap = store.snapshot()
|
||||
assert len(snap) == 3
|
||||
assert [c.request_id for c in snap] == ["r2", "r3", "r4"]
|
||||
146
cli/test/test_obs_pricing.py
Normal file
146
cli/test/test_obs_pricing.py
Normal file
|
|
@ -0,0 +1,146 @@
|
|||
from datetime import datetime, timezone
|
||||
|
||||
from planoai.obs.collector import LLMCall
|
||||
from planoai.obs.pricing import ModelPrice, PricingCatalog
|
||||
|
||||
|
||||
def _call(model: str, prompt: int, completion: int, cached: int = 0) -> LLMCall:
|
||||
return LLMCall(
|
||||
request_id="r",
|
||||
timestamp=datetime.now(tz=timezone.utc),
|
||||
model=model,
|
||||
prompt_tokens=prompt,
|
||||
completion_tokens=completion,
|
||||
cached_input_tokens=cached,
|
||||
)
|
||||
|
||||
|
||||
def test_lookup_matches_bare_and_prefixed():
|
||||
prices = {
|
||||
"openai-gpt-5.4": ModelPrice(
|
||||
input_per_token_usd=0.000001, output_per_token_usd=0.000002
|
||||
)
|
||||
}
|
||||
catalog = PricingCatalog(prices)
|
||||
assert catalog.price_for("openai-gpt-5.4") is not None
|
||||
# do/openai-gpt-5.4 should resolve after stripping the provider prefix.
|
||||
assert catalog.price_for("do/openai-gpt-5.4") is not None
|
||||
assert catalog.price_for("unknown-model") is None
|
||||
|
||||
|
||||
def test_cost_computation_without_cache():
|
||||
prices = {
|
||||
"m": ModelPrice(input_per_token_usd=0.000001, output_per_token_usd=0.000002)
|
||||
}
|
||||
cost = PricingCatalog(prices).cost_for_call(_call("m", 1000, 500))
|
||||
assert cost == 0.002 # 1000 * 1e-6 + 500 * 2e-6
|
||||
|
||||
|
||||
def test_cost_computation_with_cached_discount():
|
||||
prices = {
|
||||
"m": ModelPrice(
|
||||
input_per_token_usd=0.000001,
|
||||
output_per_token_usd=0.000002,
|
||||
cached_input_per_token_usd=0.0000001,
|
||||
)
|
||||
}
|
||||
# 800 fresh @ 1e-6 = 8e-4; 200 cached @ 1e-7 = 2e-5; 500 out @ 2e-6 = 1e-3
|
||||
cost = PricingCatalog(prices).cost_for_call(_call("m", 1000, 500, cached=200))
|
||||
assert cost == round(0.0008 + 0.00002 + 0.001, 6)
|
||||
|
||||
|
||||
def test_empty_catalog_returns_none():
|
||||
assert PricingCatalog().cost_for_call(_call("m", 100, 50)) is None
|
||||
|
||||
|
||||
def test_parse_do_catalog_treats_small_values_as_per_token():
|
||||
"""DO's real catalog uses per-token values under the `_per_million` key
|
||||
(e.g. 5E-8 for GPT-oss-20b). We treat values < 1 as already per-token."""
|
||||
from planoai.obs.pricing import _parse_do_pricing
|
||||
|
||||
sample = {
|
||||
"data": [
|
||||
{
|
||||
"model_id": "openai-gpt-oss-20b",
|
||||
"pricing": {
|
||||
"input_price_per_million": 5e-8,
|
||||
"output_price_per_million": 4.5e-7,
|
||||
},
|
||||
},
|
||||
{
|
||||
"model_id": "openai-gpt-oss-120b",
|
||||
"pricing": {
|
||||
"input_price_per_million": 1e-7,
|
||||
"output_price_per_million": 7e-7,
|
||||
},
|
||||
},
|
||||
]
|
||||
}
|
||||
prices = _parse_do_pricing(sample)
|
||||
# Values < 1 are assumed to already be per-token — no extra division.
|
||||
assert prices["openai-gpt-oss-20b"].input_per_token_usd == 5e-8
|
||||
assert prices["openai-gpt-oss-20b"].output_per_token_usd == 4.5e-7
|
||||
assert prices["openai-gpt-oss-120b"].input_per_token_usd == 1e-7
|
||||
|
||||
|
||||
def test_anthropic_aliases_match_plano_emitted_names():
|
||||
"""DO publishes 'anthropic-claude-opus-4.7' and 'anthropic-claude-haiku-4.5';
|
||||
Plano emits 'claude-opus-4-7' and 'claude-haiku-4-5-20251001'. Aliases
|
||||
registered at parse time should bridge the gap."""
|
||||
from planoai.obs.pricing import _parse_do_pricing
|
||||
|
||||
sample = {
|
||||
"data": [
|
||||
{
|
||||
"model_id": "anthropic-claude-opus-4.7",
|
||||
"pricing": {
|
||||
"input_price_per_million": 15.0,
|
||||
"output_price_per_million": 75.0,
|
||||
},
|
||||
},
|
||||
{
|
||||
"model_id": "anthropic-claude-haiku-4.5",
|
||||
"pricing": {
|
||||
"input_price_per_million": 1.0,
|
||||
"output_price_per_million": 5.0,
|
||||
},
|
||||
},
|
||||
{
|
||||
"model_id": "anthropic-claude-4.6-sonnet",
|
||||
"pricing": {
|
||||
"input_price_per_million": 3.0,
|
||||
"output_price_per_million": 15.0,
|
||||
},
|
||||
},
|
||||
]
|
||||
}
|
||||
catalog = PricingCatalog(_parse_do_pricing(sample))
|
||||
# Family-last shapes Plano emits.
|
||||
assert catalog.price_for("claude-opus-4-7") is not None
|
||||
assert catalog.price_for("claude-haiku-4-5") is not None
|
||||
# Date-suffixed name (Anthropic API style).
|
||||
assert catalog.price_for("claude-haiku-4-5-20251001") is not None
|
||||
# Word-order swap: DO has 'claude-4.6-sonnet', Plano emits 'claude-sonnet-4-6'.
|
||||
assert catalog.price_for("claude-sonnet-4-6") is not None
|
||||
# Original DO ids still resolve.
|
||||
assert catalog.price_for("anthropic-claude-opus-4.7") is not None
|
||||
|
||||
|
||||
def test_parse_do_catalog_divides_large_values_as_per_million():
|
||||
"""A provider that genuinely reports $5-per-million in that field gets divided."""
|
||||
from planoai.obs.pricing import _parse_do_pricing
|
||||
|
||||
sample = {
|
||||
"data": [
|
||||
{
|
||||
"model_id": "mystery-model",
|
||||
"pricing": {
|
||||
"input_price_per_million": 5.0, # > 1 → treated as per-million
|
||||
"output_price_per_million": 15.0,
|
||||
},
|
||||
},
|
||||
]
|
||||
}
|
||||
prices = _parse_do_pricing(sample)
|
||||
assert prices["mystery-model"].input_per_token_usd == 5.0 / 1_000_000
|
||||
assert prices["mystery-model"].output_per_token_usd == 15.0 / 1_000_000
|
||||
106
cli/test/test_obs_render.py
Normal file
106
cli/test/test_obs_render.py
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
from planoai.obs.collector import LLMCall
|
||||
from planoai.obs.render import aggregates, model_rollups, route_hits
|
||||
|
||||
|
||||
def _call(
|
||||
model: str,
|
||||
ts: datetime,
|
||||
prompt=0,
|
||||
completion=0,
|
||||
cost=None,
|
||||
route=None,
|
||||
session=None,
|
||||
cache_read=0,
|
||||
cache_write=0,
|
||||
):
|
||||
return LLMCall(
|
||||
request_id="r",
|
||||
timestamp=ts,
|
||||
model=model,
|
||||
prompt_tokens=prompt,
|
||||
completion_tokens=completion,
|
||||
cached_input_tokens=cache_read,
|
||||
cache_creation_tokens=cache_write,
|
||||
cost_usd=cost,
|
||||
route_name=route,
|
||||
session_id=session,
|
||||
)
|
||||
|
||||
|
||||
def test_aggregates_sum_and_session_counts():
|
||||
now = datetime.now(tz=timezone.utc).astimezone()
|
||||
calls = [
|
||||
_call(
|
||||
"m1",
|
||||
now - timedelta(seconds=50),
|
||||
prompt=10,
|
||||
completion=5,
|
||||
cost=0.001,
|
||||
session="s1",
|
||||
),
|
||||
_call(
|
||||
"m2",
|
||||
now - timedelta(seconds=40),
|
||||
prompt=20,
|
||||
completion=10,
|
||||
cost=0.002,
|
||||
session="s1",
|
||||
),
|
||||
_call(
|
||||
"m1",
|
||||
now - timedelta(seconds=30),
|
||||
prompt=30,
|
||||
completion=15,
|
||||
cost=0.003,
|
||||
session="s2",
|
||||
),
|
||||
]
|
||||
stats = aggregates(calls)
|
||||
assert stats.count == 3
|
||||
assert stats.total_cost_usd == 0.006
|
||||
assert stats.total_input_tokens == 60
|
||||
assert stats.total_output_tokens == 30
|
||||
assert stats.distinct_sessions == 2
|
||||
assert stats.current_session == "s2"
|
||||
|
||||
|
||||
def test_rollups_split_by_model_and_cache():
|
||||
now = datetime.now(tz=timezone.utc).astimezone()
|
||||
calls = [
|
||||
_call(
|
||||
"m1", now, prompt=10, completion=5, cost=0.001, cache_write=3, cache_read=7
|
||||
),
|
||||
_call("m1", now, prompt=20, completion=10, cost=0.002, cache_read=1),
|
||||
_call("m2", now, prompt=30, completion=15, cost=0.004),
|
||||
]
|
||||
rollups = model_rollups(calls)
|
||||
by_model = {r.model: r for r in rollups}
|
||||
assert by_model["m1"].requests == 2
|
||||
assert by_model["m1"].input_tokens == 30
|
||||
assert by_model["m1"].cache_write == 3
|
||||
assert by_model["m1"].cache_read == 8
|
||||
assert by_model["m2"].input_tokens == 30
|
||||
|
||||
|
||||
def test_route_hits_only_for_routed_calls():
|
||||
now = datetime.now(tz=timezone.utc).astimezone()
|
||||
calls = [
|
||||
_call("m", now, route="code"),
|
||||
_call("m", now, route="code"),
|
||||
_call("m", now, route="summarization"),
|
||||
_call("m", now), # no route
|
||||
]
|
||||
hits = route_hits(calls)
|
||||
# Only calls with route names are counted.
|
||||
assert sum(h.hits for h in hits) == 3
|
||||
hits_by_name = {h.route: h for h in hits}
|
||||
assert hits_by_name["code"].hits == 2
|
||||
assert hits_by_name["summarization"].hits == 1
|
||||
|
||||
|
||||
def test_route_hits_empty_when_no_routes():
|
||||
now = datetime.now(tz=timezone.utc).astimezone()
|
||||
calls = [_call("m", now), _call("m", now)]
|
||||
assert route_hits(calls) == []
|
||||
16
cli/uv.lock
generated
16
cli/uv.lock
generated
|
|
@ -337,7 +337,7 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "planoai"
|
||||
version = "0.4.16"
|
||||
version = "0.4.21"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "click" },
|
||||
|
|
@ -373,7 +373,7 @@ requires-dist = [
|
|||
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=8.4.1,<9.0.0" },
|
||||
{ name = "pyyaml", specifier = ">=6.0.2,<7.0.0" },
|
||||
{ name = "questionary", specifier = ">=2.1.1,<3.0.0" },
|
||||
{ name = "requests", specifier = ">=2.31.0,<3.0.0" },
|
||||
{ name = "requests", specifier = ">=2.33.0,<3.0.0" },
|
||||
{ name = "rich", specifier = ">=14.2.0" },
|
||||
{ name = "rich-click", specifier = ">=1.9.5" },
|
||||
{ name = "urllib3", specifier = ">=2.6.3" },
|
||||
|
|
@ -421,11 +421,11 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "pygments"
|
||||
version = "2.19.2"
|
||||
version = "2.20.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c3/b2/bc9c9196916376152d655522fdcebac55e66de6603a76a02bca1b6414f6c/pygments-2.20.0.tar.gz", hash = "sha256:6757cd03768053ff99f3039c1a36d6c0aa0b263438fcab17520b30a303a82b5f", size = 4955991, upload-time = "2026-03-29T13:29:33.898Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/7e/a72dd26f3b0f4f2bf1dd8923c85f7ceb43172af56d63c7383eb62b332364/pygments-2.20.0-py3-none-any.whl", hash = "sha256:81a9e26dd42fd28a23a2d169d86d7ac03b46e2f8b59ed4698fb4785f946d0176", size = 1231151, upload-time = "2026-03-29T13:29:30.038Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -518,7 +518,7 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "requests"
|
||||
version = "2.32.5"
|
||||
version = "2.33.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "certifi" },
|
||||
|
|
@ -526,9 +526,9 @@ dependencies = [
|
|||
{ name = "idna" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/a4/98b9c7c6428a668bf7e42ebb7c79d576a1c3c1e3ae2d47e674b468388871/requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517", size = 134120, upload-time = "2026-03-30T16:09:15.531Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
|
|||
|
|
@ -901,6 +901,60 @@ static_resources:
|
|||
validation_context:
|
||||
trusted_ca:
|
||||
filename: {{ upstream_tls_ca_path | default('/etc/ssl/certs/ca-certificates.crt') }}
|
||||
- name: digitalocean
|
||||
connect_timeout: {{ upstream_connect_timeout | default('5s') }}
|
||||
type: LOGICAL_DNS
|
||||
dns_lookup_family: V4_ONLY
|
||||
lb_policy: ROUND_ROBIN
|
||||
load_assignment:
|
||||
cluster_name: digitalocean
|
||||
endpoints:
|
||||
- lb_endpoints:
|
||||
- endpoint:
|
||||
address:
|
||||
socket_address:
|
||||
address: inference.do-ai.run
|
||||
port_value: 443
|
||||
hostname: "inference.do-ai.run"
|
||||
transport_socket:
|
||||
name: envoy.transport_sockets.tls
|
||||
typed_config:
|
||||
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
|
||||
sni: inference.do-ai.run
|
||||
common_tls_context:
|
||||
tls_params:
|
||||
tls_minimum_protocol_version: TLSv1_2
|
||||
tls_maximum_protocol_version: TLSv1_3
|
||||
validation_context:
|
||||
trusted_ca:
|
||||
filename: {{ upstream_tls_ca_path | default('/etc/ssl/certs/ca-certificates.crt') }}
|
||||
- name: xiaomi
|
||||
connect_timeout: {{ upstream_connect_timeout | default('5s') }}
|
||||
type: LOGICAL_DNS
|
||||
dns_lookup_family: V4_ONLY
|
||||
lb_policy: ROUND_ROBIN
|
||||
load_assignment:
|
||||
cluster_name: xiaomi
|
||||
endpoints:
|
||||
- lb_endpoints:
|
||||
- endpoint:
|
||||
address:
|
||||
socket_address:
|
||||
address: api.xiaomimimo.com
|
||||
port_value: 443
|
||||
hostname: "api.xiaomimimo.com"
|
||||
transport_socket:
|
||||
name: envoy.transport_sockets.tls
|
||||
typed_config:
|
||||
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
|
||||
sni: api.xiaomimimo.com
|
||||
common_tls_context:
|
||||
tls_params:
|
||||
tls_minimum_protocol_version: TLSv1_2
|
||||
tls_maximum_protocol_version: TLSv1_3
|
||||
validation_context:
|
||||
trusted_ca:
|
||||
filename: {{ upstream_tls_ca_path | default('/etc/ssl/certs/ca-certificates.crt') }}
|
||||
- name: mistral_7b_instruct
|
||||
connect_timeout: 0.5s
|
||||
type: STRICT_DNS
|
||||
|
|
|
|||
541
config/grafana/brightstaff_dashboard.json
Normal file
541
config/grafana/brightstaff_dashboard.json
Normal file
|
|
@ -0,0 +1,541 @@
|
|||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": "-- Grafana --",
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"description": "RED, LLM upstream, routing service, and process metrics for brightstaff. Pair with Envoy admin metrics from cluster=bright_staff.",
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 1,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 0 },
|
||||
"id": 100,
|
||||
"panels": [],
|
||||
"title": "HTTP RED",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": {
|
||||
"axisLabel": "req/s",
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 10,
|
||||
"lineWidth": 1,
|
||||
"showPoints": "never"
|
||||
},
|
||||
"unit": "reqps"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 1 },
|
||||
"id": 1,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (handler) (rate(brightstaff_http_requests_total[1m]))",
|
||||
"legendFormat": "{{handler}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Rate — brightstaff RPS by handler",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "5xx fraction over 5m. Page-worthy when sustained above ~1%.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "green", "value": null },
|
||||
{ "color": "yellow", "value": 0.01 },
|
||||
{ "color": "red", "value": 0.05 }
|
||||
]
|
||||
},
|
||||
"unit": "percentunit"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 1 },
|
||||
"id": 2,
|
||||
"options": {
|
||||
"colorMode": "background",
|
||||
"graphMode": "area",
|
||||
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum(rate(brightstaff_http_requests_total{status_class=\"5xx\"}[5m])) / clamp_min(sum(rate(brightstaff_http_requests_total[5m])), 1)",
|
||||
"legendFormat": "5xx rate",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Errors — brightstaff 5xx rate",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "p50/p95/p99 by handler, computed from histogram buckets over 5m.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 5, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "s"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 24, "x": 0, "y": 9 },
|
||||
"id": 3,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.50, sum by (le, handler) (rate(brightstaff_http_request_duration_seconds_bucket[5m])))",
|
||||
"legendFormat": "p50 {{handler}}",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.95, sum by (le, handler) (rate(brightstaff_http_request_duration_seconds_bucket[5m])))",
|
||||
"legendFormat": "p95 {{handler}}",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.99, sum by (le, handler) (rate(brightstaff_http_request_duration_seconds_bucket[5m])))",
|
||||
"legendFormat": "p99 {{handler}}",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"title": "Duration — p50 / p95 / p99 by handler",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "In-flight requests by handler. Climbs before latency does when brightstaff is saturated.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 10, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "short"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 24, "x": 0, "y": 18 },
|
||||
"id": 4,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (handler) (brightstaff_http_in_flight_requests)",
|
||||
"legendFormat": "{{handler}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "In-flight requests by handler",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 26 },
|
||||
"id": 200,
|
||||
"panels": [],
|
||||
"title": "LLM upstream",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 5, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "s"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 0, "y": 27 },
|
||||
"id": 5,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.95, sum by (le, provider, model) (rate(brightstaff_llm_upstream_duration_seconds_bucket[5m])))",
|
||||
"legendFormat": "p95 {{provider}}/{{model}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "LLM upstream p95 by provider/model",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "All non-success error classes. timeout/connect = network, 5xx/429 = provider, parse = body shape mismatch, stream = mid-stream disconnect.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 30, "lineWidth": 1, "showPoints": "never", "stacking": { "mode": "normal" } },
|
||||
"unit": "reqps"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 12, "y": 27 },
|
||||
"id": 6,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (provider, error_class) (rate(brightstaff_llm_upstream_requests_total{error_class!=\"none\"}[5m]))",
|
||||
"legendFormat": "{{provider}} / {{error_class}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "LLM upstream errors by provider / class",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Streaming only. Empty if the route never streams.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 5, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "s"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 0, "y": 36 },
|
||||
"id": 7,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.95, sum by (le, provider, model) (rate(brightstaff_llm_time_to_first_token_seconds_bucket[5m])))",
|
||||
"legendFormat": "p95 {{provider}}/{{model}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Time-to-first-token p95 (streaming)",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Tokens/sec by provider/model/kind — proxy for cost. Stacked.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 30, "lineWidth": 1, "showPoints": "never", "stacking": { "mode": "normal" } },
|
||||
"unit": "tokens/s"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 12, "y": 36 },
|
||||
"id": 8,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (provider, model, kind) (rate(brightstaff_llm_tokens_total[5m]))",
|
||||
"legendFormat": "{{provider}}/{{model}} {{kind}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Token throughput by provider / model / kind",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 45 },
|
||||
"id": 300,
|
||||
"panels": [],
|
||||
"title": "Routing service",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Which models the orchestrator picked over the last 15 minutes.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"unit": "short"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 0, "y": 46 },
|
||||
"id": 9,
|
||||
"options": {
|
||||
"displayMode": "gradient",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (selected_model) (increase(brightstaff_router_decisions_total[15m]))",
|
||||
"legendFormat": "{{selected_model}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Model selection distribution (last 15m)",
|
||||
"type": "bargauge"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Fraction of decisions that fell back (orchestrator returned `none` or errored). High = router can't classify intent or no candidates configured.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 10, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "percentunit"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 9, "w": 12, "x": 12, "y": 46 },
|
||||
"id": 10,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (route) (rate(brightstaff_router_decisions_total{fallback=\"true\"}[5m])) / clamp_min(sum by (route) (rate(brightstaff_router_decisions_total[5m])), 1)",
|
||||
"legendFormat": "{{route}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Fallback rate by route",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 5, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "s"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 55 },
|
||||
"id": 11,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "histogram_quantile(0.95, sum by (le, route) (rate(brightstaff_router_decision_duration_seconds_bucket[5m])))",
|
||||
"legendFormat": "p95 {{route}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Router decision p95 latency",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Hit / (hit + miss). Low ratio = sessions aren't being reused or TTL too short.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "thresholds" },
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{ "color": "red", "value": null },
|
||||
{ "color": "yellow", "value": 0.5 },
|
||||
{ "color": "green", "value": 0.8 }
|
||||
]
|
||||
},
|
||||
"unit": "percentunit",
|
||||
"min": 0,
|
||||
"max": 1
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 6, "x": 12, "y": 55 },
|
||||
"id": 12,
|
||||
"options": {
|
||||
"colorMode": "background",
|
||||
"graphMode": "area",
|
||||
"reduceOptions": { "calcs": ["lastNotNull"], "fields": "", "values": false }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum(rate(brightstaff_session_cache_events_total{outcome=\"hit\"}[5m])) / clamp_min(sum(rate(brightstaff_session_cache_events_total{outcome=~\"hit|miss\"}[5m])), 1)",
|
||||
"legendFormat": "hit rate",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Session cache hit rate",
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "decision_served = a real model picked. no_candidates = sentinel `none` returned. policy_error = orchestrator failed.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 30, "lineWidth": 1, "showPoints": "never", "stacking": { "mode": "normal" } },
|
||||
"unit": "reqps"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 6, "x": 18, "y": 55 },
|
||||
"id": 13,
|
||||
"options": {
|
||||
"legend": { "displayMode": "list", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum by (outcome) (rate(brightstaff_routing_service_requests_total[5m]))",
|
||||
"legendFormat": "{{outcome}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "/routing/* outcomes",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"collapsed": false,
|
||||
"gridPos": { "h": 1, "w": 24, "x": 0, "y": 63 },
|
||||
"id": 400,
|
||||
"panels": [],
|
||||
"title": "Process & Envoy link",
|
||||
"type": "row"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"description": "Compare to brightstaff RPS (panel 1) — sustained gap = network or Envoy queueing.",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 10, "lineWidth": 1, "showPoints": "never" },
|
||||
"unit": "reqps"
|
||||
}
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 0, "y": 64 },
|
||||
"id": 14,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum(rate(envoy_cluster_upstream_rq_total{envoy_cluster_name=\"bright_staff\"}[1m]))",
|
||||
"legendFormat": "envoy → bright_staff",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "sum(rate(brightstaff_http_requests_total[1m]))",
|
||||
"legendFormat": "brightstaff served",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Envoy → brightstaff link health",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": { "mode": "palette-classic" },
|
||||
"custom": { "drawStyle": "line", "fillOpacity": 10, "lineWidth": 1, "showPoints": "never" }
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "RSS" },
|
||||
"properties": [{ "id": "unit", "value": "bytes" }]
|
||||
},
|
||||
{
|
||||
"matcher": { "id": "byName", "options": "CPU" },
|
||||
"properties": [{ "id": "unit", "value": "percentunit" }]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": { "h": 8, "w": 12, "x": 12, "y": 64 },
|
||||
"id": 15,
|
||||
"options": {
|
||||
"legend": { "displayMode": "table", "placement": "bottom", "showLegend": true },
|
||||
"tooltip": { "mode": "multi" }
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "process_resident_memory_bytes{job=\"brightstaff\"}",
|
||||
"legendFormat": "RSS",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
|
||||
"expr": "rate(process_cpu_seconds_total{job=\"brightstaff\"}[1m])",
|
||||
"legendFormat": "CPU",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Brightstaff process RSS / CPU",
|
||||
"type": "timeseries"
|
||||
}
|
||||
],
|
||||
"refresh": "30s",
|
||||
"schemaVersion": 39,
|
||||
"tags": ["plano", "brightstaff", "llm"],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"name": "DS_PROMETHEUS",
|
||||
"label": "Prometheus",
|
||||
"type": "datasource",
|
||||
"query": "prometheus",
|
||||
"current": { "selected": false, "text": "Prometheus", "value": "DS_PROMETHEUS" },
|
||||
"hide": 0,
|
||||
"refresh": 1,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"includeAll": false,
|
||||
"multi": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": { "from": "now-1h", "to": "now" },
|
||||
"timepicker": {},
|
||||
"timezone": "browser",
|
||||
"title": "Brightstaff (Plano dataplane)",
|
||||
"uid": "brightstaff",
|
||||
"version": 1,
|
||||
"weekStart": ""
|
||||
}
|
||||
43
config/grafana/docker-compose.yaml
Normal file
43
config/grafana/docker-compose.yaml
Normal file
|
|
@ -0,0 +1,43 @@
|
|||
# One-command Prometheus + Grafana stack for observing a locally-running
|
||||
# Plano (Envoy admin :9901 + brightstaff :9092 on the host).
|
||||
#
|
||||
# cd config/grafana
|
||||
# docker compose up -d
|
||||
# open http://localhost:3000 (admin / admin)
|
||||
#
|
||||
# Grafana is preloaded with:
|
||||
# - Prometheus datasource (uid=DS_PROMETHEUS) → http://prometheus:9090
|
||||
# - Brightstaff dashboard (auto-imported from brightstaff_dashboard.json)
|
||||
#
|
||||
# Prometheus scrapes the host's :9092 and :9901 via host.docker.internal.
|
||||
# On Linux this works because of the `extra_hosts: host-gateway` mapping
|
||||
# below. On Mac/Win it works natively.
|
||||
|
||||
services:
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
container_name: plano-prometheus
|
||||
ports:
|
||||
- "9090:9090"
|
||||
volumes:
|
||||
- ./prometheus_scrape.yaml:/etc/prometheus/prometheus.yml:ro
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
restart: unless-stopped
|
||||
|
||||
grafana:
|
||||
image: grafana/grafana:latest
|
||||
container_name: plano-grafana
|
||||
ports:
|
||||
- "3000:3000"
|
||||
environment:
|
||||
GF_SECURITY_ADMIN_USER: admin
|
||||
GF_SECURITY_ADMIN_PASSWORD: admin
|
||||
GF_AUTH_ANONYMOUS_ENABLED: "true"
|
||||
GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer
|
||||
volumes:
|
||||
- ./provisioning:/etc/grafana/provisioning:ro
|
||||
- ./brightstaff_dashboard.json:/var/lib/grafana/dashboards/brightstaff_dashboard.json:ro
|
||||
depends_on:
|
||||
- prometheus
|
||||
restart: unless-stopped
|
||||
44
config/grafana/prometheus_scrape.yaml
Normal file
44
config/grafana/prometheus_scrape.yaml
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
# Prometheus config that scrapes Plano (Envoy admin + brightstaff). This is
|
||||
# a complete Prometheus config — mount it directly at
|
||||
# /etc/prometheus/prometheus.yml. The included docker-compose.yaml does this
|
||||
# for you.
|
||||
#
|
||||
# Targets:
|
||||
# - envoy:9901 Envoy admin → envoy_cluster_*, envoy_http_*, envoy_server_*.
|
||||
# - brightstaff:9092 Native dataplane → brightstaff_http_*, brightstaff_llm_*,
|
||||
# brightstaff_router_*, process_*.
|
||||
#
|
||||
# Hostname `host.docker.internal` works on Docker Desktop (Mac/Win) and on
|
||||
# Linux when the container is started with `--add-host=host.docker.internal:
|
||||
# host-gateway` (the included compose does this). If Plano runs *inside*
|
||||
# Docker on the same network as Prometheus, replace it with the container
|
||||
# name (e.g. `plano:9092`).
|
||||
#
|
||||
# This file is unrelated to demos/llm_routing/model_routing_service/prometheus.yaml,
|
||||
# which scrapes a fake metrics service to feed the routing engine.
|
||||
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
evaluation_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: envoy
|
||||
honor_timestamps: true
|
||||
metrics_path: /stats
|
||||
params:
|
||||
format: ["prometheus"]
|
||||
static_configs:
|
||||
- targets:
|
||||
- host.docker.internal:9901
|
||||
labels:
|
||||
service: plano
|
||||
|
||||
- job_name: brightstaff
|
||||
honor_timestamps: true
|
||||
metrics_path: /metrics
|
||||
static_configs:
|
||||
- targets:
|
||||
- host.docker.internal:9092
|
||||
labels:
|
||||
service: plano
|
||||
15
config/grafana/provisioning/dashboards/brightstaff.yaml
Normal file
15
config/grafana/provisioning/dashboards/brightstaff.yaml
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
# Auto-load the brightstaff dashboard JSON on Grafana startup.
|
||||
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: brightstaff
|
||||
orgId: 1
|
||||
folder: Plano
|
||||
type: file
|
||||
disableDeletion: false
|
||||
updateIntervalSeconds: 30
|
||||
allowUiUpdates: true
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards
|
||||
foldersFromFilesStructure: false
|
||||
14
config/grafana/provisioning/datasources/prometheus.yaml
Normal file
14
config/grafana/provisioning/datasources/prometheus.yaml
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
# Auto-provision the Prometheus datasource so the bundled dashboard wires up
|
||||
# without any clicks. The `uid: DS_PROMETHEUS` matches the templated input in
|
||||
# brightstaff_dashboard.json.
|
||||
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
uid: DS_PROMETHEUS
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: http://prometheus:9090
|
||||
isDefault: true
|
||||
editable: true
|
||||
|
|
@ -188,9 +188,20 @@ properties:
|
|||
- groq
|
||||
- mistral
|
||||
- openai
|
||||
- xiaomi
|
||||
- gemini
|
||||
- chatgpt
|
||||
- digitalocean
|
||||
- vercel
|
||||
- openrouter
|
||||
headers:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: "Additional headers to send with upstream requests (e.g., ChatGPT-Account-Id, originator)."
|
||||
routing_preferences:
|
||||
type: array
|
||||
description: "[DEPRECATED] Inline routing_preferences under a model_provider are auto-migrated to the top-level routing_preferences list by the config generator. New configs should declare routing_preferences at the top level with an explicit models: [...] list. See docs/routing-api.md."
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -235,9 +246,20 @@ properties:
|
|||
- groq
|
||||
- mistral
|
||||
- openai
|
||||
- xiaomi
|
||||
- gemini
|
||||
- chatgpt
|
||||
- digitalocean
|
||||
- vercel
|
||||
- openrouter
|
||||
headers:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: "Additional headers to send with upstream requests (e.g., ChatGPT-Account-Id, originator)."
|
||||
routing_preferences:
|
||||
type: array
|
||||
description: "[DEPRECATED] Inline routing_preferences under an llm_provider are auto-migrated to the top-level routing_preferences list by the config generator. New configs should declare routing_preferences at the top level with an explicit models: [...] list. See docs/routing-api.md."
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -274,6 +296,9 @@ properties:
|
|||
type: boolean
|
||||
use_agent_orchestrator:
|
||||
type: boolean
|
||||
disable_signals:
|
||||
type: boolean
|
||||
description: "Disable agentic signal analysis (frustration, repetition, escalation, etc.) on LLM responses to save CPU. Default false."
|
||||
upstream_connect_timeout:
|
||||
type: string
|
||||
description: "Connect timeout for upstream provider clusters (e.g., '5s', '10s'). Default is '5s'."
|
||||
|
|
@ -282,10 +307,13 @@ properties:
|
|||
description: "Path to the trusted CA bundle for upstream TLS verification. Default is '/etc/ssl/certs/ca-certificates.crt'."
|
||||
llm_routing_model:
|
||||
type: string
|
||||
description: "Model name for the LLM router (e.g., 'Arch-Router'). Must match a model in model_providers."
|
||||
description: "Model name for the LLM router (e.g., 'Plano-Orchestrator'). Must match a model in model_providers."
|
||||
agent_orchestration_model:
|
||||
type: string
|
||||
description: "Model name for the agent orchestrator (e.g., 'Plano-Orchestrator'). Must match a model in model_providers."
|
||||
orchestrator_model_context_length:
|
||||
type: integer
|
||||
description: "Maximum token length for the orchestrator/routing model context window. Default is 8192."
|
||||
system_prompt:
|
||||
type: string
|
||||
prompt_targets:
|
||||
|
|
@ -423,6 +451,42 @@ properties:
|
|||
enum:
|
||||
- llm
|
||||
- prompt
|
||||
routing:
|
||||
type: object
|
||||
properties:
|
||||
llm_provider:
|
||||
type: string
|
||||
model:
|
||||
type: string
|
||||
session_ttl_seconds:
|
||||
type: integer
|
||||
minimum: 1
|
||||
description: TTL in seconds for session-pinned routing cache entries. Default 600 (10 minutes).
|
||||
session_max_entries:
|
||||
type: integer
|
||||
minimum: 1
|
||||
maximum: 10000
|
||||
description: Maximum number of session-pinned routing cache entries. Default 10000.
|
||||
session_cache:
|
||||
type: object
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- memory
|
||||
- redis
|
||||
default: memory
|
||||
description: Session cache backend. "memory" (default) is in-process; "redis" is shared across replicas.
|
||||
url:
|
||||
type: string
|
||||
description: Redis URL, e.g. redis://localhost:6379. Required when type is redis.
|
||||
tenant_header:
|
||||
type: string
|
||||
description: >
|
||||
Optional HTTP header name whose value is used as a tenant prefix in the cache key.
|
||||
When set, keys are scoped as plano:affinity:{tenant_id}:{session_id}.
|
||||
additionalProperties: false
|
||||
additionalProperties: false
|
||||
state_storage:
|
||||
type: object
|
||||
properties:
|
||||
|
|
|
|||
2135
crates/Cargo.lock
generated
2135
crates/Cargo.lock
generated
File diff suppressed because it is too large
Load diff
|
|
@ -3,6 +3,18 @@ name = "brightstaff"
|
|||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[features]
|
||||
default = ["jemalloc"]
|
||||
jemalloc = ["tikv-jemallocator", "tikv-jemalloc-ctl"]
|
||||
|
||||
[[bin]]
|
||||
name = "brightstaff"
|
||||
path = "src/main.rs"
|
||||
|
||||
[[bin]]
|
||||
name = "signals_replay"
|
||||
path = "src/bin/signals_replay.rs"
|
||||
|
||||
[dependencies]
|
||||
async-openai = "0.30.1"
|
||||
async-trait = "0.1"
|
||||
|
|
@ -26,6 +38,12 @@ opentelemetry-stdout = "0.31"
|
|||
opentelemetry_sdk = { version = "0.31", features = ["rt-tokio"] }
|
||||
pretty_assertions = "1.4.1"
|
||||
rand = "0.9.2"
|
||||
regex = "1.10"
|
||||
lru = "0.12"
|
||||
metrics = "0.23"
|
||||
metrics-exporter-prometheus = { version = "0.15", default-features = false, features = ["http-listener"] }
|
||||
metrics-process = "2.1"
|
||||
redis = { version = "0.27", features = ["tokio-comp"] }
|
||||
reqwest = { version = "0.12.15", features = ["stream"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
|
|
@ -33,6 +51,8 @@ serde_with = "3.13.0"
|
|||
strsim = "0.11"
|
||||
serde_yaml = "0.9.34"
|
||||
thiserror = "2.0.12"
|
||||
tikv-jemallocator = { version = "0.6", optional = true }
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["stats"], optional = true }
|
||||
tokio = { version = "1.44.2", features = ["full"] }
|
||||
tokio-postgres = { version = "0.7", features = ["with-serde_json-1"] }
|
||||
tokio-stream = "0.1"
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@ use common::configuration::{Agent, FilterPipeline, Listener, ModelAlias, SpanAtt
|
|||
use common::llm_providers::LlmProviders;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::router::llm::RouterService;
|
||||
use crate::router::orchestrator::OrchestratorService;
|
||||
use crate::state::StateStorage;
|
||||
|
||||
|
|
@ -14,7 +13,6 @@ use crate::state::StateStorage;
|
|||
/// Instead of cloning 8+ individual `Arc`s per connection, a single
|
||||
/// `Arc<AppState>` is cloned once and passed to the request handler.
|
||||
pub struct AppState {
|
||||
pub router_service: Arc<RouterService>,
|
||||
pub orchestrator_service: Arc<OrchestratorService>,
|
||||
pub model_aliases: Option<HashMap<String, ModelAlias>>,
|
||||
pub llm_providers: Arc<RwLock<LlmProviders>>,
|
||||
|
|
@ -26,4 +24,7 @@ pub struct AppState {
|
|||
/// Shared HTTP client for upstream LLM requests (connection pooling / keep-alive).
|
||||
pub http_client: reqwest::Client,
|
||||
pub filter_pipeline: Arc<FilterPipeline>,
|
||||
/// When false, agentic signal analysis is skipped on LLM responses to save CPU.
|
||||
/// Controlled by `overrides.disable_signals` in plano config.
|
||||
pub signals_enabled: bool,
|
||||
}
|
||||
|
|
|
|||
175
crates/brightstaff/src/bin/signals_replay.rs
Normal file
175
crates/brightstaff/src/bin/signals_replay.rs
Normal file
|
|
@ -0,0 +1,175 @@
|
|||
//! `signals-replay` — batch driver for the `brightstaff` signal analyzer.
|
||||
//!
|
||||
//! Reads JSONL conversations from stdin (one per line) and emits matching
|
||||
//! JSONL reports on stdout, one per input conversation, in the same order.
|
||||
//!
|
||||
//! Input shape (per line):
|
||||
//! ```json
|
||||
//! {"id": "convo-42", "messages": [{"from": "human", "value": "..."}, ...]}
|
||||
//! ```
|
||||
//!
|
||||
//! Output shape (per line, success):
|
||||
//! ```json
|
||||
//! {"id": "convo-42", "report": { ...python-compatible SignalReport dict... }}
|
||||
//! ```
|
||||
//!
|
||||
//! On per-line failure (parse / analyzer error), emits:
|
||||
//! ```json
|
||||
//! {"id": "convo-42", "error": "..."}
|
||||
//! ```
|
||||
//!
|
||||
//! The output report dict is shaped to match the Python reference's
|
||||
//! `SignalReport.to_dict()` byte-for-byte so the parity comparator can do a
|
||||
//! direct structural diff.
|
||||
|
||||
use std::io::{self, BufRead, BufWriter, Write};
|
||||
|
||||
use serde::Deserialize;
|
||||
use serde_json::{json, Map, Value};
|
||||
|
||||
use brightstaff::signals::{SignalAnalyzer, SignalGroup, SignalReport};
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct InputLine {
|
||||
id: Value,
|
||||
messages: Vec<MessageRow>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct MessageRow {
|
||||
#[serde(default)]
|
||||
from: String,
|
||||
#[serde(default)]
|
||||
value: String,
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let stdin = io::stdin();
|
||||
let stdout = io::stdout();
|
||||
let mut out = BufWriter::new(stdout.lock());
|
||||
let analyzer = SignalAnalyzer::default();
|
||||
|
||||
for line in stdin.lock().lines() {
|
||||
let line = match line {
|
||||
Ok(l) => l,
|
||||
Err(e) => {
|
||||
eprintln!("read error: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let trimmed = line.trim();
|
||||
if trimmed.is_empty() {
|
||||
continue;
|
||||
}
|
||||
let result = process_line(&analyzer, trimmed);
|
||||
// Always emit one line per input line so id ordering stays aligned.
|
||||
if let Err(e) = writeln!(out, "{result}") {
|
||||
eprintln!("write error: {e}");
|
||||
std::process::exit(1);
|
||||
}
|
||||
// Flush periodically isn't strictly needed — BufWriter handles it,
|
||||
// and the parent process reads the whole stream when we're done.
|
||||
}
|
||||
let _ = out.flush();
|
||||
}
|
||||
|
||||
fn process_line(analyzer: &SignalAnalyzer, line: &str) -> Value {
|
||||
let parsed: InputLine = match serde_json::from_str(line) {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
return json!({
|
||||
"id": Value::Null,
|
||||
"error": format!("input parse: {e}"),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let id = parsed.id.clone();
|
||||
|
||||
let view: Vec<brightstaff::signals::analyzer::ShareGptMessage<'_>> = parsed
|
||||
.messages
|
||||
.iter()
|
||||
.map(|m| brightstaff::signals::analyzer::ShareGptMessage {
|
||||
from: m.from.as_str(),
|
||||
value: m.value.as_str(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
let report = analyzer.analyze_sharegpt(&view);
|
||||
let report_dict = report_to_python_dict(&report);
|
||||
json!({
|
||||
"id": id,
|
||||
"report": report_dict,
|
||||
})
|
||||
}
|
||||
|
||||
/// Convert a `SignalReport` into the Python reference's `to_dict()` shape.
|
||||
///
|
||||
/// Ordering of category keys in each layer dict follows the Python source
|
||||
/// exactly so even string-equality comparisons behave deterministically.
|
||||
fn report_to_python_dict(r: &SignalReport) -> Value {
|
||||
let mut interaction = Map::new();
|
||||
interaction.insert(
|
||||
"misalignment".to_string(),
|
||||
signal_group_to_python(&r.interaction.misalignment),
|
||||
);
|
||||
interaction.insert(
|
||||
"stagnation".to_string(),
|
||||
signal_group_to_python(&r.interaction.stagnation),
|
||||
);
|
||||
interaction.insert(
|
||||
"disengagement".to_string(),
|
||||
signal_group_to_python(&r.interaction.disengagement),
|
||||
);
|
||||
interaction.insert(
|
||||
"satisfaction".to_string(),
|
||||
signal_group_to_python(&r.interaction.satisfaction),
|
||||
);
|
||||
|
||||
let mut execution = Map::new();
|
||||
execution.insert(
|
||||
"failure".to_string(),
|
||||
signal_group_to_python(&r.execution.failure),
|
||||
);
|
||||
execution.insert(
|
||||
"loops".to_string(),
|
||||
signal_group_to_python(&r.execution.loops),
|
||||
);
|
||||
|
||||
let mut environment = Map::new();
|
||||
environment.insert(
|
||||
"exhaustion".to_string(),
|
||||
signal_group_to_python(&r.environment.exhaustion),
|
||||
);
|
||||
|
||||
json!({
|
||||
"interaction_signals": Value::Object(interaction),
|
||||
"execution_signals": Value::Object(execution),
|
||||
"environment_signals": Value::Object(environment),
|
||||
"overall_quality": r.overall_quality.as_str(),
|
||||
"summary": r.summary,
|
||||
})
|
||||
}
|
||||
|
||||
fn signal_group_to_python(g: &SignalGroup) -> Value {
|
||||
let signals: Vec<Value> = g
|
||||
.signals
|
||||
.iter()
|
||||
.map(|s| {
|
||||
json!({
|
||||
"signal_type": s.signal_type.as_str(),
|
||||
"message_index": s.message_index,
|
||||
"snippet": s.snippet,
|
||||
"confidence": s.confidence,
|
||||
"metadata": s.metadata,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
json!({
|
||||
"category": g.category,
|
||||
"count": g.count,
|
||||
"severity": g.severity,
|
||||
"signals": signals,
|
||||
})
|
||||
}
|
||||
|
|
@ -177,6 +177,7 @@ mod tests {
|
|||
"http://localhost:8080".to_string(),
|
||||
"test-model".to_string(),
|
||||
"plano-orchestrator".to_string(),
|
||||
crate::router::orchestrator_model_v1::MAX_TOKEN_LEN,
|
||||
))
|
||||
}
|
||||
|
||||
|
|
|
|||
53
crates/brightstaff/src/handlers/debug.rs
Normal file
53
crates/brightstaff/src/handlers/debug.rs
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
use bytes::Bytes;
|
||||
use http_body_util::combinators::BoxBody;
|
||||
use hyper::{Response, StatusCode};
|
||||
|
||||
use super::full;
|
||||
|
||||
#[derive(serde::Serialize)]
|
||||
struct MemStats {
|
||||
allocated_bytes: usize,
|
||||
resident_bytes: usize,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
/// Returns jemalloc memory statistics as JSON.
|
||||
/// Falls back to a stub when the jemalloc feature is disabled.
|
||||
pub async fn memstats() -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
|
||||
let stats = get_jemalloc_stats();
|
||||
let json = serde_json::to_string(&stats).unwrap();
|
||||
Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("Content-Type", "application/json")
|
||||
.body(full(json))
|
||||
.unwrap())
|
||||
}
|
||||
|
||||
#[cfg(feature = "jemalloc")]
|
||||
fn get_jemalloc_stats() -> MemStats {
|
||||
use tikv_jemalloc_ctl::{epoch, stats};
|
||||
|
||||
if let Err(e) = epoch::advance() {
|
||||
return MemStats {
|
||||
allocated_bytes: 0,
|
||||
resident_bytes: 0,
|
||||
error: Some(format!("failed to advance jemalloc epoch: {e}")),
|
||||
};
|
||||
}
|
||||
|
||||
MemStats {
|
||||
allocated_bytes: stats::allocated::read().unwrap_or(0),
|
||||
resident_bytes: stats::resident::read().unwrap_or(0),
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "jemalloc"))]
|
||||
fn get_jemalloc_stats() -> MemStats {
|
||||
MemStats {
|
||||
allocated_bytes: 0,
|
||||
resident_bytes: 0,
|
||||
error: Some("jemalloc feature not enabled".to_string()),
|
||||
}
|
||||
}
|
||||
|
|
@ -441,10 +441,8 @@ impl ArchFunctionHandler {
|
|||
}
|
||||
}
|
||||
// Handle str/string conversions
|
||||
"str" | "string" => {
|
||||
if !value.is_string() {
|
||||
return Ok(json!(value.to_string()));
|
||||
}
|
||||
"str" | "string" if !value.is_string() => {
|
||||
return Ok(json!(value.to_string()));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
|
@ -762,7 +760,7 @@ impl ArchFunctionHandler {
|
|||
|
||||
// Keep system message if present
|
||||
if let Some(first) = messages.first() {
|
||||
if first.role == Role::System {
|
||||
if first.role == Role::System || first.role == Role::Developer {
|
||||
if let Some(MessageContent::Text(content)) = &first.content {
|
||||
num_tokens += content.len() / 4; // Approximate 4 chars per token
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ mod tests {
|
|||
"http://localhost:8080".to_string(),
|
||||
"test-model".to_string(),
|
||||
"plano-orchestrator".to_string(),
|
||||
crate::router::orchestrator_model_v1::MAX_TOKEN_LEN,
|
||||
))
|
||||
}
|
||||
|
||||
|
|
@ -147,8 +148,8 @@ mod tests {
|
|||
|
||||
#[tokio::test]
|
||||
async fn test_error_handling_flow() {
|
||||
let router_service = create_test_orchestrator_service();
|
||||
let agent_selector = AgentSelector::new(router_service);
|
||||
let orchestrator_service = create_test_orchestrator_service();
|
||||
let agent_selector = AgentSelector::new(orchestrator_service);
|
||||
|
||||
// Test listener not found
|
||||
let result = agent_selector.find_listener(Some("nonexistent"), &[]);
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
use bytes::Bytes;
|
||||
use common::configuration::{FilterPipeline, ModelAlias};
|
||||
use common::consts::{ARCH_IS_STREAMING_HEADER, ARCH_PROVIDER_HINT_HEADER};
|
||||
use common::consts::{ARCH_IS_STREAMING_HEADER, ARCH_PROVIDER_HINT_HEADER, MODEL_AFFINITY_HEADER};
|
||||
use common::llm_providers::LlmProviders;
|
||||
use hermesllm::apis::openai::Message;
|
||||
use hermesllm::apis::openai_responses::InputParam;
|
||||
|
|
@ -22,19 +22,20 @@ pub(crate) mod model_selection;
|
|||
|
||||
use crate::app_state::AppState;
|
||||
use crate::handlers::agents::pipeline::PipelineProcessor;
|
||||
use crate::handlers::extract_or_generate_traceparent;
|
||||
use crate::handlers::extract_request_id;
|
||||
use crate::handlers::full;
|
||||
use crate::metrics as bs_metrics;
|
||||
use crate::state::response_state_processor::ResponsesStateProcessor;
|
||||
use crate::state::{
|
||||
extract_input_items, retrieve_and_combine_input, StateStorage, StateStorageError,
|
||||
};
|
||||
use crate::streaming::{
|
||||
create_streaming_response, create_streaming_response_with_output_filter, truncate_message,
|
||||
ObservableStreamProcessor, StreamProcessor,
|
||||
LlmMetricsCtx, ObservableStreamProcessor, StreamProcessor,
|
||||
};
|
||||
use crate::tracing::{
|
||||
collect_custom_trace_attributes, llm as tracing_llm, operation_component, set_service_name,
|
||||
collect_custom_trace_attributes, llm as tracing_llm, operation_component,
|
||||
plano as tracing_plano, set_service_name,
|
||||
};
|
||||
use model_selection::router_chat_get_upstream_model;
|
||||
|
||||
|
|
@ -92,7 +93,47 @@ async fn llm_chat_inner(
|
|||
}
|
||||
});
|
||||
|
||||
let traceparent = extract_or_generate_traceparent(&request_headers);
|
||||
// Session pinning: extract session ID and check cache before routing
|
||||
let session_id: Option<String> = request_headers
|
||||
.get(MODEL_AFFINITY_HEADER)
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.map(|s| s.to_string());
|
||||
let tenant_id: Option<String> = state
|
||||
.orchestrator_service
|
||||
.tenant_header()
|
||||
.and_then(|hdr| request_headers.get(hdr))
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|s| s.to_string());
|
||||
let cached_route = if let Some(ref sid) = session_id {
|
||||
state
|
||||
.orchestrator_service
|
||||
.get_cached_route(sid, tenant_id.as_deref())
|
||||
.await
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let (pinned_model, pinned_route_name): (Option<String>, Option<String>) = match cached_route {
|
||||
Some(c) => (Some(c.model_name), c.route_name),
|
||||
None => (None, None),
|
||||
};
|
||||
|
||||
// Record session id on the LLM span for the observability console.
|
||||
if let Some(ref sid) = session_id {
|
||||
get_active_span(|span| {
|
||||
span.set_attribute(opentelemetry::KeyValue::new(
|
||||
tracing_plano::SESSION_ID,
|
||||
sid.clone(),
|
||||
));
|
||||
});
|
||||
}
|
||||
if let Some(ref route_name) = pinned_route_name {
|
||||
get_active_span(|span| {
|
||||
span.set_attribute(opentelemetry::KeyValue::new(
|
||||
tracing_plano::ROUTE_NAME,
|
||||
route_name.clone(),
|
||||
));
|
||||
});
|
||||
}
|
||||
|
||||
let full_qualified_llm_provider_url = format!("{}{}", state.llm_provider_url, request_path);
|
||||
|
||||
|
|
@ -102,6 +143,7 @@ async fn llm_chat_inner(
|
|||
&request_path,
|
||||
&state.model_aliases,
|
||||
&state.llm_providers,
|
||||
state.signals_enabled,
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -213,7 +255,15 @@ async fn llm_chat_inner(
|
|||
if let Some(ref client_api_kind) = client_api {
|
||||
let upstream_api =
|
||||
provider_id.compatible_api_for_client(client_api_kind, is_streaming_request);
|
||||
client_request.normalize_for_upstream(provider_id, &upstream_api);
|
||||
if let Err(e) = client_request.normalize_for_upstream(provider_id, &upstream_api) {
|
||||
warn!(
|
||||
"request_id={}: normalize_for_upstream failed: {}",
|
||||
request_id, e
|
||||
);
|
||||
let mut bad_request = Response::new(full(e.message));
|
||||
*bad_request.status_mut() = StatusCode::BAD_REQUEST;
|
||||
return Ok(bad_request);
|
||||
}
|
||||
}
|
||||
|
||||
// --- Phase 2: Resolve conversation state (v1/responses API) ---
|
||||
|
|
@ -244,46 +294,75 @@ async fn llm_chat_inner(
|
|||
}
|
||||
};
|
||||
|
||||
// --- Phase 3: Route the request ---
|
||||
let routing_span = info_span!(
|
||||
"routing",
|
||||
component = "routing",
|
||||
http.method = "POST",
|
||||
http.target = %request_path,
|
||||
model.requested = %model_from_request,
|
||||
model.alias_resolved = %alias_resolved_model,
|
||||
route.selected_model = tracing::field::Empty,
|
||||
routing.determination_ms = tracing::field::Empty,
|
||||
);
|
||||
let routing_result = match async {
|
||||
set_service_name(operation_component::ROUTING);
|
||||
router_chat_get_upstream_model(
|
||||
Arc::clone(&state.router_service),
|
||||
client_request,
|
||||
&traceparent,
|
||||
&request_path,
|
||||
&request_id,
|
||||
inline_routing_preferences,
|
||||
)
|
||||
.await
|
||||
}
|
||||
.instrument(routing_span)
|
||||
.await
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
let mut internal_error = Response::new(full(err.message));
|
||||
*internal_error.status_mut() = err.status_code;
|
||||
return Ok(internal_error);
|
||||
}
|
||||
};
|
||||
|
||||
// Determine final model (router returns "none" when it doesn't select a specific model)
|
||||
let router_selected_model = routing_result.model_name;
|
||||
let resolved_model = if router_selected_model != "none" {
|
||||
router_selected_model
|
||||
// --- Phase 3: Route the request (or use pinned model from session cache) ---
|
||||
let resolved_model = if let Some(cached_model) = pinned_model {
|
||||
info!(
|
||||
session_id = %session_id.as_deref().unwrap_or(""),
|
||||
model = %cached_model,
|
||||
"using pinned routing decision from cache"
|
||||
);
|
||||
cached_model
|
||||
} else {
|
||||
alias_resolved_model.clone()
|
||||
let routing_span = info_span!(
|
||||
"routing",
|
||||
component = "routing",
|
||||
http.method = "POST",
|
||||
http.target = %request_path,
|
||||
model.requested = %model_from_request,
|
||||
model.alias_resolved = %alias_resolved_model,
|
||||
route.selected_model = tracing::field::Empty,
|
||||
routing.determination_ms = tracing::field::Empty,
|
||||
);
|
||||
let routing_result = match async {
|
||||
set_service_name(operation_component::ROUTING);
|
||||
router_chat_get_upstream_model(
|
||||
Arc::clone(&state.orchestrator_service),
|
||||
client_request,
|
||||
&request_path,
|
||||
&request_id,
|
||||
inline_routing_preferences,
|
||||
)
|
||||
.await
|
||||
}
|
||||
.instrument(routing_span)
|
||||
.await
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
let mut internal_error = Response::new(full(err.message));
|
||||
*internal_error.status_mut() = err.status_code;
|
||||
return Ok(internal_error);
|
||||
}
|
||||
};
|
||||
|
||||
let (router_selected_model, route_name) =
|
||||
(routing_result.model_name, routing_result.route_name);
|
||||
let model = if router_selected_model != "none" {
|
||||
router_selected_model
|
||||
} else {
|
||||
alias_resolved_model.clone()
|
||||
};
|
||||
|
||||
// Record route name on the LLM span (only when the orchestrator produced one).
|
||||
if let Some(ref rn) = route_name {
|
||||
if !rn.is_empty() && rn != "none" {
|
||||
get_active_span(|span| {
|
||||
span.set_attribute(opentelemetry::KeyValue::new(
|
||||
tracing_plano::ROUTE_NAME,
|
||||
rn.clone(),
|
||||
));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref sid) = session_id {
|
||||
state
|
||||
.orchestrator_service
|
||||
.cache_route(sid.clone(), tenant_id.as_deref(), model.clone(), route_name)
|
||||
.await;
|
||||
}
|
||||
|
||||
model
|
||||
};
|
||||
tracing::Span::current().record(tracing_llm::MODEL_NAME, resolved_model.as_str());
|
||||
|
||||
|
|
@ -338,6 +417,7 @@ async fn parse_and_validate_request(
|
|||
request_path: &str,
|
||||
model_aliases: &Option<HashMap<String, ModelAlias>>,
|
||||
llm_providers: &Arc<RwLock<LlmProviders>>,
|
||||
signals_enabled: bool,
|
||||
) -> Result<PreparedRequest, Response<BoxBody<Bytes, hyper::Error>>> {
|
||||
let raw_bytes = request
|
||||
.collect()
|
||||
|
|
@ -416,7 +496,11 @@ async fn parse_and_validate_request(
|
|||
let user_message_preview = client_request
|
||||
.get_recent_user_message()
|
||||
.map(|msg| truncate_message(&msg, 50));
|
||||
let messages_for_signals = Some(client_request.get_messages());
|
||||
let messages_for_signals = if signals_enabled {
|
||||
Some(client_request.get_messages())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Set the upstream model name and strip routing metadata
|
||||
client_request.set_model(model_name_only.clone());
|
||||
|
|
@ -617,6 +701,13 @@ async fn send_upstream(
|
|||
|
||||
let request_start_time = std::time::Instant::now();
|
||||
|
||||
// Labels for LLM upstream metrics. We prefer `resolved_model` (post-routing)
|
||||
// and derive the provider from its `provider/model` prefix. This matches the
|
||||
// same model id the cost/latency router keys off.
|
||||
let (metric_provider_raw, metric_model_raw) = bs_metrics::split_provider_model(resolved_model);
|
||||
let metric_provider = metric_provider_raw.to_string();
|
||||
let metric_model = metric_model_raw.to_string();
|
||||
|
||||
let llm_response = match http_client
|
||||
.post(upstream_url)
|
||||
.headers(request_headers.clone())
|
||||
|
|
@ -626,6 +717,14 @@ async fn send_upstream(
|
|||
{
|
||||
Ok(res) => res,
|
||||
Err(err) => {
|
||||
let err_class = bs_metrics::llm_error_class_from_reqwest(&err);
|
||||
bs_metrics::record_llm_upstream(
|
||||
&metric_provider,
|
||||
&metric_model,
|
||||
0,
|
||||
err_class,
|
||||
request_start_time.elapsed(),
|
||||
);
|
||||
let err_msg = format!("Failed to send request: {}", err);
|
||||
let mut internal_error = Response::new(full(err_msg));
|
||||
*internal_error.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||
|
|
@ -636,6 +735,36 @@ async fn send_upstream(
|
|||
// Propagate upstream headers and status
|
||||
let response_headers = llm_response.headers().clone();
|
||||
let upstream_status = llm_response.status();
|
||||
|
||||
// Upstream routers (e.g. DigitalOcean Gradient) may return an
|
||||
// `x-model-router-selected-route` header indicating which task-level
|
||||
// route the request was classified into (e.g. "Code Generation"). Surface
|
||||
// it as `plano.route.name` so the obs console's Route hit % panel can
|
||||
// show the breakdown even when Plano's own orchestrator wasn't in the
|
||||
// routing path. Any value from Plano's orchestrator already set earlier
|
||||
// takes precedence — this only fires when the span doesn't already have
|
||||
// a route name.
|
||||
if let Some(upstream_route) = response_headers
|
||||
.get("x-model-router-selected-route")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
{
|
||||
if !upstream_route.is_empty() {
|
||||
get_active_span(|span| {
|
||||
span.set_attribute(opentelemetry::KeyValue::new(
|
||||
crate::tracing::plano::ROUTE_NAME,
|
||||
upstream_route.to_string(),
|
||||
));
|
||||
});
|
||||
}
|
||||
}
|
||||
// Record the upstream HTTP status on the span for the obs console.
|
||||
get_active_span(|span| {
|
||||
span.set_attribute(opentelemetry::KeyValue::new(
|
||||
crate::tracing::http::STATUS_CODE,
|
||||
upstream_status.as_u16() as i64,
|
||||
));
|
||||
});
|
||||
|
||||
let mut response = Response::builder().status(upstream_status);
|
||||
if let Some(headers) = response.headers_mut() {
|
||||
for (name, value) in response_headers.iter() {
|
||||
|
|
@ -651,7 +780,12 @@ async fn send_upstream(
|
|||
span_name,
|
||||
request_start_time,
|
||||
messages_for_signals,
|
||||
);
|
||||
)
|
||||
.with_llm_metrics(LlmMetricsCtx {
|
||||
provider: metric_provider.clone(),
|
||||
model: metric_model.clone(),
|
||||
upstream_status: upstream_status.as_u16(),
|
||||
});
|
||||
|
||||
let output_filter_request_headers = if filter_pipeline.has_output_filters() {
|
||||
Some(request_headers.clone())
|
||||
|
|
|
|||
|
|
@ -5,10 +5,24 @@ use hyper::StatusCode;
|
|||
use std::sync::Arc;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::router::llm::RouterService;
|
||||
use crate::metrics as bs_metrics;
|
||||
use crate::metrics::labels as metric_labels;
|
||||
use crate::router::orchestrator::OrchestratorService;
|
||||
use crate::streaming::truncate_message;
|
||||
use crate::tracing::routing;
|
||||
|
||||
/// Classify a request path (already stripped of `/agents` or `/routing` by
|
||||
/// the caller) into the fixed `route` label used on routing metrics.
|
||||
fn route_label_for_path(request_path: &str) -> &'static str {
|
||||
if request_path.starts_with("/agents") {
|
||||
metric_labels::ROUTE_AGENT
|
||||
} else if request_path.starts_with("/routing") {
|
||||
metric_labels::ROUTE_ROUTING
|
||||
} else {
|
||||
metric_labels::ROUTE_LLM
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RoutingResult {
|
||||
/// Primary model to use (first in the ranked list).
|
||||
pub model_name: String,
|
||||
|
|
@ -37,9 +51,8 @@ impl RoutingError {
|
|||
/// * `Ok(RoutingResult)` - Contains the selected model name and span ID
|
||||
/// * `Err(RoutingError)` - Contains error details and optional span ID
|
||||
pub async fn router_chat_get_upstream_model(
|
||||
router_service: Arc<RouterService>,
|
||||
orchestrator_service: Arc<OrchestratorService>,
|
||||
client_request: ProviderRequestType,
|
||||
traceparent: &str,
|
||||
request_path: &str,
|
||||
request_id: &str,
|
||||
inline_routing_preferences: Option<Vec<TopLevelRoutingPreference>>,
|
||||
|
|
@ -99,25 +112,31 @@ pub async fn router_chat_get_upstream_model(
|
|||
// Capture start time for routing span
|
||||
let routing_start_time = std::time::Instant::now();
|
||||
|
||||
// Attempt to determine route using the router service
|
||||
let routing_result = router_service
|
||||
let routing_result = orchestrator_service
|
||||
.determine_route(
|
||||
&chat_request.messages,
|
||||
traceparent,
|
||||
inline_routing_preferences,
|
||||
request_id,
|
||||
)
|
||||
.await;
|
||||
|
||||
let determination_ms = routing_start_time.elapsed().as_millis() as i64;
|
||||
let determination_elapsed = routing_start_time.elapsed();
|
||||
let determination_ms = determination_elapsed.as_millis() as i64;
|
||||
let current_span = tracing::Span::current();
|
||||
current_span.record(routing::ROUTE_DETERMINATION_MS, determination_ms);
|
||||
let route_label = route_label_for_path(request_path);
|
||||
|
||||
match routing_result {
|
||||
Ok(route) => match route {
|
||||
Some((route_name, ranked_models)) => {
|
||||
let model_name = ranked_models.first().cloned().unwrap_or_default();
|
||||
current_span.record("route.selected_model", model_name.as_str());
|
||||
bs_metrics::record_router_decision(
|
||||
route_label,
|
||||
&model_name,
|
||||
false,
|
||||
determination_elapsed,
|
||||
);
|
||||
Ok(RoutingResult {
|
||||
model_name,
|
||||
models: ranked_models,
|
||||
|
|
@ -129,6 +148,12 @@ pub async fn router_chat_get_upstream_model(
|
|||
// This signals to llm.rs to use the original validated request model
|
||||
current_span.record("route.selected_model", "none");
|
||||
info!("no route determined, using default model");
|
||||
bs_metrics::record_router_decision(
|
||||
route_label,
|
||||
"none",
|
||||
true,
|
||||
determination_elapsed,
|
||||
);
|
||||
|
||||
Ok(RoutingResult {
|
||||
model_name: "none".to_string(),
|
||||
|
|
@ -139,6 +164,7 @@ pub async fn router_chat_get_upstream_model(
|
|||
},
|
||||
Err(err) => {
|
||||
current_span.record("route.selected_model", "unknown");
|
||||
bs_metrics::record_router_decision(route_label, "unknown", true, determination_elapsed);
|
||||
Err(RoutingError::internal_error(format!(
|
||||
"Failed to determine route: {}",
|
||||
err
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
pub mod agents;
|
||||
pub mod debug;
|
||||
pub mod function_calling;
|
||||
pub mod llm;
|
||||
pub mod models;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
use bytes::Bytes;
|
||||
use common::configuration::{SpanAttributes, TopLevelRoutingPreference};
|
||||
use common::consts::REQUEST_ID_HEADER;
|
||||
use common::consts::{MODEL_AFFINITY_HEADER, REQUEST_ID_HEADER};
|
||||
use common::errors::BrightStaffError;
|
||||
use hermesllm::clients::SupportedAPIsFromClient;
|
||||
use hermesllm::ProviderRequestType;
|
||||
|
|
@ -12,7 +12,9 @@ use tracing::{debug, info, info_span, warn, Instrument};
|
|||
|
||||
use super::extract_or_generate_traceparent;
|
||||
use crate::handlers::llm::model_selection::router_chat_get_upstream_model;
|
||||
use crate::router::llm::RouterService;
|
||||
use crate::metrics as bs_metrics;
|
||||
use crate::metrics::labels as metric_labels;
|
||||
use crate::router::orchestrator::OrchestratorService;
|
||||
use crate::tracing::{collect_custom_trace_attributes, operation_component, set_service_name};
|
||||
|
||||
/// Extracts `routing_preferences` from a JSON body, returning the cleaned body bytes
|
||||
|
|
@ -53,11 +55,14 @@ struct RoutingDecisionResponse {
|
|||
models: Vec<String>,
|
||||
route: Option<String>,
|
||||
trace_id: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
session_id: Option<String>,
|
||||
pinned: bool,
|
||||
}
|
||||
|
||||
pub async fn routing_decision(
|
||||
request: Request<hyper::body::Incoming>,
|
||||
router_service: Arc<RouterService>,
|
||||
orchestrator_service: Arc<OrchestratorService>,
|
||||
request_path: String,
|
||||
span_attributes: &Option<SpanAttributes>,
|
||||
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
|
||||
|
|
@ -68,6 +73,17 @@ pub async fn routing_decision(
|
|||
.map(|s| s.to_string())
|
||||
.unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
|
||||
|
||||
let session_id: Option<String> = request_headers
|
||||
.get(MODEL_AFFINITY_HEADER)
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let tenant_id: Option<String> = orchestrator_service
|
||||
.tenant_header()
|
||||
.and_then(|hdr| request_headers.get(hdr))
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|s| s.to_string());
|
||||
|
||||
let custom_attrs = collect_custom_trace_attributes(&request_headers, span_attributes.as_ref());
|
||||
|
||||
let request_span = info_span!(
|
||||
|
|
@ -80,23 +96,28 @@ pub async fn routing_decision(
|
|||
|
||||
routing_decision_inner(
|
||||
request,
|
||||
router_service,
|
||||
orchestrator_service,
|
||||
request_id,
|
||||
request_path,
|
||||
request_headers,
|
||||
custom_attrs,
|
||||
session_id,
|
||||
tenant_id,
|
||||
)
|
||||
.instrument(request_span)
|
||||
.await
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn routing_decision_inner(
|
||||
request: Request<hyper::body::Incoming>,
|
||||
router_service: Arc<RouterService>,
|
||||
orchestrator_service: Arc<OrchestratorService>,
|
||||
request_id: String,
|
||||
request_path: String,
|
||||
request_headers: hyper::HeaderMap,
|
||||
custom_attrs: std::collections::HashMap<String, String>,
|
||||
session_id: Option<String>,
|
||||
tenant_id: Option<String>,
|
||||
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
|
||||
set_service_name(operation_component::ROUTING);
|
||||
opentelemetry::trace::get_active_span(|span| {
|
||||
|
|
@ -114,6 +135,36 @@ async fn routing_decision_inner(
|
|||
.unwrap_or("unknown")
|
||||
.to_string();
|
||||
|
||||
if let Some(ref sid) = session_id {
|
||||
if let Some(cached) = orchestrator_service
|
||||
.get_cached_route(sid, tenant_id.as_deref())
|
||||
.await
|
||||
{
|
||||
info!(
|
||||
session_id = %sid,
|
||||
model = %cached.model_name,
|
||||
route = ?cached.route_name,
|
||||
"returning pinned routing decision from cache"
|
||||
);
|
||||
let response = RoutingDecisionResponse {
|
||||
models: vec![cached.model_name],
|
||||
route: cached.route_name,
|
||||
trace_id,
|
||||
session_id: Some(sid.clone()),
|
||||
pinned: true,
|
||||
};
|
||||
let json = serde_json::to_string(&response).unwrap();
|
||||
let body = Full::new(Bytes::from(json))
|
||||
.map_err(|never| match never {})
|
||||
.boxed();
|
||||
return Ok(Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("Content-Type", "application/json")
|
||||
.body(body)
|
||||
.unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
// Parse request body
|
||||
let raw_bytes = request.collect().await?.to_bytes();
|
||||
|
||||
|
|
@ -152,9 +203,8 @@ async fn routing_decision_inner(
|
|||
};
|
||||
|
||||
let routing_result = router_chat_get_upstream_model(
|
||||
router_service,
|
||||
Arc::clone(&orchestrator_service),
|
||||
client_request,
|
||||
&traceparent,
|
||||
&request_path,
|
||||
&request_id,
|
||||
inline_routing_preferences,
|
||||
|
|
@ -163,12 +213,36 @@ async fn routing_decision_inner(
|
|||
|
||||
match routing_result {
|
||||
Ok(result) => {
|
||||
if let Some(ref sid) = session_id {
|
||||
orchestrator_service
|
||||
.cache_route(
|
||||
sid.clone(),
|
||||
tenant_id.as_deref(),
|
||||
result.model_name.clone(),
|
||||
result.route_name.clone(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
let response = RoutingDecisionResponse {
|
||||
models: result.models,
|
||||
route: result.route_name,
|
||||
trace_id,
|
||||
session_id,
|
||||
pinned: false,
|
||||
};
|
||||
|
||||
// Distinguish "decision served" (a concrete model picked) from
|
||||
// "no_candidates" (the sentinel "none" returned when nothing
|
||||
// matched). The handler still responds 200 in both cases, so RED
|
||||
// metrics alone can't tell them apart.
|
||||
let outcome = if response.models.first().map(|m| m == "none").unwrap_or(true) {
|
||||
metric_labels::ROUTING_SVC_NO_CANDIDATES
|
||||
} else {
|
||||
metric_labels::ROUTING_SVC_DECISION_SERVED
|
||||
};
|
||||
bs_metrics::record_routing_service_outcome(outcome);
|
||||
|
||||
info!(
|
||||
primary_model = %response.models.first().map(|s| s.as_str()).unwrap_or("none"),
|
||||
total_models = response.models.len(),
|
||||
|
|
@ -188,6 +262,7 @@ async fn routing_decision_inner(
|
|||
.unwrap())
|
||||
}
|
||||
Err(err) => {
|
||||
bs_metrics::record_routing_service_outcome(metric_labels::ROUTING_SVC_POLICY_ERROR);
|
||||
warn!(error = %err.message, "routing decision failed");
|
||||
Ok(BrightStaffError::InternalServerError(err.message).into_response())
|
||||
}
|
||||
|
|
@ -329,6 +404,8 @@ mod tests {
|
|||
],
|
||||
route: Some("code_generation".to_string()),
|
||||
trace_id: "abc123".to_string(),
|
||||
session_id: Some("sess-abc".to_string()),
|
||||
pinned: true,
|
||||
};
|
||||
let json = serde_json::to_string(&response).unwrap();
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
|
||||
|
|
@ -336,6 +413,8 @@ mod tests {
|
|||
assert_eq!(parsed["models"][1], "openai/gpt-4o");
|
||||
assert_eq!(parsed["route"], "code_generation");
|
||||
assert_eq!(parsed["trace_id"], "abc123");
|
||||
assert_eq!(parsed["session_id"], "sess-abc");
|
||||
assert_eq!(parsed["pinned"], true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
@ -344,10 +423,14 @@ mod tests {
|
|||
models: vec!["none".to_string()],
|
||||
route: None,
|
||||
trace_id: "abc123".to_string(),
|
||||
session_id: None,
|
||||
pinned: false,
|
||||
};
|
||||
let json = serde_json::to_string(&response).unwrap();
|
||||
let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
|
||||
assert_eq!(parsed["models"][0], "none");
|
||||
assert!(parsed["route"].is_null());
|
||||
assert!(parsed.get("session_id").is_none());
|
||||
assert_eq!(parsed["pinned"], false);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
|
|||
pub mod app_state;
|
||||
pub mod handlers;
|
||||
pub mod metrics;
|
||||
pub mod router;
|
||||
pub mod session_cache;
|
||||
pub mod signals;
|
||||
pub mod state;
|
||||
pub mod streaming;
|
||||
|
|
|
|||
|
|
@ -1,13 +1,20 @@
|
|||
#[cfg(feature = "jemalloc")]
|
||||
#[global_allocator]
|
||||
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
|
||||
|
||||
use brightstaff::app_state::AppState;
|
||||
use brightstaff::handlers::agents::orchestrator::agent_chat;
|
||||
use brightstaff::handlers::debug;
|
||||
use brightstaff::handlers::empty;
|
||||
use brightstaff::handlers::function_calling::function_calling_chat_handler;
|
||||
use brightstaff::handlers::llm::llm_chat;
|
||||
use brightstaff::handlers::models::list_models;
|
||||
use brightstaff::handlers::routing_service::routing_decision;
|
||||
use brightstaff::router::llm::RouterService;
|
||||
use brightstaff::metrics as bs_metrics;
|
||||
use brightstaff::metrics::labels as metric_labels;
|
||||
use brightstaff::router::model_metrics::ModelMetricsService;
|
||||
use brightstaff::router::orchestrator::OrchestratorService;
|
||||
use brightstaff::session_cache::init_session_cache;
|
||||
use brightstaff::state::memory::MemoryConversationalStorage;
|
||||
use brightstaff::state::postgresql::PostgreSQLConversationStorage;
|
||||
use brightstaff::state::StateStorage;
|
||||
|
|
@ -36,8 +43,6 @@ use tokio::sync::RwLock;
|
|||
use tracing::{debug, info, warn};
|
||||
|
||||
const BIND_ADDRESS: &str = "0.0.0.0:9091";
|
||||
const DEFAULT_ROUTING_LLM_PROVIDER: &str = "arch-router";
|
||||
const DEFAULT_ROUTING_MODEL_NAME: &str = "Arch-Router";
|
||||
const DEFAULT_ORCHESTRATOR_LLM_PROVIDER: &str = "plano-orchestrator";
|
||||
const DEFAULT_ORCHESTRATOR_MODEL_NAME: &str = "Plano-Orchestrator";
|
||||
|
||||
|
|
@ -160,19 +165,8 @@ async fn init_app_state(
|
|||
|
||||
let overrides = config.overrides.clone().unwrap_or_default();
|
||||
|
||||
let routing_model_name: String = overrides
|
||||
.llm_routing_model
|
||||
.as_deref()
|
||||
.map(|m| m.split_once('/').map(|(_, id)| id).unwrap_or(m))
|
||||
.unwrap_or(DEFAULT_ROUTING_MODEL_NAME)
|
||||
.to_string();
|
||||
|
||||
let routing_llm_provider = config
|
||||
.model_providers
|
||||
.iter()
|
||||
.find(|p| p.model.as_deref() == Some(routing_model_name.as_str()))
|
||||
.map(|p| p.name.clone())
|
||||
.unwrap_or_else(|| DEFAULT_ROUTING_LLM_PROVIDER.to_string());
|
||||
let session_ttl_seconds = config.routing.as_ref().and_then(|r| r.session_ttl_seconds);
|
||||
let session_cache = init_session_cache(config).await?;
|
||||
|
||||
// Validate that top-level routing_preferences requires v0.4.0+.
|
||||
let config_version = parse_semver(&config.version);
|
||||
|
|
@ -294,17 +288,17 @@ async fn init_app_state(
|
|||
}
|
||||
}
|
||||
|
||||
let router_service = Arc::new(RouterService::new(
|
||||
config.routing_preferences.clone(),
|
||||
metrics_service,
|
||||
format!("{llm_provider_url}{CHAT_COMPLETIONS_PATH}"),
|
||||
routing_model_name,
|
||||
routing_llm_provider,
|
||||
));
|
||||
let session_tenant_header = config
|
||||
.routing
|
||||
.as_ref()
|
||||
.and_then(|r| r.session_cache.as_ref())
|
||||
.and_then(|c| c.tenant_header.clone());
|
||||
|
||||
// Resolve model name: prefer llm_routing_model override, then agent_orchestration_model, then default.
|
||||
let orchestrator_model_name: String = overrides
|
||||
.agent_orchestration_model
|
||||
.llm_routing_model
|
||||
.as_deref()
|
||||
.or(overrides.agent_orchestration_model.as_deref())
|
||||
.map(|m| m.split_once('/').map(|(_, id)| id).unwrap_or(m))
|
||||
.unwrap_or(DEFAULT_ORCHESTRATOR_MODEL_NAME)
|
||||
.to_string();
|
||||
|
|
@ -316,10 +310,20 @@ async fn init_app_state(
|
|||
.map(|p| p.name.clone())
|
||||
.unwrap_or_else(|| DEFAULT_ORCHESTRATOR_LLM_PROVIDER.to_string());
|
||||
|
||||
let orchestrator_service = Arc::new(OrchestratorService::new(
|
||||
let orchestrator_max_tokens = overrides
|
||||
.orchestrator_model_context_length
|
||||
.unwrap_or(brightstaff::router::orchestrator_model_v1::MAX_TOKEN_LEN);
|
||||
|
||||
let orchestrator_service = Arc::new(OrchestratorService::with_routing(
|
||||
format!("{llm_provider_url}{CHAT_COMPLETIONS_PATH}"),
|
||||
orchestrator_model_name,
|
||||
orchestrator_llm_provider,
|
||||
config.routing_preferences.clone(),
|
||||
metrics_service,
|
||||
session_ttl_seconds,
|
||||
session_cache,
|
||||
session_tenant_header,
|
||||
orchestrator_max_tokens,
|
||||
));
|
||||
|
||||
let state_storage = init_state_storage(config).await?;
|
||||
|
|
@ -329,8 +333,9 @@ async fn init_app_state(
|
|||
.as_ref()
|
||||
.and_then(|tracing| tracing.span_attributes.clone());
|
||||
|
||||
let signals_enabled = !overrides.disable_signals.unwrap_or(false);
|
||||
|
||||
Ok(AppState {
|
||||
router_service,
|
||||
orchestrator_service,
|
||||
model_aliases: config.model_aliases.clone(),
|
||||
llm_providers: Arc::new(RwLock::new(llm_providers)),
|
||||
|
|
@ -341,6 +346,7 @@ async fn init_app_state(
|
|||
span_attributes,
|
||||
http_client: reqwest::Client::new(),
|
||||
filter_pipeline,
|
||||
signals_enabled,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -388,10 +394,79 @@ async fn init_state_storage(
|
|||
// Request routing
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Normalized method label — limited set so we never emit a free-form string.
|
||||
fn method_label(method: &Method) -> &'static str {
|
||||
match *method {
|
||||
Method::GET => "GET",
|
||||
Method::POST => "POST",
|
||||
Method::PUT => "PUT",
|
||||
Method::DELETE => "DELETE",
|
||||
Method::PATCH => "PATCH",
|
||||
Method::HEAD => "HEAD",
|
||||
Method::OPTIONS => "OPTIONS",
|
||||
_ => "OTHER",
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the fixed `handler` metric label from the request's path+method.
|
||||
/// Returning `None` for fall-through means `route()` will hand the request to
|
||||
/// the catch-all 404 branch.
|
||||
fn handler_label_for(method: &Method, path: &str) -> &'static str {
|
||||
if let Some(stripped) = path.strip_prefix("/agents") {
|
||||
if matches!(
|
||||
stripped,
|
||||
CHAT_COMPLETIONS_PATH | MESSAGES_PATH | OPENAI_RESPONSES_API_PATH
|
||||
) {
|
||||
return metric_labels::HANDLER_AGENT_CHAT;
|
||||
}
|
||||
}
|
||||
if let Some(stripped) = path.strip_prefix("/routing") {
|
||||
if matches!(
|
||||
stripped,
|
||||
CHAT_COMPLETIONS_PATH | MESSAGES_PATH | OPENAI_RESPONSES_API_PATH
|
||||
) {
|
||||
return metric_labels::HANDLER_ROUTING_DECISION;
|
||||
}
|
||||
}
|
||||
match (method, path) {
|
||||
(&Method::POST, CHAT_COMPLETIONS_PATH | MESSAGES_PATH | OPENAI_RESPONSES_API_PATH) => {
|
||||
metric_labels::HANDLER_LLM_CHAT
|
||||
}
|
||||
(&Method::POST, "/function_calling") => metric_labels::HANDLER_FUNCTION_CALLING,
|
||||
(&Method::GET, "/v1/models" | "/agents/v1/models") => metric_labels::HANDLER_LIST_MODELS,
|
||||
(&Method::OPTIONS, "/v1/models" | "/agents/v1/models") => {
|
||||
metric_labels::HANDLER_CORS_PREFLIGHT
|
||||
}
|
||||
_ => metric_labels::HANDLER_NOT_FOUND,
|
||||
}
|
||||
}
|
||||
|
||||
/// Route an incoming HTTP request to the appropriate handler.
|
||||
async fn route(
|
||||
req: Request<Incoming>,
|
||||
state: Arc<AppState>,
|
||||
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
|
||||
let handler = handler_label_for(req.method(), req.uri().path());
|
||||
let method = method_label(req.method());
|
||||
let started = std::time::Instant::now();
|
||||
let _in_flight = bs_metrics::InFlightGuard::new(handler);
|
||||
|
||||
let result = dispatch(req, state).await;
|
||||
|
||||
let status = match &result {
|
||||
Ok(resp) => resp.status().as_u16(),
|
||||
// hyper::Error here means the body couldn't be produced; conventionally 500.
|
||||
Err(_) => 500,
|
||||
};
|
||||
bs_metrics::record_http(handler, method, status, started);
|
||||
result
|
||||
}
|
||||
|
||||
/// Inner dispatcher split out so `route()` can wrap it with metrics without
|
||||
/// duplicating the match tree.
|
||||
async fn dispatch(
|
||||
req: Request<Incoming>,
|
||||
state: Arc<AppState>,
|
||||
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
|
||||
let parent_cx = global::get_text_map_propagator(|p| p.extract(&HeaderExtractor(req.headers())));
|
||||
let path = req.uri().path().to_string();
|
||||
|
|
@ -417,7 +492,7 @@ async fn route(
|
|||
) {
|
||||
return routing_decision(
|
||||
req,
|
||||
Arc::clone(&state.router_service),
|
||||
Arc::clone(&state.orchestrator_service),
|
||||
stripped,
|
||||
&state.span_attributes,
|
||||
)
|
||||
|
|
@ -443,6 +518,7 @@ async fn route(
|
|||
Ok(list_models(Arc::clone(&state.llm_providers)).await)
|
||||
}
|
||||
(&Method::OPTIONS, "/v1/models" | "/agents/v1/models") => cors_preflight(),
|
||||
(&Method::GET, "/debug/memstats") => debug::memstats().await,
|
||||
_ => {
|
||||
debug!(method = %req.method(), path = %path, "no route found");
|
||||
let mut not_found = Response::new(empty());
|
||||
|
|
@ -507,6 +583,7 @@ async fn run_server(state: Arc<AppState>) -> Result<(), Box<dyn std::error::Erro
|
|||
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let config = load_config()?;
|
||||
let _tracer_provider = init_tracer(config.tracing.as_ref());
|
||||
bs_metrics::init();
|
||||
info!("loaded plano_config.yaml");
|
||||
let state = Arc::new(init_app_state(&config).await?);
|
||||
run_server(state).await
|
||||
|
|
|
|||
38
crates/brightstaff/src/metrics/labels.rs
Normal file
38
crates/brightstaff/src/metrics/labels.rs
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
//! Fixed label-value constants so callers never emit free-form strings
|
||||
//! (which would blow up cardinality).
|
||||
|
||||
// Handler enum — derived from the path+method match in `route()`.
|
||||
pub const HANDLER_AGENT_CHAT: &str = "agent_chat";
|
||||
pub const HANDLER_ROUTING_DECISION: &str = "routing_decision";
|
||||
pub const HANDLER_LLM_CHAT: &str = "llm_chat";
|
||||
pub const HANDLER_FUNCTION_CALLING: &str = "function_calling";
|
||||
pub const HANDLER_LIST_MODELS: &str = "list_models";
|
||||
pub const HANDLER_CORS_PREFLIGHT: &str = "cors_preflight";
|
||||
pub const HANDLER_NOT_FOUND: &str = "not_found";
|
||||
|
||||
// Router "route" class — which brightstaff endpoint prompted the decision.
|
||||
pub const ROUTE_AGENT: &str = "agent";
|
||||
pub const ROUTE_ROUTING: &str = "routing";
|
||||
pub const ROUTE_LLM: &str = "llm";
|
||||
|
||||
// Token kind for brightstaff_llm_tokens_total.
|
||||
pub const TOKEN_KIND_PROMPT: &str = "prompt";
|
||||
pub const TOKEN_KIND_COMPLETION: &str = "completion";
|
||||
|
||||
// LLM error_class values (match docstring in metrics/mod.rs).
|
||||
pub const LLM_ERR_NONE: &str = "none";
|
||||
pub const LLM_ERR_TIMEOUT: &str = "timeout";
|
||||
pub const LLM_ERR_CONNECT: &str = "connect";
|
||||
pub const LLM_ERR_PARSE: &str = "parse";
|
||||
pub const LLM_ERR_OTHER: &str = "other";
|
||||
pub const LLM_ERR_STREAM: &str = "stream";
|
||||
|
||||
// Routing service outcome values.
|
||||
pub const ROUTING_SVC_DECISION_SERVED: &str = "decision_served";
|
||||
pub const ROUTING_SVC_NO_CANDIDATES: &str = "no_candidates";
|
||||
pub const ROUTING_SVC_POLICY_ERROR: &str = "policy_error";
|
||||
|
||||
// Session cache outcome values.
|
||||
pub const SESSION_CACHE_HIT: &str = "hit";
|
||||
pub const SESSION_CACHE_MISS: &str = "miss";
|
||||
pub const SESSION_CACHE_STORE: &str = "store";
|
||||
377
crates/brightstaff/src/metrics/mod.rs
Normal file
377
crates/brightstaff/src/metrics/mod.rs
Normal file
|
|
@ -0,0 +1,377 @@
|
|||
//! Prometheus metrics for brightstaff.
|
||||
//!
|
||||
//! Installs the `metrics` global recorder backed by
|
||||
//! `metrics-exporter-prometheus` and exposes a `/metrics` HTTP endpoint on a
|
||||
//! dedicated admin port (default `0.0.0.0:9092`, overridable via
|
||||
//! `METRICS_BIND_ADDRESS`).
|
||||
//!
|
||||
//! Emitted metric families (see `describe_all` for full list):
|
||||
//! - HTTP RED: `brightstaff_http_requests_total`,
|
||||
//! `brightstaff_http_request_duration_seconds`,
|
||||
//! `brightstaff_http_in_flight_requests`.
|
||||
//! - LLM upstream: `brightstaff_llm_upstream_requests_total`,
|
||||
//! `brightstaff_llm_upstream_duration_seconds`,
|
||||
//! `brightstaff_llm_time_to_first_token_seconds`,
|
||||
//! `brightstaff_llm_tokens_total`,
|
||||
//! `brightstaff_llm_tokens_usage_missing_total`.
|
||||
//! - Routing: `brightstaff_router_decisions_total`,
|
||||
//! `brightstaff_router_decision_duration_seconds`,
|
||||
//! `brightstaff_routing_service_requests_total`,
|
||||
//! `brightstaff_session_cache_events_total`.
|
||||
//! - Process: via `metrics-process`.
|
||||
//! - Build: `brightstaff_build_info`.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::OnceLock;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use metrics::{counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram};
|
||||
use metrics_exporter_prometheus::{Matcher, PrometheusBuilder};
|
||||
use tracing::{info, warn};
|
||||
|
||||
pub mod labels;
|
||||
|
||||
/// Guard flag so tests don't re-install the global recorder.
|
||||
static INIT: OnceLock<()> = OnceLock::new();
|
||||
|
||||
const DEFAULT_METRICS_BIND: &str = "0.0.0.0:9092";
|
||||
|
||||
/// HTTP request duration buckets (seconds). Capped at 60s.
|
||||
const HTTP_BUCKETS: &[f64] = &[
|
||||
0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0,
|
||||
];
|
||||
|
||||
/// LLM upstream / TTFT buckets (seconds). Capped at 120s because provider
|
||||
/// completions routinely run that long.
|
||||
const LLM_BUCKETS: &[f64] = &[0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0, 120.0];
|
||||
|
||||
/// Router decision buckets (seconds). The orchestrator call itself is usually
|
||||
/// sub-second but bucketed generously in case of upstream slowness.
|
||||
const ROUTER_BUCKETS: &[f64] = &[
|
||||
0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0,
|
||||
];
|
||||
|
||||
/// Install the global recorder and spawn the `/metrics` HTTP listener.
|
||||
///
|
||||
/// Safe to call more than once; subsequent calls are no-ops so tests that
|
||||
/// construct their own recorder still work.
|
||||
pub fn init() {
|
||||
if INIT.get().is_some() {
|
||||
return;
|
||||
}
|
||||
|
||||
let bind: SocketAddr = std::env::var("METRICS_BIND_ADDRESS")
|
||||
.unwrap_or_else(|_| DEFAULT_METRICS_BIND.to_string())
|
||||
.parse()
|
||||
.unwrap_or_else(|err| {
|
||||
warn!(error = %err, default = DEFAULT_METRICS_BIND, "invalid METRICS_BIND_ADDRESS, falling back to default");
|
||||
DEFAULT_METRICS_BIND.parse().expect("default bind parses")
|
||||
});
|
||||
|
||||
let builder = PrometheusBuilder::new()
|
||||
.with_http_listener(bind)
|
||||
.set_buckets_for_metric(
|
||||
Matcher::Full("brightstaff_http_request_duration_seconds".to_string()),
|
||||
HTTP_BUCKETS,
|
||||
)
|
||||
.and_then(|b| {
|
||||
b.set_buckets_for_metric(Matcher::Prefix("brightstaff_llm_".to_string()), LLM_BUCKETS)
|
||||
})
|
||||
.and_then(|b| {
|
||||
b.set_buckets_for_metric(
|
||||
Matcher::Full("brightstaff_router_decision_duration_seconds".to_string()),
|
||||
ROUTER_BUCKETS,
|
||||
)
|
||||
});
|
||||
|
||||
let builder = match builder {
|
||||
Ok(b) => b,
|
||||
Err(err) => {
|
||||
warn!(error = %err, "failed to configure metrics buckets, using defaults");
|
||||
PrometheusBuilder::new().with_http_listener(bind)
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(err) = builder.install() {
|
||||
warn!(error = %err, "failed to install Prometheus recorder; metrics disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
let _ = INIT.set(());
|
||||
|
||||
describe_all();
|
||||
emit_build_info();
|
||||
|
||||
// Register process-level collector (RSS, CPU, FDs).
|
||||
let collector = metrics_process::Collector::default();
|
||||
collector.describe();
|
||||
// Prime once at startup; subsequent scrapes refresh via the exporter's
|
||||
// per-scrape render, so we additionally refresh on a short interval to
|
||||
// keep gauges moving between scrapes without requiring client pull.
|
||||
collector.collect();
|
||||
tokio::spawn(async move {
|
||||
let mut tick = tokio::time::interval(Duration::from_secs(10));
|
||||
tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
|
||||
loop {
|
||||
tick.tick().await;
|
||||
collector.collect();
|
||||
}
|
||||
});
|
||||
|
||||
info!(address = %bind, "metrics listener started");
|
||||
}
|
||||
|
||||
fn describe_all() {
|
||||
describe_counter!(
|
||||
"brightstaff_http_requests_total",
|
||||
"Total HTTP requests served by brightstaff, by handler and status class."
|
||||
);
|
||||
describe_histogram!(
|
||||
"brightstaff_http_request_duration_seconds",
|
||||
"Wall-clock duration of HTTP requests served by brightstaff, by handler."
|
||||
);
|
||||
describe_gauge!(
|
||||
"brightstaff_http_in_flight_requests",
|
||||
"Number of HTTP requests currently being served by brightstaff, by handler."
|
||||
);
|
||||
|
||||
describe_counter!(
|
||||
"brightstaff_llm_upstream_requests_total",
|
||||
"LLM upstream request outcomes, by provider, model, status class and error class."
|
||||
);
|
||||
describe_histogram!(
|
||||
"brightstaff_llm_upstream_duration_seconds",
|
||||
"Wall-clock duration of LLM upstream calls (stream close for streaming), by provider and model."
|
||||
);
|
||||
describe_histogram!(
|
||||
"brightstaff_llm_time_to_first_token_seconds",
|
||||
"Time from request start to first streamed byte, by provider and model (streaming only)."
|
||||
);
|
||||
describe_counter!(
|
||||
"brightstaff_llm_tokens_total",
|
||||
"Tokens reported in the provider `usage` field, by provider, model and kind (prompt/completion)."
|
||||
);
|
||||
describe_counter!(
|
||||
"brightstaff_llm_tokens_usage_missing_total",
|
||||
"LLM responses that completed without a usable `usage` block (so token counts are unknown)."
|
||||
);
|
||||
|
||||
describe_counter!(
|
||||
"brightstaff_router_decisions_total",
|
||||
"Routing decisions made by the orchestrator, by route, selected model, and whether a fallback was used."
|
||||
);
|
||||
describe_histogram!(
|
||||
"brightstaff_router_decision_duration_seconds",
|
||||
"Time spent in the orchestrator deciding a route, by route."
|
||||
);
|
||||
describe_counter!(
|
||||
"brightstaff_routing_service_requests_total",
|
||||
"Outcomes of /routing/* decision requests: decision_served, no_candidates, policy_error."
|
||||
);
|
||||
describe_counter!(
|
||||
"brightstaff_session_cache_events_total",
|
||||
"Session affinity cache lookups and stores, by outcome."
|
||||
);
|
||||
|
||||
describe_gauge!(
|
||||
"brightstaff_build_info",
|
||||
"Build metadata. Always 1; labels carry version and git SHA."
|
||||
);
|
||||
}
|
||||
|
||||
fn emit_build_info() {
|
||||
let version = env!("CARGO_PKG_VERSION");
|
||||
let git_sha = option_env!("GIT_SHA").unwrap_or("unknown");
|
||||
gauge!(
|
||||
"brightstaff_build_info",
|
||||
"version" => version.to_string(),
|
||||
"git_sha" => git_sha.to_string(),
|
||||
)
|
||||
.set(1.0);
|
||||
}
|
||||
|
||||
/// Split a provider-qualified model id like `"openai/gpt-4o"` into
|
||||
/// `(provider, model)`. Returns `("unknown", raw)` when there is no `/`.
|
||||
pub fn split_provider_model(full: &str) -> (&str, &str) {
|
||||
match full.split_once('/') {
|
||||
Some((p, m)) => (p, m),
|
||||
None => ("unknown", full),
|
||||
}
|
||||
}
|
||||
|
||||
/// Bucket an HTTP status code into `"2xx"` / `"4xx"` / `"5xx"` / `"1xx"` / `"3xx"`.
|
||||
pub fn status_class(status: u16) -> &'static str {
|
||||
match status {
|
||||
100..=199 => "1xx",
|
||||
200..=299 => "2xx",
|
||||
300..=399 => "3xx",
|
||||
400..=499 => "4xx",
|
||||
500..=599 => "5xx",
|
||||
_ => "other",
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// HTTP RED helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// RAII guard that increments the in-flight gauge on construction and
|
||||
/// decrements on drop. Pair with [`HttpTimer`] in the `route()` wrapper so the
|
||||
/// gauge drops even on error paths.
|
||||
pub struct InFlightGuard {
|
||||
handler: &'static str,
|
||||
}
|
||||
|
||||
impl InFlightGuard {
|
||||
pub fn new(handler: &'static str) -> Self {
|
||||
gauge!(
|
||||
"brightstaff_http_in_flight_requests",
|
||||
"handler" => handler,
|
||||
)
|
||||
.increment(1.0);
|
||||
Self { handler }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for InFlightGuard {
|
||||
fn drop(&mut self) {
|
||||
gauge!(
|
||||
"brightstaff_http_in_flight_requests",
|
||||
"handler" => self.handler,
|
||||
)
|
||||
.decrement(1.0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Record the HTTP request counter + duration histogram.
|
||||
pub fn record_http(handler: &'static str, method: &'static str, status: u16, started: Instant) {
|
||||
let class = status_class(status);
|
||||
counter!(
|
||||
"brightstaff_http_requests_total",
|
||||
"handler" => handler,
|
||||
"method" => method,
|
||||
"status_class" => class,
|
||||
)
|
||||
.increment(1);
|
||||
histogram!(
|
||||
"brightstaff_http_request_duration_seconds",
|
||||
"handler" => handler,
|
||||
)
|
||||
.record(started.elapsed().as_secs_f64());
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// LLM upstream helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Classify an outcome of an LLM upstream call for the `error_class` label.
|
||||
pub fn llm_error_class_from_reqwest(err: &reqwest::Error) -> &'static str {
|
||||
if err.is_timeout() {
|
||||
"timeout"
|
||||
} else if err.is_connect() {
|
||||
"connect"
|
||||
} else if err.is_decode() {
|
||||
"parse"
|
||||
} else {
|
||||
"other"
|
||||
}
|
||||
}
|
||||
|
||||
/// Record the outcome of an LLM upstream call. `status` is the HTTP status
|
||||
/// the upstream returned (0 if the call never produced one, e.g. send failure).
|
||||
/// `error_class` is `"none"` on success, or a discriminated error label.
|
||||
pub fn record_llm_upstream(
|
||||
provider: &str,
|
||||
model: &str,
|
||||
status: u16,
|
||||
error_class: &str,
|
||||
duration: Duration,
|
||||
) {
|
||||
let class = if status == 0 {
|
||||
"error"
|
||||
} else {
|
||||
status_class(status)
|
||||
};
|
||||
counter!(
|
||||
"brightstaff_llm_upstream_requests_total",
|
||||
"provider" => provider.to_string(),
|
||||
"model" => model.to_string(),
|
||||
"status_class" => class,
|
||||
"error_class" => error_class.to_string(),
|
||||
)
|
||||
.increment(1);
|
||||
histogram!(
|
||||
"brightstaff_llm_upstream_duration_seconds",
|
||||
"provider" => provider.to_string(),
|
||||
"model" => model.to_string(),
|
||||
)
|
||||
.record(duration.as_secs_f64());
|
||||
}
|
||||
|
||||
pub fn record_llm_ttft(provider: &str, model: &str, ttft: Duration) {
|
||||
histogram!(
|
||||
"brightstaff_llm_time_to_first_token_seconds",
|
||||
"provider" => provider.to_string(),
|
||||
"model" => model.to_string(),
|
||||
)
|
||||
.record(ttft.as_secs_f64());
|
||||
}
|
||||
|
||||
pub fn record_llm_tokens(provider: &str, model: &str, kind: &'static str, count: u64) {
|
||||
counter!(
|
||||
"brightstaff_llm_tokens_total",
|
||||
"provider" => provider.to_string(),
|
||||
"model" => model.to_string(),
|
||||
"kind" => kind,
|
||||
)
|
||||
.increment(count);
|
||||
}
|
||||
|
||||
pub fn record_llm_tokens_usage_missing(provider: &str, model: &str) {
|
||||
counter!(
|
||||
"brightstaff_llm_tokens_usage_missing_total",
|
||||
"provider" => provider.to_string(),
|
||||
"model" => model.to_string(),
|
||||
)
|
||||
.increment(1);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Router helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
pub fn record_router_decision(
|
||||
route: &'static str,
|
||||
selected_model: &str,
|
||||
fallback: bool,
|
||||
duration: Duration,
|
||||
) {
|
||||
counter!(
|
||||
"brightstaff_router_decisions_total",
|
||||
"route" => route,
|
||||
"selected_model" => selected_model.to_string(),
|
||||
"fallback" => if fallback { "true" } else { "false" },
|
||||
)
|
||||
.increment(1);
|
||||
histogram!(
|
||||
"brightstaff_router_decision_duration_seconds",
|
||||
"route" => route,
|
||||
)
|
||||
.record(duration.as_secs_f64());
|
||||
}
|
||||
|
||||
pub fn record_routing_service_outcome(outcome: &'static str) {
|
||||
counter!(
|
||||
"brightstaff_routing_service_requests_total",
|
||||
"outcome" => outcome,
|
||||
)
|
||||
.increment(1);
|
||||
}
|
||||
|
||||
pub fn record_session_cache_event(outcome: &'static str) {
|
||||
counter!(
|
||||
"brightstaff_session_cache_events_total",
|
||||
"outcome" => outcome,
|
||||
)
|
||||
.increment(1);
|
||||
}
|
||||
|
|
@ -1,8 +1,14 @@
|
|||
use hermesllm::apis::openai::ChatCompletionsResponse;
|
||||
use hyper::header;
|
||||
use serde::Deserialize;
|
||||
use thiserror::Error;
|
||||
use tracing::warn;
|
||||
|
||||
/// Max bytes of raw upstream body we include in a log message or error text
|
||||
/// when the body is not a recognizable error envelope. Keeps logs from being
|
||||
/// flooded by huge HTML error pages.
|
||||
const RAW_BODY_LOG_LIMIT: usize = 512;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum HttpError {
|
||||
#[error("Failed to send request: {0}")]
|
||||
|
|
@ -10,13 +16,64 @@ pub enum HttpError {
|
|||
|
||||
#[error("Failed to parse JSON response: {0}")]
|
||||
Json(serde_json::Error, String),
|
||||
|
||||
#[error("Upstream returned {status}: {message}")]
|
||||
Upstream { status: u16, message: String },
|
||||
}
|
||||
|
||||
/// Shape of an OpenAI-style error response body, e.g.
|
||||
/// `{"error": {"message": "...", "type": "...", "param": "...", "code": ...}}`.
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct UpstreamErrorEnvelope {
|
||||
error: UpstreamErrorBody,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct UpstreamErrorBody {
|
||||
message: String,
|
||||
#[serde(default, rename = "type")]
|
||||
err_type: Option<String>,
|
||||
#[serde(default)]
|
||||
param: Option<String>,
|
||||
}
|
||||
|
||||
/// Extract a human-readable error message from an upstream response body.
|
||||
/// Tries to parse an OpenAI-style `{"error": {"message": ...}}` envelope; if
|
||||
/// that fails, falls back to the first `RAW_BODY_LOG_LIMIT` bytes of the raw
|
||||
/// body (UTF-8 safe).
|
||||
fn extract_upstream_error_message(body: &str) -> String {
|
||||
if let Ok(env) = serde_json::from_str::<UpstreamErrorEnvelope>(body) {
|
||||
let mut msg = env.error.message;
|
||||
if let Some(param) = env.error.param {
|
||||
msg.push_str(&format!(" (param={param})"));
|
||||
}
|
||||
if let Some(err_type) = env.error.err_type {
|
||||
msg.push_str(&format!(" [type={err_type}]"));
|
||||
}
|
||||
return msg;
|
||||
}
|
||||
truncate_for_log(body).to_string()
|
||||
}
|
||||
|
||||
fn truncate_for_log(s: &str) -> &str {
|
||||
if s.len() <= RAW_BODY_LOG_LIMIT {
|
||||
return s;
|
||||
}
|
||||
let mut end = RAW_BODY_LOG_LIMIT;
|
||||
while end > 0 && !s.is_char_boundary(end) {
|
||||
end -= 1;
|
||||
}
|
||||
&s[..end]
|
||||
}
|
||||
|
||||
/// Sends a POST request to the given URL and extracts the text content
|
||||
/// from the first choice of the `ChatCompletionsResponse`.
|
||||
///
|
||||
/// Returns `Some((content, elapsed))` on success, or `None` if the response
|
||||
/// had no choices or the first choice had no content.
|
||||
/// Returns `Some((content, elapsed))` on success, `None` if the response
|
||||
/// had no choices or the first choice had no content. Returns
|
||||
/// `HttpError::Upstream` for any non-2xx status, carrying a message
|
||||
/// extracted from the OpenAI-style error envelope (or a truncated raw body
|
||||
/// if the body is not in that shape).
|
||||
pub async fn post_and_extract_content(
|
||||
client: &reqwest::Client,
|
||||
url: &str,
|
||||
|
|
@ -26,17 +83,36 @@ pub async fn post_and_extract_content(
|
|||
let start_time = std::time::Instant::now();
|
||||
|
||||
let res = client.post(url).headers(headers).body(body).send().await?;
|
||||
let status = res.status();
|
||||
|
||||
let body = res.text().await?;
|
||||
let elapsed = start_time.elapsed();
|
||||
|
||||
if !status.is_success() {
|
||||
let message = extract_upstream_error_message(&body);
|
||||
warn!(
|
||||
status = status.as_u16(),
|
||||
message = %message,
|
||||
body_size = body.len(),
|
||||
"upstream returned error response"
|
||||
);
|
||||
return Err(HttpError::Upstream {
|
||||
status: status.as_u16(),
|
||||
message,
|
||||
});
|
||||
}
|
||||
|
||||
let response: ChatCompletionsResponse = serde_json::from_str(&body).map_err(|err| {
|
||||
warn!(error = %err, body = %body, "failed to parse json response");
|
||||
warn!(
|
||||
error = %err,
|
||||
body = %truncate_for_log(&body),
|
||||
"failed to parse json response",
|
||||
);
|
||||
HttpError::Json(err, format!("Failed to parse JSON: {}", body))
|
||||
})?;
|
||||
|
||||
if response.choices.is_empty() {
|
||||
warn!(body = %body, "no choices in response");
|
||||
warn!(body = %truncate_for_log(&body), "no choices in response");
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
|
|
@ -46,3 +122,52 @@ pub async fn post_and_extract_content(
|
|||
.as_ref()
|
||||
.map(|c| (c.clone(), elapsed)))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn extracts_message_from_openai_style_error_envelope() {
|
||||
let body = r#"{"error":{"code":400,"message":"This model's maximum context length is 32768 tokens. However, you requested 0 output tokens and your prompt contains at least 32769 input tokens, for a total of at least 32769 tokens.","param":"input_tokens","type":"BadRequestError"}}"#;
|
||||
let msg = extract_upstream_error_message(body);
|
||||
assert!(
|
||||
msg.starts_with("This model's maximum context length is 32768 tokens."),
|
||||
"unexpected message: {msg}"
|
||||
);
|
||||
assert!(msg.contains("(param=input_tokens)"));
|
||||
assert!(msg.contains("[type=BadRequestError]"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extracts_message_without_optional_fields() {
|
||||
let body = r#"{"error":{"message":"something broke"}}"#;
|
||||
let msg = extract_upstream_error_message(body);
|
||||
assert_eq!(msg, "something broke");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn falls_back_to_raw_body_when_not_error_envelope() {
|
||||
let body = "<html><body>502 Bad Gateway</body></html>";
|
||||
let msg = extract_upstream_error_message(body);
|
||||
assert_eq!(msg, body);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncates_non_envelope_bodies_in_logs() {
|
||||
let body = "x".repeat(RAW_BODY_LOG_LIMIT * 3);
|
||||
let msg = extract_upstream_error_message(&body);
|
||||
assert_eq!(msg.len(), RAW_BODY_LOG_LIMIT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncate_for_log_respects_utf8_boundaries() {
|
||||
// 2-byte characters; picking a length that would split mid-char.
|
||||
let body = "é".repeat(RAW_BODY_LOG_LIMIT);
|
||||
let out = truncate_for_log(&body);
|
||||
// Should be a valid &str (implicit — would panic if we returned
|
||||
// a non-boundary slice) and at most RAW_BODY_LOG_LIMIT bytes.
|
||||
assert!(out.len() <= RAW_BODY_LOG_LIMIT);
|
||||
assert!(out.chars().all(|c| c == 'é'));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,200 +0,0 @@
|
|||
use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use common::{
|
||||
configuration::TopLevelRoutingPreference,
|
||||
consts::{ARCH_PROVIDER_HINT_HEADER, REQUEST_ID_HEADER, TRACE_PARENT_HEADER},
|
||||
};
|
||||
|
||||
use super::router_model::{ModelUsagePreference, RoutingPreference};
|
||||
use hermesllm::apis::openai::Message;
|
||||
use hyper::header;
|
||||
use thiserror::Error;
|
||||
use tracing::{debug, info};
|
||||
|
||||
use super::http::{self, post_and_extract_content};
|
||||
use super::model_metrics::ModelMetricsService;
|
||||
use super::router_model::RouterModel;
|
||||
|
||||
use crate::router::router_model_v1;
|
||||
|
||||
pub struct RouterService {
|
||||
router_url: String,
|
||||
client: reqwest::Client,
|
||||
router_model: Arc<dyn RouterModel>,
|
||||
routing_provider_name: String,
|
||||
top_level_preferences: HashMap<String, TopLevelRoutingPreference>,
|
||||
metrics_service: Option<Arc<ModelMetricsService>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RoutingError {
|
||||
#[error(transparent)]
|
||||
Http(#[from] http::HttpError),
|
||||
|
||||
#[error("Router model error: {0}")]
|
||||
RouterModelError(#[from] super::router_model::RoutingModelError),
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, RoutingError>;
|
||||
|
||||
impl RouterService {
|
||||
pub fn new(
|
||||
top_level_prefs: Option<Vec<TopLevelRoutingPreference>>,
|
||||
metrics_service: Option<Arc<ModelMetricsService>>,
|
||||
router_url: String,
|
||||
routing_model_name: String,
|
||||
routing_provider_name: String,
|
||||
) -> Self {
|
||||
let top_level_preferences: HashMap<String, TopLevelRoutingPreference> = top_level_prefs
|
||||
.map_or_else(HashMap::new, |prefs| {
|
||||
prefs.into_iter().map(|p| (p.name.clone(), p)).collect()
|
||||
});
|
||||
|
||||
// Build sentinel routes for RouterModelV1: route_name → first model.
|
||||
// RouterModelV1 uses this to build its prompt; RouterService overrides
|
||||
// the model selection via rank_models() after the route is determined.
|
||||
let sentinel_routes: HashMap<String, Vec<RoutingPreference>> = top_level_preferences
|
||||
.iter()
|
||||
.filter_map(|(name, pref)| {
|
||||
pref.models.first().map(|first_model| {
|
||||
(
|
||||
first_model.clone(),
|
||||
vec![RoutingPreference {
|
||||
name: name.clone(),
|
||||
description: pref.description.clone(),
|
||||
}],
|
||||
)
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let router_model = Arc::new(router_model_v1::RouterModelV1::new(
|
||||
sentinel_routes,
|
||||
routing_model_name,
|
||||
router_model_v1::MAX_TOKEN_LEN,
|
||||
));
|
||||
|
||||
RouterService {
|
||||
router_url,
|
||||
client: reqwest::Client::new(),
|
||||
router_model,
|
||||
routing_provider_name,
|
||||
top_level_preferences,
|
||||
metrics_service,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn determine_route(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
traceparent: &str,
|
||||
inline_routing_preferences: Option<Vec<TopLevelRoutingPreference>>,
|
||||
request_id: &str,
|
||||
) -> Result<Option<(String, Vec<String>)>> {
|
||||
if messages.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Build inline top-level map from request if present (inline overrides config).
|
||||
let inline_top_map: Option<HashMap<String, TopLevelRoutingPreference>> =
|
||||
inline_routing_preferences
|
||||
.map(|prefs| prefs.into_iter().map(|p| (p.name.clone(), p)).collect());
|
||||
|
||||
// No routing defined — skip the router call entirely.
|
||||
if inline_top_map.is_none() && self.top_level_preferences.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// For inline overrides, build synthetic ModelUsagePreference list so RouterModelV1
|
||||
// generates the correct prompt (route name + description pairs).
|
||||
// For config-level prefs the sentinel routes are already baked into RouterModelV1.
|
||||
let effective_usage_preferences: Option<Vec<ModelUsagePreference>> =
|
||||
inline_top_map.as_ref().map(|inline_map| {
|
||||
inline_map
|
||||
.values()
|
||||
.map(|p| ModelUsagePreference {
|
||||
model: p.models.first().cloned().unwrap_or_default(),
|
||||
routing_preferences: vec![RoutingPreference {
|
||||
name: p.name.clone(),
|
||||
description: p.description.clone(),
|
||||
}],
|
||||
})
|
||||
.collect()
|
||||
});
|
||||
|
||||
let router_request = self
|
||||
.router_model
|
||||
.generate_request(messages, &effective_usage_preferences);
|
||||
|
||||
debug!(
|
||||
model = %self.router_model.get_model_name(),
|
||||
endpoint = %self.router_url,
|
||||
"sending request to arch-router"
|
||||
);
|
||||
|
||||
let body = serde_json::to_string(&router_request)
|
||||
.map_err(super::router_model::RoutingModelError::from)?;
|
||||
debug!(body = %body, "arch router request");
|
||||
|
||||
let mut headers = header::HeaderMap::new();
|
||||
headers.insert(
|
||||
header::CONTENT_TYPE,
|
||||
header::HeaderValue::from_static("application/json"),
|
||||
);
|
||||
if let Ok(val) = header::HeaderValue::from_str(&self.routing_provider_name) {
|
||||
headers.insert(
|
||||
header::HeaderName::from_static(ARCH_PROVIDER_HINT_HEADER),
|
||||
val,
|
||||
);
|
||||
}
|
||||
if let Ok(val) = header::HeaderValue::from_str(traceparent) {
|
||||
headers.insert(header::HeaderName::from_static(TRACE_PARENT_HEADER), val);
|
||||
}
|
||||
if let Ok(val) = header::HeaderValue::from_str(request_id) {
|
||||
headers.insert(header::HeaderName::from_static(REQUEST_ID_HEADER), val);
|
||||
}
|
||||
headers.insert(
|
||||
header::HeaderName::from_static("model"),
|
||||
header::HeaderValue::from_static("arch-router"),
|
||||
);
|
||||
|
||||
let Some((content, elapsed)) =
|
||||
post_and_extract_content(&self.client, &self.router_url, headers, body).await?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Parse the route name from the router response.
|
||||
let parsed = self
|
||||
.router_model
|
||||
.parse_response(&content, &effective_usage_preferences)?;
|
||||
|
||||
let result = if let Some((route_name, _sentinel)) = parsed {
|
||||
let top_pref = inline_top_map
|
||||
.as_ref()
|
||||
.and_then(|m| m.get(&route_name))
|
||||
.or_else(|| self.top_level_preferences.get(&route_name));
|
||||
|
||||
if let Some(pref) = top_pref {
|
||||
let ranked = match &self.metrics_service {
|
||||
Some(svc) => svc.rank_models(&pref.models, &pref.selection_policy).await,
|
||||
None => pref.models.clone(),
|
||||
};
|
||||
Some((route_name, ranked))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
info!(
|
||||
content = %content.replace("\n", "\\n"),
|
||||
selected_model = ?result,
|
||||
response_time_ms = elapsed.as_millis(),
|
||||
"arch-router determined route"
|
||||
);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,8 +1,7 @@
|
|||
pub(crate) mod http;
|
||||
pub mod llm;
|
||||
pub mod model_metrics;
|
||||
pub mod orchestrator;
|
||||
pub mod orchestrator_model;
|
||||
pub mod orchestrator_model_v1;
|
||||
pub mod router_model;
|
||||
pub mod router_model_v1;
|
||||
#[cfg(test)]
|
||||
mod stress_tests;
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
use std::{collections::HashMap, sync::Arc};
|
||||
use std::{borrow::Cow, collections::HashMap, sync::Arc, time::Duration};
|
||||
|
||||
use common::{
|
||||
configuration::{AgentUsagePreference, OrchestrationPreference},
|
||||
configuration::{AgentUsagePreference, OrchestrationPreference, TopLevelRoutingPreference},
|
||||
consts::{ARCH_PROVIDER_HINT_HEADER, REQUEST_ID_HEADER},
|
||||
};
|
||||
use hermesllm::apis::openai::Message;
|
||||
|
|
@ -12,15 +12,28 @@ use thiserror::Error;
|
|||
use tracing::{debug, info};
|
||||
|
||||
use super::http::{self, post_and_extract_content};
|
||||
use super::model_metrics::ModelMetricsService;
|
||||
use super::orchestrator_model::OrchestratorModel;
|
||||
|
||||
use crate::metrics as bs_metrics;
|
||||
use crate::metrics::labels as metric_labels;
|
||||
use crate::router::orchestrator_model_v1;
|
||||
use crate::session_cache::SessionCache;
|
||||
|
||||
pub use crate::session_cache::CachedRoute;
|
||||
|
||||
const DEFAULT_SESSION_TTL_SECONDS: u64 = 600;
|
||||
|
||||
pub struct OrchestratorService {
|
||||
orchestrator_url: String,
|
||||
client: reqwest::Client,
|
||||
orchestrator_model: Arc<dyn OrchestratorModel>,
|
||||
orchestrator_provider_name: String,
|
||||
top_level_preferences: HashMap<String, TopLevelRoutingPreference>,
|
||||
metrics_service: Option<Arc<ModelMetricsService>>,
|
||||
session_cache: Option<Arc<dyn SessionCache>>,
|
||||
session_ttl: Duration,
|
||||
tenant_header: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
|
|
@ -39,13 +52,12 @@ impl OrchestratorService {
|
|||
orchestrator_url: String,
|
||||
orchestration_model_name: String,
|
||||
orchestrator_provider_name: String,
|
||||
max_token_length: usize,
|
||||
) -> Self {
|
||||
let agent_orchestrations: HashMap<String, Vec<OrchestrationPreference>> = HashMap::new();
|
||||
|
||||
let orchestrator_model = Arc::new(orchestrator_model_v1::OrchestratorModelV1::new(
|
||||
agent_orchestrations,
|
||||
orchestration_model_name.clone(),
|
||||
orchestrator_model_v1::MAX_TOKEN_LEN,
|
||||
HashMap::new(),
|
||||
orchestration_model_name,
|
||||
max_token_length,
|
||||
));
|
||||
|
||||
OrchestratorService {
|
||||
|
|
@ -53,9 +65,189 @@ impl OrchestratorService {
|
|||
client: reqwest::Client::new(),
|
||||
orchestrator_model,
|
||||
orchestrator_provider_name,
|
||||
top_level_preferences: HashMap::new(),
|
||||
metrics_service: None,
|
||||
session_cache: None,
|
||||
session_ttl: Duration::from_secs(DEFAULT_SESSION_TTL_SECONDS),
|
||||
tenant_header: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn with_routing(
|
||||
orchestrator_url: String,
|
||||
orchestration_model_name: String,
|
||||
orchestrator_provider_name: String,
|
||||
top_level_prefs: Option<Vec<TopLevelRoutingPreference>>,
|
||||
metrics_service: Option<Arc<ModelMetricsService>>,
|
||||
session_ttl_seconds: Option<u64>,
|
||||
session_cache: Arc<dyn SessionCache>,
|
||||
tenant_header: Option<String>,
|
||||
max_token_length: usize,
|
||||
) -> Self {
|
||||
let top_level_preferences: HashMap<String, TopLevelRoutingPreference> = top_level_prefs
|
||||
.map_or_else(HashMap::new, |prefs| {
|
||||
prefs.into_iter().map(|p| (p.name.clone(), p)).collect()
|
||||
});
|
||||
|
||||
let orchestrator_model = Arc::new(orchestrator_model_v1::OrchestratorModelV1::new(
|
||||
HashMap::new(),
|
||||
orchestration_model_name,
|
||||
max_token_length,
|
||||
));
|
||||
|
||||
let session_ttl =
|
||||
Duration::from_secs(session_ttl_seconds.unwrap_or(DEFAULT_SESSION_TTL_SECONDS));
|
||||
|
||||
OrchestratorService {
|
||||
orchestrator_url,
|
||||
client: reqwest::Client::new(),
|
||||
orchestrator_model,
|
||||
orchestrator_provider_name,
|
||||
top_level_preferences,
|
||||
metrics_service,
|
||||
session_cache: Some(session_cache),
|
||||
session_ttl,
|
||||
tenant_header,
|
||||
}
|
||||
}
|
||||
|
||||
// ---- Session cache methods ----
|
||||
|
||||
#[must_use]
|
||||
pub fn tenant_header(&self) -> Option<&str> {
|
||||
self.tenant_header.as_deref()
|
||||
}
|
||||
|
||||
fn session_key<'a>(tenant_id: Option<&str>, session_id: &'a str) -> Cow<'a, str> {
|
||||
match tenant_id {
|
||||
Some(t) => Cow::Owned(format!("{t}:{session_id}")),
|
||||
None => Cow::Borrowed(session_id),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_cached_route(
|
||||
&self,
|
||||
session_id: &str,
|
||||
tenant_id: Option<&str>,
|
||||
) -> Option<CachedRoute> {
|
||||
let cache = self.session_cache.as_ref()?;
|
||||
let result = cache.get(&Self::session_key(tenant_id, session_id)).await;
|
||||
bs_metrics::record_session_cache_event(if result.is_some() {
|
||||
metric_labels::SESSION_CACHE_HIT
|
||||
} else {
|
||||
metric_labels::SESSION_CACHE_MISS
|
||||
});
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn cache_route(
|
||||
&self,
|
||||
session_id: String,
|
||||
tenant_id: Option<&str>,
|
||||
model_name: String,
|
||||
route_name: Option<String>,
|
||||
) {
|
||||
if let Some(ref cache) = self.session_cache {
|
||||
cache
|
||||
.put(
|
||||
&Self::session_key(tenant_id, &session_id),
|
||||
CachedRoute {
|
||||
model_name,
|
||||
route_name,
|
||||
},
|
||||
self.session_ttl,
|
||||
)
|
||||
.await;
|
||||
bs_metrics::record_session_cache_event(metric_labels::SESSION_CACHE_STORE);
|
||||
}
|
||||
}
|
||||
|
||||
// ---- LLM routing ----
|
||||
|
||||
pub async fn determine_route(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
inline_routing_preferences: Option<Vec<TopLevelRoutingPreference>>,
|
||||
request_id: &str,
|
||||
) -> Result<Option<(String, Vec<String>)>> {
|
||||
if messages.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let inline_top_map: Option<HashMap<String, TopLevelRoutingPreference>> =
|
||||
inline_routing_preferences
|
||||
.map(|prefs| prefs.into_iter().map(|p| (p.name.clone(), p)).collect());
|
||||
|
||||
if inline_top_map.is_none() && self.top_level_preferences.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let effective_source = inline_top_map
|
||||
.as_ref()
|
||||
.unwrap_or(&self.top_level_preferences);
|
||||
|
||||
let effective_prefs: Vec<AgentUsagePreference> = effective_source
|
||||
.values()
|
||||
.map(|p| AgentUsagePreference {
|
||||
model: p.models.first().cloned().unwrap_or_default(),
|
||||
orchestration_preferences: vec![OrchestrationPreference {
|
||||
name: p.name.clone(),
|
||||
description: p.description.clone(),
|
||||
}],
|
||||
})
|
||||
.collect();
|
||||
|
||||
let orchestration_result = self
|
||||
.determine_orchestration(
|
||||
messages,
|
||||
Some(effective_prefs),
|
||||
Some(request_id.to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let result = if let Some(ref routes) = orchestration_result {
|
||||
if routes.len() > 1 {
|
||||
let all_routes: Vec<&str> = routes.iter().map(|(name, _)| name.as_str()).collect();
|
||||
info!(
|
||||
routes = ?all_routes,
|
||||
using = %all_routes.first().unwrap_or(&"none"),
|
||||
"plano-orchestrator detected multiple intents, using first"
|
||||
);
|
||||
}
|
||||
|
||||
if let Some((route_name, _)) = routes.first() {
|
||||
let top_pref = inline_top_map
|
||||
.as_ref()
|
||||
.and_then(|m| m.get(route_name))
|
||||
.or_else(|| self.top_level_preferences.get(route_name));
|
||||
|
||||
if let Some(pref) = top_pref {
|
||||
let ranked = match &self.metrics_service {
|
||||
Some(svc) => svc.rank_models(&pref.models, &pref.selection_policy).await,
|
||||
None => pref.models.clone(),
|
||||
};
|
||||
Some((route_name.clone(), ranked))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
info!(
|
||||
selected_model = ?result,
|
||||
"plano-orchestrator determined route"
|
||||
);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
// ---- Agent orchestration (existing) ----
|
||||
|
||||
pub async fn determine_orchestration(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
|
|
@ -80,12 +272,12 @@ impl OrchestratorService {
|
|||
debug!(
|
||||
model = %self.orchestrator_model.get_model_name(),
|
||||
endpoint = %self.orchestrator_url,
|
||||
"sending request to arch-orchestrator"
|
||||
"sending request to plano-orchestrator"
|
||||
);
|
||||
|
||||
let body = serde_json::to_string(&orchestrator_request)
|
||||
.map_err(super::orchestrator_model::OrchestratorModelError::from)?;
|
||||
debug!(body = %body, "arch orchestrator request");
|
||||
debug!(body = %body, "plano-orchestrator request");
|
||||
|
||||
let mut headers = header::HeaderMap::new();
|
||||
headers.insert(
|
||||
|
|
@ -98,7 +290,6 @@ impl OrchestratorService {
|
|||
.unwrap_or_else(|_| header::HeaderValue::from_static("plano-orchestrator")),
|
||||
);
|
||||
|
||||
// Inject OpenTelemetry trace context from current span
|
||||
global::get_text_map_propagator(|propagator| {
|
||||
let cx =
|
||||
tracing_opentelemetry::OpenTelemetrySpanExt::context(&tracing::Span::current());
|
||||
|
|
@ -130,9 +321,113 @@ impl OrchestratorService {
|
|||
content = %content.replace("\n", "\\n"),
|
||||
selected_routes = ?parsed,
|
||||
response_time_ms = elapsed.as_millis(),
|
||||
"arch-orchestrator determined routes"
|
||||
"plano-orchestrator determined routes"
|
||||
);
|
||||
|
||||
Ok(parsed)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::session_cache::memory::MemorySessionCache;
|
||||
|
||||
fn make_orchestrator_service(ttl_seconds: u64, max_entries: usize) -> OrchestratorService {
|
||||
let session_cache = Arc::new(MemorySessionCache::new(max_entries));
|
||||
OrchestratorService::with_routing(
|
||||
"http://localhost:12001/v1/chat/completions".to_string(),
|
||||
"Plano-Orchestrator".to_string(),
|
||||
"plano-orchestrator".to_string(),
|
||||
None,
|
||||
None,
|
||||
Some(ttl_seconds),
|
||||
session_cache,
|
||||
None,
|
||||
orchestrator_model_v1::MAX_TOKEN_LEN,
|
||||
)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cache_miss_returns_none() {
|
||||
let svc = make_orchestrator_service(600, 100);
|
||||
assert!(svc
|
||||
.get_cached_route("unknown-session", None)
|
||||
.await
|
||||
.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cache_hit_returns_cached_route() {
|
||||
let svc = make_orchestrator_service(600, 100);
|
||||
svc.cache_route(
|
||||
"s1".to_string(),
|
||||
None,
|
||||
"gpt-4o".to_string(),
|
||||
Some("code".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let cached = svc.get_cached_route("s1", None).await.unwrap();
|
||||
assert_eq!(cached.model_name, "gpt-4o");
|
||||
assert_eq!(cached.route_name, Some("code".to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cache_expired_entry_returns_none() {
|
||||
let svc = make_orchestrator_service(0, 100);
|
||||
svc.cache_route("s1".to_string(), None, "gpt-4o".to_string(), None)
|
||||
.await;
|
||||
assert!(svc.get_cached_route("s1", None).await.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_expired_entries_not_returned() {
|
||||
let svc = make_orchestrator_service(0, 100);
|
||||
svc.cache_route("s1".to_string(), None, "gpt-4o".to_string(), None)
|
||||
.await;
|
||||
svc.cache_route("s2".to_string(), None, "claude".to_string(), None)
|
||||
.await;
|
||||
|
||||
assert!(svc.get_cached_route("s1", None).await.is_none());
|
||||
assert!(svc.get_cached_route("s2", None).await.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cache_evicts_oldest_when_full() {
|
||||
let svc = make_orchestrator_service(600, 2);
|
||||
svc.cache_route("s1".to_string(), None, "model-a".to_string(), None)
|
||||
.await;
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
svc.cache_route("s2".to_string(), None, "model-b".to_string(), None)
|
||||
.await;
|
||||
|
||||
svc.cache_route("s3".to_string(), None, "model-c".to_string(), None)
|
||||
.await;
|
||||
|
||||
assert!(svc.get_cached_route("s1", None).await.is_none());
|
||||
assert!(svc.get_cached_route("s2", None).await.is_some());
|
||||
assert!(svc.get_cached_route("s3", None).await.is_some());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cache_update_existing_session_does_not_evict() {
|
||||
let svc = make_orchestrator_service(600, 2);
|
||||
svc.cache_route("s1".to_string(), None, "model-a".to_string(), None)
|
||||
.await;
|
||||
svc.cache_route("s2".to_string(), None, "model-b".to_string(), None)
|
||||
.await;
|
||||
|
||||
svc.cache_route(
|
||||
"s1".to_string(),
|
||||
None,
|
||||
"model-a-updated".to_string(),
|
||||
Some("route".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
let s1 = svc.get_cached_route("s1", None).await.unwrap();
|
||||
assert_eq!(s1.model_name, "model-a-updated");
|
||||
assert!(svc.get_cached_route("s2", None).await.is_some());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -11,8 +11,7 @@ pub enum OrchestratorModelError {
|
|||
pub type Result<T> = std::result::Result<T, OrchestratorModelError>;
|
||||
|
||||
/// OrchestratorModel trait for handling orchestration requests.
|
||||
/// Unlike RouterModel which returns a single route, OrchestratorModel
|
||||
/// can return multiple routes as the model output format is:
|
||||
/// Returns multiple routes as the model output format is:
|
||||
/// {"route": ["route_name_1", "route_name_2", ...]}
|
||||
pub trait OrchestratorModel: Send + Sync {
|
||||
fn generate_request(
|
||||
|
|
|
|||
|
|
@ -8,7 +8,19 @@ use tracing::{debug, warn};
|
|||
|
||||
use super::orchestrator_model::{OrchestratorModel, OrchestratorModelError};
|
||||
|
||||
pub const MAX_TOKEN_LEN: usize = 2048; // Default max token length for the orchestration model
|
||||
pub const MAX_TOKEN_LEN: usize = 8192; // Default max token length for the orchestration model
|
||||
|
||||
/// Hard cap on the number of recent messages considered when building the
|
||||
/// routing prompt. Bounds prompt growth for long-running conversations and
|
||||
/// acts as an outer guardrail before the token-budget loop runs. The most
|
||||
/// recent `MAX_ROUTING_TURNS` filtered messages are kept; older turns are
|
||||
/// dropped entirely.
|
||||
pub const MAX_ROUTING_TURNS: usize = 16;
|
||||
|
||||
/// Unicode ellipsis used to mark where content was trimmed out of a long
|
||||
/// message. Helps signal to the downstream router model that the message was
|
||||
/// truncated.
|
||||
const TRIM_MARKER: &str = "…";
|
||||
|
||||
/// Custom JSON formatter that produces spaced JSON (space after colons and commas), same as JSON in python
|
||||
struct SpacedJsonFormatter;
|
||||
|
|
@ -176,47 +188,82 @@ impl OrchestratorModel for OrchestratorModelV1 {
|
|||
messages: &[Message],
|
||||
usage_preferences_from_request: &Option<Vec<AgentUsagePreference>>,
|
||||
) -> ChatCompletionsRequest {
|
||||
// remove system prompt, tool calls, tool call response and messages without content
|
||||
// if content is empty its likely a tool call
|
||||
// when role == tool its tool call response
|
||||
let messages_vec = messages
|
||||
// Remove system/developer/tool messages and messages without extractable
|
||||
// text (tool calls have no text content we can classify against).
|
||||
let filtered: Vec<&Message> = messages
|
||||
.iter()
|
||||
.filter(|m| {
|
||||
m.role != Role::System
|
||||
&& m.role != Role::Developer
|
||||
&& m.role != Role::Tool
|
||||
&& !m.content.extract_text().is_empty()
|
||||
})
|
||||
.collect::<Vec<&Message>>();
|
||||
.collect();
|
||||
|
||||
// Following code is to ensure that the conversation does not exceed max token length
|
||||
// Note: we use a simple heuristic to estimate token count based on character length to optimize for performance
|
||||
// Outer guardrail: only consider the last `MAX_ROUTING_TURNS` filtered
|
||||
// messages when building the routing prompt. Keeps prompt growth
|
||||
// predictable for long conversations regardless of per-message size.
|
||||
let start = filtered.len().saturating_sub(MAX_ROUTING_TURNS);
|
||||
let messages_vec: &[&Message] = &filtered[start..];
|
||||
|
||||
// Ensure the conversation does not exceed the configured token budget.
|
||||
// We use `len() / TOKEN_LENGTH_DIVISOR` as a cheap token estimate to
|
||||
// avoid running a real tokenizer on the hot path.
|
||||
let mut token_count = ARCH_ORCHESTRATOR_V1_SYSTEM_PROMPT.len() / TOKEN_LENGTH_DIVISOR;
|
||||
let mut selected_messages_list_reversed: Vec<&Message> = vec![];
|
||||
let mut selected_messages_list_reversed: Vec<Message> = vec![];
|
||||
for (selected_messsage_count, message) in messages_vec.iter().rev().enumerate() {
|
||||
let message_token_count = message.content.extract_text().len() / TOKEN_LENGTH_DIVISOR;
|
||||
token_count += message_token_count;
|
||||
if token_count > self.max_token_length {
|
||||
let message_text = message.content.extract_text();
|
||||
let message_token_count = message_text.len() / TOKEN_LENGTH_DIVISOR;
|
||||
if token_count + message_token_count > self.max_token_length {
|
||||
let remaining_tokens = self.max_token_length.saturating_sub(token_count);
|
||||
debug!(
|
||||
token_count = token_count,
|
||||
attempted_total_tokens = token_count + message_token_count,
|
||||
max_tokens = self.max_token_length,
|
||||
remaining_tokens,
|
||||
selected = selected_messsage_count,
|
||||
total = messages_vec.len(),
|
||||
"token count exceeds max, truncating conversation"
|
||||
);
|
||||
if message.role == Role::User {
|
||||
// If message that exceeds max token length is from user, we need to keep it
|
||||
selected_messages_list_reversed.push(message);
|
||||
// If the overflow message is from the user we need to keep
|
||||
// some of it so the orchestrator still sees the latest user
|
||||
// intent. Use a middle-trim (head + ellipsis + tail): users
|
||||
// often frame the task at the start AND put the actual ask
|
||||
// at the end of a long pasted block, so preserving both is
|
||||
// better than a head-only cut. The ellipsis also signals to
|
||||
// the router model that content was dropped.
|
||||
if message.role == Role::User && remaining_tokens > 0 {
|
||||
let max_bytes = remaining_tokens.saturating_mul(TOKEN_LENGTH_DIVISOR);
|
||||
let truncated = trim_middle_utf8(&message_text, max_bytes);
|
||||
selected_messages_list_reversed.push(Message {
|
||||
role: Role::User,
|
||||
content: Some(MessageContent::Text(truncated)),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
// If we are here, it means that the message is within the max token length
|
||||
selected_messages_list_reversed.push(message);
|
||||
token_count += message_token_count;
|
||||
selected_messages_list_reversed.push(Message {
|
||||
role: message.role.clone(),
|
||||
content: Some(MessageContent::Text(message_text)),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
});
|
||||
}
|
||||
|
||||
if selected_messages_list_reversed.is_empty() {
|
||||
debug!("no messages selected, using last message");
|
||||
if let Some(last_message) = messages_vec.last() {
|
||||
selected_messages_list_reversed.push(last_message);
|
||||
selected_messages_list_reversed.push(Message {
|
||||
role: last_message.role.clone(),
|
||||
content: Some(MessageContent::Text(last_message.content.extract_text())),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -236,22 +283,8 @@ impl OrchestratorModel for OrchestratorModelV1 {
|
|||
}
|
||||
|
||||
// Reverse the selected messages to maintain the conversation order
|
||||
let selected_conversation_list = selected_messages_list_reversed
|
||||
.iter()
|
||||
.rev()
|
||||
.map(|message| Message {
|
||||
role: message.role.clone(),
|
||||
content: Some(MessageContent::Text(
|
||||
message
|
||||
.content
|
||||
.as_ref()
|
||||
.map_or(String::new(), |c| c.to_string()),
|
||||
)),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
})
|
||||
.collect::<Vec<Message>>();
|
||||
let selected_conversation_list: Vec<Message> =
|
||||
selected_messages_list_reversed.into_iter().rev().collect();
|
||||
|
||||
// Generate the orchestrator request message based on the usage preferences.
|
||||
// If preferences are passed in request then we use them;
|
||||
|
|
@ -404,6 +437,45 @@ fn fix_json_response(body: &str) -> String {
|
|||
body.replace("'", "\"").replace("\\n", "")
|
||||
}
|
||||
|
||||
/// Truncate `s` so the result is at most `max_bytes` bytes long, keeping
|
||||
/// roughly 60% from the start and 40% from the end, with a Unicode ellipsis
|
||||
/// separating the two. All splits respect UTF-8 character boundaries. When
|
||||
/// `max_bytes` is too small to fit the marker at all, falls back to a
|
||||
/// head-only truncation.
|
||||
fn trim_middle_utf8(s: &str, max_bytes: usize) -> String {
|
||||
if s.len() <= max_bytes {
|
||||
return s.to_string();
|
||||
}
|
||||
if max_bytes <= TRIM_MARKER.len() {
|
||||
// Not enough room even for the marker — just keep the start.
|
||||
let mut end = max_bytes;
|
||||
while end > 0 && !s.is_char_boundary(end) {
|
||||
end -= 1;
|
||||
}
|
||||
return s[..end].to_string();
|
||||
}
|
||||
|
||||
let available = max_bytes - TRIM_MARKER.len();
|
||||
// Bias toward the start (60%) where task framing typically lives, while
|
||||
// still preserving ~40% of the tail where the user's actual ask often
|
||||
// appears after a long paste.
|
||||
let mut start_len = available * 3 / 5;
|
||||
while start_len > 0 && !s.is_char_boundary(start_len) {
|
||||
start_len -= 1;
|
||||
}
|
||||
let end_len = available - start_len;
|
||||
let mut end_start = s.len().saturating_sub(end_len);
|
||||
while end_start < s.len() && !s.is_char_boundary(end_start) {
|
||||
end_start += 1;
|
||||
}
|
||||
|
||||
let mut out = String::with_capacity(start_len + TRIM_MARKER.len() + (s.len() - end_start));
|
||||
out.push_str(&s[..start_len]);
|
||||
out.push_str(TRIM_MARKER);
|
||||
out.push_str(&s[end_start..]);
|
||||
out
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for dyn OrchestratorModel {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "OrchestratorModel")
|
||||
|
|
@ -776,6 +848,10 @@ If no routes are needed, return an empty list for `route`.
|
|||
|
||||
#[test]
|
||||
fn test_conversation_trim_upto_user_message() {
|
||||
// With max_token_length=230, the older user message "given the image
|
||||
// In style of Andy Warhol" overflows the remaining budget and gets
|
||||
// middle-trimmed (head + ellipsis + tail) until it fits. Newer turns
|
||||
// are kept in full.
|
||||
let expected_prompt = r#"
|
||||
You are a helpful assistant that selects the most suitable routes based on user intent.
|
||||
You are provided with a list of available routes enclosed within <routes></routes> XML tags:
|
||||
|
|
@ -788,7 +864,7 @@ You are also given the conversation context enclosed within <conversation></conv
|
|||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": "given the image In style of Andy Warhol"
|
||||
"content": "given…rhol"
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
|
|
@ -861,6 +937,190 @@ If no routes are needed, return an empty list for `route`.
|
|||
assert_eq!(expected_prompt, prompt);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_huge_single_user_message_is_middle_trimmed() {
|
||||
// Regression test for the case where a single, extremely large user
|
||||
// message was being passed to the orchestrator verbatim and blowing
|
||||
// past the upstream model's context window. The trimmer must now
|
||||
// middle-trim (head + ellipsis + tail) the oversized message so the
|
||||
// resulting request stays within the configured budget, and the
|
||||
// trim marker must be present so the router model knows content
|
||||
// was dropped.
|
||||
let orchestrations_str = r#"
|
||||
{
|
||||
"gpt-4o": [
|
||||
{"name": "Image generation", "description": "generating image"}
|
||||
]
|
||||
}
|
||||
"#;
|
||||
let agent_orchestrations = serde_json::from_str::<
|
||||
HashMap<String, Vec<OrchestrationPreference>>,
|
||||
>(orchestrations_str)
|
||||
.unwrap();
|
||||
|
||||
let max_token_length = 2048;
|
||||
let orchestrator = OrchestratorModelV1::new(
|
||||
agent_orchestrations,
|
||||
"test-model".to_string(),
|
||||
max_token_length,
|
||||
);
|
||||
|
||||
// ~500KB of content — same scale as the real payload that triggered
|
||||
// the production upstream 400.
|
||||
let head = "HEAD_MARKER_START ";
|
||||
let tail = " TAIL_MARKER_END";
|
||||
let filler = "A".repeat(500_000);
|
||||
let huge_user_content = format!("{head}{filler}{tail}");
|
||||
|
||||
let conversation = vec![Message {
|
||||
role: Role::User,
|
||||
content: Some(MessageContent::Text(huge_user_content.clone())),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
}];
|
||||
|
||||
let req = orchestrator.generate_request(&conversation, &None);
|
||||
let prompt = req.messages[0].content.extract_text();
|
||||
|
||||
// Prompt must stay bounded. Generous ceiling = budget-in-bytes +
|
||||
// scaffolding + slack. Real result should be well under this.
|
||||
let byte_ceiling = max_token_length * TOKEN_LENGTH_DIVISOR
|
||||
+ ARCH_ORCHESTRATOR_V1_SYSTEM_PROMPT.len()
|
||||
+ 1024;
|
||||
assert!(
|
||||
prompt.len() < byte_ceiling,
|
||||
"prompt length {} exceeded ceiling {} — truncation did not apply",
|
||||
prompt.len(),
|
||||
byte_ceiling,
|
||||
);
|
||||
|
||||
// Not all 500k filler chars survive.
|
||||
let a_count = prompt.chars().filter(|c| *c == 'A').count();
|
||||
assert!(
|
||||
a_count < filler.len(),
|
||||
"expected user message to be truncated; all {} 'A's survived",
|
||||
a_count
|
||||
);
|
||||
assert!(
|
||||
a_count > 0,
|
||||
"expected some of the user message to survive truncation"
|
||||
);
|
||||
|
||||
// Head and tail of the message must both be preserved (that's the
|
||||
// whole point of middle-trim over head-only).
|
||||
assert!(
|
||||
prompt.contains(head),
|
||||
"head marker missing — head was not preserved"
|
||||
);
|
||||
assert!(
|
||||
prompt.contains(tail),
|
||||
"tail marker missing — tail was not preserved"
|
||||
);
|
||||
|
||||
// Trim marker must be present so the router model can see that
|
||||
// content was omitted.
|
||||
assert!(
|
||||
prompt.contains(TRIM_MARKER),
|
||||
"ellipsis trim marker missing from truncated prompt"
|
||||
);
|
||||
|
||||
// Routing prompt scaffolding remains intact.
|
||||
assert!(prompt.contains("<conversation>"));
|
||||
assert!(prompt.contains("<routes>"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_turn_cap_limits_routing_history() {
|
||||
// The outer turn-cap guardrail should keep only the last
|
||||
// `MAX_ROUTING_TURNS` filtered messages regardless of how long the
|
||||
// conversation is. We build a conversation with alternating
|
||||
// user/assistant turns tagged with their index and verify that only
|
||||
// the tail of the conversation makes it into the prompt.
|
||||
let orchestrations_str = r#"
|
||||
{
|
||||
"gpt-4o": [
|
||||
{"name": "Image generation", "description": "generating image"}
|
||||
]
|
||||
}
|
||||
"#;
|
||||
let agent_orchestrations = serde_json::from_str::<
|
||||
HashMap<String, Vec<OrchestrationPreference>>,
|
||||
>(orchestrations_str)
|
||||
.unwrap();
|
||||
|
||||
let orchestrator =
|
||||
OrchestratorModelV1::new(agent_orchestrations, "test-model".to_string(), usize::MAX);
|
||||
|
||||
let mut conversation: Vec<Message> = Vec::new();
|
||||
let total_turns = MAX_ROUTING_TURNS * 2; // well past the cap
|
||||
for i in 0..total_turns {
|
||||
let role = if i % 2 == 0 {
|
||||
Role::User
|
||||
} else {
|
||||
Role::Assistant
|
||||
};
|
||||
conversation.push(Message {
|
||||
role,
|
||||
content: Some(MessageContent::Text(format!("turn-{i:03}"))),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
});
|
||||
}
|
||||
|
||||
let req = orchestrator.generate_request(&conversation, &None);
|
||||
let prompt = req.messages[0].content.extract_text();
|
||||
|
||||
// The last MAX_ROUTING_TURNS messages (indexes total-cap..total)
|
||||
// must all appear.
|
||||
for i in (total_turns - MAX_ROUTING_TURNS)..total_turns {
|
||||
let tag = format!("turn-{i:03}");
|
||||
assert!(
|
||||
prompt.contains(&tag),
|
||||
"expected recent turn tag {tag} to be present"
|
||||
);
|
||||
}
|
||||
|
||||
// And earlier turns (indexes 0..total-cap) must all be dropped.
|
||||
for i in 0..(total_turns - MAX_ROUTING_TURNS) {
|
||||
let tag = format!("turn-{i:03}");
|
||||
assert!(
|
||||
!prompt.contains(&tag),
|
||||
"old turn tag {tag} leaked past turn cap into the prompt"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_trim_middle_utf8_helper() {
|
||||
// No-op when already small enough.
|
||||
assert_eq!(trim_middle_utf8("hello", 100), "hello");
|
||||
assert_eq!(trim_middle_utf8("hello", 5), "hello");
|
||||
|
||||
// 60/40 split with ellipsis when too long.
|
||||
let long = "a".repeat(20);
|
||||
let out = trim_middle_utf8(&long, 10);
|
||||
assert!(out.len() <= 10);
|
||||
assert!(out.contains(TRIM_MARKER));
|
||||
// Exactly one ellipsis, rest are 'a's.
|
||||
assert_eq!(out.matches(TRIM_MARKER).count(), 1);
|
||||
assert!(out.chars().filter(|c| *c == 'a').count() > 0);
|
||||
|
||||
// When max_bytes is smaller than the marker, falls back to
|
||||
// head-only truncation (no marker).
|
||||
let out = trim_middle_utf8("abcdefgh", 2);
|
||||
assert_eq!(out, "ab");
|
||||
|
||||
// UTF-8 boundary safety: 2-byte chars.
|
||||
let s = "é".repeat(50); // 100 bytes
|
||||
let out = trim_middle_utf8(&s, 25);
|
||||
assert!(out.len() <= 25);
|
||||
// Must still be valid UTF-8 that only contains 'é' and the marker.
|
||||
let ok = out.chars().all(|c| c == 'é' || c == '…');
|
||||
assert!(ok, "unexpected char in trimmed output: {out:?}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_non_text_input() {
|
||||
let expected_prompt = r#"
|
||||
|
|
|
|||
|
|
@ -1,39 +0,0 @@
|
|||
use hermesllm::apis::openai::{ChatCompletionsRequest, Message};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum RoutingModelError {
|
||||
#[error("Failed to parse JSON: {0}")]
|
||||
JsonError(#[from] serde_json::Error),
|
||||
}
|
||||
|
||||
pub type Result<T> = std::result::Result<T, RoutingModelError>;
|
||||
|
||||
/// Internal route descriptor passed to the router model to build its prompt.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct RoutingPreference {
|
||||
pub name: String,
|
||||
pub description: String,
|
||||
}
|
||||
|
||||
/// Groups a model with its routing preferences (used internally by RouterModelV1).
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ModelUsagePreference {
|
||||
pub model: String,
|
||||
pub routing_preferences: Vec<RoutingPreference>,
|
||||
}
|
||||
|
||||
pub trait RouterModel: Send + Sync {
|
||||
fn generate_request(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
usage_preferences: &Option<Vec<ModelUsagePreference>>,
|
||||
) -> ChatCompletionsRequest;
|
||||
fn parse_response(
|
||||
&self,
|
||||
content: &str,
|
||||
usage_preferences: &Option<Vec<ModelUsagePreference>>,
|
||||
) -> Result<Option<(String, String)>>;
|
||||
fn get_model_name(&self) -> String;
|
||||
}
|
||||
|
|
@ -1,841 +0,0 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
use super::router_model::{ModelUsagePreference, RoutingPreference};
|
||||
use hermesllm::apis::openai::{ChatCompletionsRequest, Message, MessageContent, Role};
|
||||
use hermesllm::transforms::lib::ExtractText;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{debug, warn};
|
||||
|
||||
use super::router_model::{RouterModel, RoutingModelError};
|
||||
|
||||
pub const MAX_TOKEN_LEN: usize = 2048; // Default max token length for the routing model
|
||||
pub const ARCH_ROUTER_V1_SYSTEM_PROMPT: &str = r#"
|
||||
You are a helpful assistant designed to find the best suited route.
|
||||
You are provided with route description within <routes></routes> XML tags:
|
||||
<routes>
|
||||
{routes}
|
||||
</routes>
|
||||
|
||||
<conversation>
|
||||
{conversation}
|
||||
</conversation>
|
||||
|
||||
Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
|
||||
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
|
||||
2. You must analyze the route descriptions and find the best match route for user latest intent.
|
||||
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.
|
||||
|
||||
Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
|
||||
{"route": "route_name"}
|
||||
"#;
|
||||
|
||||
pub type Result<T> = std::result::Result<T, RoutingModelError>;
|
||||
pub struct RouterModelV1 {
|
||||
llm_route_json_str: String,
|
||||
llm_route_to_model_map: HashMap<String, String>,
|
||||
routing_model: String,
|
||||
max_token_length: usize,
|
||||
}
|
||||
impl RouterModelV1 {
|
||||
pub fn new(
|
||||
llm_routes: HashMap<String, Vec<RoutingPreference>>,
|
||||
routing_model: String,
|
||||
max_token_length: usize,
|
||||
) -> Self {
|
||||
let llm_route_values: Vec<RoutingPreference> =
|
||||
llm_routes.values().flatten().cloned().collect();
|
||||
let llm_route_json_str =
|
||||
serde_json::to_string(&llm_route_values).unwrap_or_else(|_| "[]".to_string());
|
||||
let llm_route_to_model_map: HashMap<String, String> = llm_routes
|
||||
.iter()
|
||||
.flat_map(|(model, prefs)| prefs.iter().map(|pref| (pref.name.clone(), model.clone())))
|
||||
.collect();
|
||||
|
||||
RouterModelV1 {
|
||||
routing_model,
|
||||
max_token_length,
|
||||
llm_route_json_str,
|
||||
llm_route_to_model_map,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
struct LlmRouterResponse {
|
||||
pub route: Option<String>,
|
||||
}
|
||||
|
||||
const TOKEN_LENGTH_DIVISOR: usize = 4; // Approximate token length divisor for UTF-8 characters
|
||||
|
||||
impl RouterModel for RouterModelV1 {
|
||||
fn generate_request(
|
||||
&self,
|
||||
messages: &[Message],
|
||||
usage_preferences_from_request: &Option<Vec<ModelUsagePreference>>,
|
||||
) -> ChatCompletionsRequest {
|
||||
// remove system prompt, tool calls, tool call response and messages without content
|
||||
// if content is empty its likely a tool call
|
||||
// when role == tool its tool call response
|
||||
let messages_vec = messages
|
||||
.iter()
|
||||
.filter(|m| {
|
||||
m.role != Role::System
|
||||
&& m.role != Role::Tool
|
||||
&& !m.content.extract_text().is_empty()
|
||||
})
|
||||
.collect::<Vec<&Message>>();
|
||||
|
||||
// Following code is to ensure that the conversation does not exceed max token length
|
||||
// Note: we use a simple heuristic to estimate token count based on character length to optimize for performance
|
||||
let mut token_count = ARCH_ROUTER_V1_SYSTEM_PROMPT.len() / TOKEN_LENGTH_DIVISOR;
|
||||
let mut selected_messages_list_reversed: Vec<&Message> = vec![];
|
||||
for (selected_messsage_count, message) in messages_vec.iter().rev().enumerate() {
|
||||
let message_token_count = message.content.extract_text().len() / TOKEN_LENGTH_DIVISOR;
|
||||
token_count += message_token_count;
|
||||
if token_count > self.max_token_length {
|
||||
debug!(
|
||||
token_count = token_count,
|
||||
max_tokens = self.max_token_length,
|
||||
selected = selected_messsage_count,
|
||||
total = messages_vec.len(),
|
||||
"token count exceeds max, truncating conversation"
|
||||
);
|
||||
if message.role == Role::User {
|
||||
// If message that exceeds max token length is from user, we need to keep it
|
||||
selected_messages_list_reversed.push(message);
|
||||
}
|
||||
break;
|
||||
}
|
||||
// If we are here, it means that the message is within the max token length
|
||||
selected_messages_list_reversed.push(message);
|
||||
}
|
||||
|
||||
if selected_messages_list_reversed.is_empty() {
|
||||
debug!("no messages selected, using last message");
|
||||
if let Some(last_message) = messages_vec.last() {
|
||||
selected_messages_list_reversed.push(last_message);
|
||||
}
|
||||
}
|
||||
|
||||
// ensure that first and last selected message is from user
|
||||
if let Some(first_message) = selected_messages_list_reversed.first() {
|
||||
if first_message.role != Role::User {
|
||||
warn!("last message is not from user, may lead to incorrect routing");
|
||||
}
|
||||
}
|
||||
if let Some(last_message) = selected_messages_list_reversed.last() {
|
||||
if last_message.role != Role::User {
|
||||
warn!("first message is not from user, may lead to incorrect routing");
|
||||
}
|
||||
}
|
||||
|
||||
// Reverse the selected messages to maintain the conversation order
|
||||
let selected_conversation_list = selected_messages_list_reversed
|
||||
.iter()
|
||||
.rev()
|
||||
.map(|message| {
|
||||
Message {
|
||||
role: message.role.clone(),
|
||||
// we can unwrap here because we have already filtered out messages without content
|
||||
content: Some(MessageContent::Text(
|
||||
message
|
||||
.content
|
||||
.as_ref()
|
||||
.map_or(String::new(), |c| c.to_string()),
|
||||
)),
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<Message>>();
|
||||
|
||||
// Generate the router request message based on the usage preferences.
|
||||
// If preferences are passed in request then we use them otherwise we use the default routing model preferences.
|
||||
let router_message = match convert_to_router_preferences(usage_preferences_from_request) {
|
||||
Some(prefs) => generate_router_message(&prefs, &selected_conversation_list),
|
||||
None => generate_router_message(&self.llm_route_json_str, &selected_conversation_list),
|
||||
};
|
||||
|
||||
ChatCompletionsRequest {
|
||||
model: self.routing_model.clone(),
|
||||
messages: vec![Message {
|
||||
content: Some(MessageContent::Text(router_message)),
|
||||
role: Role::User,
|
||||
name: None,
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
}],
|
||||
temperature: Some(0.01),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_response(
|
||||
&self,
|
||||
content: &str,
|
||||
usage_preferences: &Option<Vec<ModelUsagePreference>>,
|
||||
) -> Result<Option<(String, String)>> {
|
||||
if content.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
let router_resp_fixed = fix_json_response(content);
|
||||
let router_response: LlmRouterResponse = serde_json::from_str(router_resp_fixed.as_str())?;
|
||||
|
||||
let selected_route = router_response.route.unwrap_or_default().to_string();
|
||||
|
||||
if selected_route.is_empty() || selected_route == "other" {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if let Some(usage_preferences) = usage_preferences {
|
||||
// If usage preferences are defined, we need to find the model that matches the selected route
|
||||
let model_name: Option<String> = usage_preferences
|
||||
.iter()
|
||||
.map(|pref| {
|
||||
pref.routing_preferences
|
||||
.iter()
|
||||
.find(|routing_pref| routing_pref.name == selected_route)
|
||||
.map(|_| pref.model.clone())
|
||||
})
|
||||
.find_map(|model| model);
|
||||
|
||||
if let Some(model_name) = model_name {
|
||||
return Ok(Some((selected_route, model_name)));
|
||||
} else {
|
||||
warn!(
|
||||
route = %selected_route,
|
||||
preferences = ?usage_preferences,
|
||||
"no matching model found for route"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// If no usage preferences are passed in request then use the default routing model preferences
|
||||
if let Some(model) = self.llm_route_to_model_map.get(&selected_route).cloned() {
|
||||
return Ok(Some((selected_route, model)));
|
||||
}
|
||||
|
||||
warn!(
|
||||
route = %selected_route,
|
||||
preferences = ?self.llm_route_to_model_map,
|
||||
"no model found for route"
|
||||
);
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn get_model_name(&self) -> String {
|
||||
self.routing_model.clone()
|
||||
}
|
||||
}
|
||||
|
||||
fn generate_router_message(prefs: &str, selected_conversation_list: &Vec<Message>) -> String {
|
||||
ARCH_ROUTER_V1_SYSTEM_PROMPT
|
||||
.replace("{routes}", prefs)
|
||||
.replace(
|
||||
"{conversation}",
|
||||
&serde_json::to_string(&selected_conversation_list).unwrap_or_default(),
|
||||
)
|
||||
}
|
||||
|
||||
fn convert_to_router_preferences(
|
||||
prefs_from_request: &Option<Vec<ModelUsagePreference>>,
|
||||
) -> Option<String> {
|
||||
if let Some(usage_preferences) = prefs_from_request {
|
||||
let routing_preferences = usage_preferences
|
||||
.iter()
|
||||
.flat_map(|pref| {
|
||||
pref.routing_preferences
|
||||
.iter()
|
||||
.map(|routing_pref| RoutingPreference {
|
||||
name: routing_pref.name.clone(),
|
||||
description: routing_pref.description.clone(),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<RoutingPreference>>();
|
||||
|
||||
return Some(serde_json::to_string(&routing_preferences).unwrap_or_default());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Best-effort repair of common LLM JSON-output quirks before parsing:
/// single quotes instead of double quotes, literal `\n` (backslash-n)
/// escape sequences, and markdown ```json fences around the payload.
fn fix_json_response(body: &str) -> String {
    // Normalize quotes and drop literal backslash-n escape sequences.
    // Note: "\\n" is the two-character sequence `\` `n`, not a newline.
    let mut cleaned = body.replace('\'', "\"").replace("\\n", "");

    // Strip a leading ```json fence, if present.
    if let Some(rest) = cleaned.strip_prefix("```json") {
        cleaned = rest.to_string();
    }

    // Strip a trailing ``` fence, if present.
    if let Some(rest) = cleaned.strip_suffix("```") {
        cleaned = rest.to_string();
    }

    cleaned
}
|
||||
|
||||
impl std::fmt::Debug for dyn RouterModel {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "RouterModel")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
#[test]
|
||||
fn test_system_prompt_format() {
|
||||
let expected_prompt = r#"
|
||||
You are a helpful assistant designed to find the best suited route.
|
||||
You are provided with route description within <routes></routes> XML tags:
|
||||
<routes>
|
||||
[{"name":"Image generation","description":"generating image"}]
|
||||
</routes>
|
||||
|
||||
<conversation>
|
||||
[{"role":"user","content":"hi"},{"role":"assistant","content":"Hello! How can I assist you today?"},{"role":"user","content":"given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"}]
|
||||
</conversation>
|
||||
|
||||
Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
|
||||
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
|
||||
2. You must analyze the route descriptions and find the best match route for user latest intent.
|
||||
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.
|
||||
|
||||
Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
|
||||
{"route": "route_name"}
|
||||
"#;
|
||||
let routes_str = r#"
|
||||
{
|
||||
"gpt-4o": [
|
||||
{"name": "Image generation", "description": "generating image"}
|
||||
]
|
||||
}
|
||||
"#;
|
||||
let llm_routes =
|
||||
serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
|
||||
let routing_model = "test-model".to_string();
|
||||
let router = RouterModelV1::new(llm_routes, routing_model, usize::MAX);
|
||||
|
||||
let conversation_str = r#"
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": "hi"
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "Hello! How can I assist you today?"
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"
|
||||
}
|
||||
]
|
||||
"#;
|
||||
let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();
|
||||
|
||||
let req = router.generate_request(&conversation, &None);
|
||||
|
||||
let prompt = req.messages[0].content.extract_text();
|
||||
|
||||
assert_eq!(expected_prompt, prompt);
|
||||
}
|
||||
|
||||
// When the caller supplies per-request usage preferences, the system prompt
// must be built from those inline preferences (code-generation) instead of
// the statically configured routes (Image generation).
#[test]
fn test_system_prompt_format_usage_preferences() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"code-generation","description":"generating new code snippets, functions, or boilerplate based on user prompts or requirements"}]
</routes>

<conversation>
[{"role":"user","content":"hi"},{"role":"assistant","content":"Hello! How can I assist you today?"},{"role":"user","content":"given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;
    // Statically configured routes — expected to be overridden by the inline
    // usage preferences supplied below.
    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
    let routing_model = "test-model".to_string();
    // usize::MAX token budget => no trimming of the conversation.
    let router = RouterModelV1::new(llm_routes, routing_model, usize::MAX);

    let conversation_str = r#"
[
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello! How can I assist you today?"
},
{
"role": "user",
"content": "given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"
}
]
"#;
    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    // Inline per-request preference: only the code-generation route should
    // appear in the generated prompt's <routes> section.
    let usage_preferences = Some(vec![ModelUsagePreference {
        model: "claude/claude-3-7-sonnet".to_string(),
        routing_preferences: vec![RoutingPreference {
            name: "code-generation".to_string(),
            description: "generating new code snippets, functions, or boilerplate based on user prompts or requirements".to_string(),
        }],
    }]);
    let req = router.generate_request(&conversation, &usage_preferences);

    // The routing request carries the full system prompt as its first message.
    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// With a 235-token budget the router must trim the conversation down to only
// the most recent user message before building the routing prompt.
#[test]
fn test_conversation_exceed_token_count() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"Image generation","description":"generating image"}]
</routes>

<conversation>
[{"role":"user","content":"given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;

    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
    let routing_model = "test-model".to_string();
    // 235 tokens: enough for the prompt scaffolding plus the latest message,
    // too small for the full three-message history.
    let router = RouterModelV1::new(llm_routes, routing_model, 235);

    let conversation_str = r#"
[
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello! How can I assist you today?"
},
{
"role": "user",
"content": "given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"
}
]
"#;

    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    let req = router.generate_request(&conversation, &None);

    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// Even when the single most-recent user message alone exceeds the token
// budget, it must still be included: routing always sees at least the
// latest user turn.
#[test]
fn test_conversation_exceed_token_count_large_single_message() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"Image generation","description":"generating image"}]
</routes>

<conversation>
[{"role":"user","content":"given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson and this is a very long message that exceeds the max token length of the routing model, so it should be truncated and only the last user message should be included in the conversation for routing."}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;

    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();

    let routing_model = "test-model".to_string();
    // Budget of 200 is smaller than the final user message itself.
    let router = RouterModelV1::new(llm_routes, routing_model, 200);

    let conversation_str = r#"
[
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello! How can I assist you today?"
},
{
"role": "user",
"content": "given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson and this is a very long message that exceeds the max token length of the routing model, so it should be truncated and only the last user message should be included in the conversation for routing."
}
]
"#;

    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    let req = router.generate_request(&conversation, &None);

    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// When trimming for the token budget, the retained window must start at a
// user message: the oldest turns are dropped, but the conversation never
// opens with an assistant reply.
#[test]
fn test_conversation_trim_upto_user_message() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"Image generation","description":"generating image"}]
</routes>

<conversation>
[{"role":"user","content":"given the image In style of Andy Warhol"},{"role":"assistant","content":"ok here is the image"},{"role":"user","content":"pls give me another image about Bart and Lisa"}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;

    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
    let routing_model = "test-model".to_string();
    // 230-token budget drops the first user/assistant exchange; the window
    // restarts at the "given the image..." user turn.
    let router = RouterModelV1::new(llm_routes, routing_model, 230);

    let conversation_str = r#"
[
{
"role": "user",
"content": "hi"
},
{
"role": "assistant",
"content": "Hello! How can I assist you today?"
},
{
"role": "user",
"content": "given the image In style of Andy Warhol"
},
{
"role": "assistant",
"content": "ok here is the image"
},
{
"role": "user",
"content": "pls give me another image about Bart and Lisa"
}
]
"#;

    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    let req = router.generate_request(&conversation, &None);

    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// Multimodal messages: a content array mixing text and image_url parts must
// be reduced to its text part ("hi") in the routing prompt — non-text parts
// are dropped.
#[test]
fn test_non_text_input() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"Image generation","description":"generating image"}]
</routes>

<conversation>
[{"role":"user","content":"hi"},{"role":"assistant","content":"Hello! How can I assist you today?"},{"role":"user","content":"given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;
    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
    let routing_model = "test-model".to_string();
    let router = RouterModelV1::new(llm_routes, routing_model, usize::MAX);

    // First user message uses the parts-array form with an image attachment.
    let conversation_str = r#"
[
{
"role": "user",
"content": [
{
"type": "text",
"text": "hi"
},
{
"type": "image_url",
"image_url": {
"url": "https://example.com/image.png"
}
}
]
},
{
"role": "assistant",
"content": "Hello! How can I assist you today?"
},
{
"role": "user",
"content": "given the image In style of Andy Warhol, portrait of Bart and Lisa Simpson"
}
]
"#;
    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    let req = router.generate_request(&conversation, &None);

    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// Tool-call plumbing (assistant messages carrying tool_calls, and the
// corresponding role:"tool" results) must be stripped from the conversation
// sent to the routing model — only plain user/assistant turns remain.
#[test]
fn test_skip_tool_call() {
    let expected_prompt = r#"
You are a helpful assistant designed to find the best suited route.
You are provided with route description within <routes></routes> XML tags:
<routes>
[{"name":"Image generation","description":"generating image"}]
</routes>

<conversation>
[{"role":"user","content":"What's the weather like in Tokyo?"},{"role":"assistant","content":"The current weather in Tokyo is 22°C and sunny."},{"role":"user","content":"What about in New York?"}]
</conversation>

Your task is to decide which route is best suit with user intent on the conversation in <conversation></conversation> XML tags. Follow the instruction:
1. If the latest intent from user is irrelevant or user intent is full filled, response with other route {"route": "other"}.
2. You must analyze the route descriptions and find the best match route for user latest intent.
3. You only response the name of the route that best matches the user's request, use the exact name in the <routes></routes>.

Based on your analysis, provide your response in the following JSON formats if you decide to match any route:
{"route": "route_name"}
"#;
    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();
    let routing_model = "test-model".to_string();
    let router = RouterModelV1::new(llm_routes, routing_model, usize::MAX);

    let conversation_str = r#"
[
{
"role": "user",
"content": "What's the weather like in Tokyo?"
},
{
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "toolcall-abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{ \"location\": \"Tokyo\" }"
}
}
]
},
{
"role": "tool",
"tool_call_id": "toolcall-abc123",
"content": "{ \"temperature\": \"22°C\", \"condition\": \"Sunny\" }"
},
{
"role": "assistant",
"content": "The current weather in Tokyo is 22°C and sunny."
},
{
"role": "user",
"content": "What about in New York?"
}
]
"#;

    // expects conversation to look like this

    // [
    //     {
    //         "role": "user",
    //         "content": "What's the weather like in Tokyo?"
    //     },
    //     {
    //         "role": "assistant",
    //         "content": "The current weather in Tokyo is 22°C and sunny."
    //     },
    //     {
    //         "role": "user",
    //         "content": "What about in New York?"
    //     }
    // ]

    let conversation: Vec<Message> = serde_json::from_str(conversation_str).unwrap();

    let req: ChatCompletionsRequest = router.generate_request(&conversation, &None);

    let prompt = req.messages[0].content.extract_text();

    assert_eq!(expected_prompt, prompt);
}
|
||||
|
||||
// parse_response maps the routing model's raw output to
// Some((route_name, model_name)) or None, tolerating common LLM sloppiness
// (single quotes, trailing "\n", markdown code fences). Truly malformed JSON
// is an Err.
#[test]
fn test_parse_response() {
    let routes_str = r#"
{
"gpt-4o": [
{"name": "Image generation", "description": "generating image"}
]
}
"#;
    let llm_routes =
        serde_json::from_str::<HashMap<String, Vec<RoutingPreference>>>(routes_str).unwrap();

    let router = RouterModelV1::new(llm_routes, "test-model".to_string(), 2000);

    // Case 1: Valid JSON with non-empty route
    let input = r#"{"route": "Image generation"}"#;
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(
        result,
        Some(("Image generation".to_string(), "gpt-4o".to_string()))
    );

    // Case 2: Valid JSON with empty route
    let input = r#"{"route": ""}"#;
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(result, None);

    // Case 3: Valid JSON with null route
    let input = r#"{"route": null}"#;
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(result, None);

    // Case 4: JSON missing route field
    let input = r#"{}"#;
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(result, None);

    // Case 4.1: empty string
    let input = r#""#;
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(result, None);

    // Case 5: Malformed JSON
    let input = r#"{"route": "route1""#; // missing closing }
    let result = router.parse_response(input, &None);
    assert!(result.is_err());

    // Case 6: Single quotes and \n in JSON
    let input = "{'route': 'Image generation'}\\n";
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(
        result,
        Some(("Image generation".to_string(), "gpt-4o".to_string()))
    );

    // Case 7: Code block marker
    let input = "```json\n{\"route\": \"Image generation\"}\n```";
    let result = router.parse_response(input, &None).unwrap();
    assert_eq!(
        result,
        Some(("Image generation".to_string(), "gpt-4o".to_string()))
    );
}
|
||||
}
|
||||
260
crates/brightstaff/src/router/stress_tests.rs
Normal file
260
crates/brightstaff/src/router/stress_tests.rs
Normal file
|
|
@ -0,0 +1,260 @@
|
|||
#[cfg(test)]
mod tests {
    use crate::router::orchestrator::OrchestratorService;
    use crate::session_cache::memory::MemorySessionCache;
    use common::configuration::{SelectionPolicy, SelectionPreference, TopLevelRoutingPreference};
    use hermesllm::apis::openai::{Message, MessageContent, Role};
    use std::sync::Arc;

    /// Build `n` alternating user/assistant text messages with padding so the
    /// payloads are realistically sized.
    fn make_messages(n: usize) -> Vec<Message> {
        (0..n)
            .map(|i| Message {
                role: if i % 2 == 0 {
                    Role::User
                } else {
                    Role::Assistant
                },
                content: Some(MessageContent::Text(format!(
                    "This is message number {i} with some padding text to make it realistic."
                ))),
                name: None,
                tool_calls: None,
                tool_call_id: None,
            })
            .collect()
    }

    /// Two representative routing preferences spanning multiple models.
    fn make_routing_prefs() -> Vec<TopLevelRoutingPreference> {
        vec![
            TopLevelRoutingPreference {
                name: "code_generation".to_string(),
                description: "Code generation and debugging tasks".to_string(),
                models: vec![
                    "openai/gpt-4o".to_string(),
                    "openai/gpt-4o-mini".to_string(),
                ],
                selection_policy: SelectionPolicy {
                    prefer: SelectionPreference::None,
                },
            },
            TopLevelRoutingPreference {
                name: "summarization".to_string(),
                description: "Summarizing documents and text".to_string(),
                models: vec![
                    "anthropic/claude-3-sonnet".to_string(),
                    "openai/gpt-4o-mini".to_string(),
                ],
                selection_policy: SelectionPolicy {
                    prefer: SelectionPreference::None,
                },
            },
        ]
    }

    /// Stress test: exercise the full routing code path N times using a mock
    /// HTTP server and measure jemalloc allocated bytes before/after.
    ///
    /// This catches:
    /// - Memory leaks in generate_request / parse_response
    /// - Leaks in reqwest connection handling
    /// - String accumulation in the orchestrator model
    /// - Fragmentation (jemalloc allocated vs resident)
    #[tokio::test]
    async fn stress_test_routing_determine_route() {
        let mut server = mockito::Server::new_async().await;
        let router_url = format!("{}/v1/chat/completions", server.url());

        // Canned routing decision the mock upstream always returns.
        let mock_response = serde_json::json!({
            "id": "chatcmpl-mock",
            "object": "chat.completion",
            "created": 1234567890,
            "model": "plano-orchestrator",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "{\"route\": \"code_generation\"}"
                },
                "finish_reason": "stop"
            }],
            "usage": {"prompt_tokens": 100, "completion_tokens": 10, "total_tokens": 110}
        });

        let _mock = server
            .mock("POST", "/v1/chat/completions")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(mock_response.to_string())
            .expect_at_least(1)
            .create_async()
            .await;

        let prefs = make_routing_prefs();
        let session_cache = Arc::new(MemorySessionCache::new(1000));
        let orchestrator_service = Arc::new(OrchestratorService::with_routing(
            router_url,
            "Plano-Orchestrator".to_string(),
            "plano-orchestrator".to_string(),
            Some(prefs.clone()),
            None,
            None,
            session_cache,
            None,
            2048,
        ));

        // Warm up: a few requests to stabilize allocator state
        for _ in 0..10 {
            let msgs = make_messages(5);
            let _ = orchestrator_service
                .determine_route(&msgs, None, "warmup")
                .await;
        }

        // Snapshot memory after warmup
        let baseline = get_allocated();

        let num_iterations = 2000;

        for i in 0..num_iterations {
            let msgs = make_messages(5 + (i % 10));
            // Every third request supplies inline preferences to exercise
            // both routing paths.
            let inline = if i % 3 == 0 {
                Some(make_routing_prefs())
            } else {
                None
            };
            let _ = orchestrator_service
                .determine_route(&msgs, inline, &format!("req-{i}"))
                .await;
        }

        let after = get_allocated();

        let growth = after.saturating_sub(baseline);
        let growth_mb = growth as f64 / (1024.0 * 1024.0);
        let per_request = growth.checked_div(num_iterations).unwrap_or(0);

        eprintln!("=== Routing Stress Test Results ===");
        eprintln!(" Iterations: {num_iterations}");
        eprintln!(" Baseline alloc: {} bytes", baseline);
        eprintln!(" Final alloc: {} bytes", after);
        eprintln!(" Growth: {} bytes ({growth_mb:.2} MB)", growth);
        eprintln!(" Per-request: {} bytes", per_request);

        // Allow up to 256 bytes per request of retained growth (connection pool, etc.)
        // A true leak would show thousands of bytes per request.
        assert!(
            per_request < 256,
            "Possible memory leak: {per_request} bytes/request retained after {num_iterations} iterations"
        );
    }

    /// Stress test with high concurrency: many parallel determine_route calls.
    #[tokio::test]
    async fn stress_test_routing_concurrent() {
        let mut server = mockito::Server::new_async().await;
        let router_url = format!("{}/v1/chat/completions", server.url());

        let mock_response = serde_json::json!({
            "id": "chatcmpl-mock",
            "object": "chat.completion",
            "created": 1234567890,
            "model": "plano-orchestrator",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "{\"route\": \"summarization\"}"
                },
                "finish_reason": "stop"
            }],
            "usage": {"prompt_tokens": 100, "completion_tokens": 10, "total_tokens": 110}
        });

        let _mock = server
            .mock("POST", "/v1/chat/completions")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(mock_response.to_string())
            .expect_at_least(1)
            .create_async()
            .await;

        let prefs = make_routing_prefs();
        let session_cache = Arc::new(MemorySessionCache::new(1000));
        let orchestrator_service = Arc::new(OrchestratorService::with_routing(
            router_url,
            "Plano-Orchestrator".to_string(),
            "plano-orchestrator".to_string(),
            Some(prefs),
            None,
            None,
            session_cache,
            None,
            2048,
        ));

        // Warm up
        for _ in 0..20 {
            let msgs = make_messages(3);
            let _ = orchestrator_service
                .determine_route(&msgs, None, "warmup")
                .await;
        }

        let baseline = get_allocated();

        let concurrency = 50;
        let requests_per_task = 100;
        let total = concurrency * requests_per_task;

        let mut handles = vec![];
        for t in 0..concurrency {
            let svc = Arc::clone(&orchestrator_service);
            let handle = tokio::spawn(async move {
                for r in 0..requests_per_task {
                    let msgs = make_messages(3 + (r % 8));
                    let _ = svc
                        .determine_route(&msgs, None, &format!("req-{t}-{r}"))
                        .await;
                }
            });
            handles.push(handle);
        }

        for h in handles {
            h.await.unwrap();
        }

        let after = get_allocated();
        let growth = after.saturating_sub(baseline);
        // checked_div for consistency with the sequential test above, and to
        // stay panic-free if the task/request constants are ever set to zero.
        let per_request = growth.checked_div(total).unwrap_or(0);

        eprintln!("=== Concurrent Routing Stress Test Results ===");
        eprintln!(" Tasks: {concurrency} x {requests_per_task} = {total}");
        eprintln!(" Baseline: {} bytes", baseline);
        eprintln!(" Final: {} bytes", after);
        eprintln!(
            " Growth: {} bytes ({:.2} MB)",
            growth,
            growth as f64 / 1_048_576.0
        );
        eprintln!(" Per-request: {} bytes", per_request);

        assert!(
            per_request < 512,
            "Possible memory leak under concurrency: {per_request} bytes/request retained after {total} requests"
        );
    }

    /// Read jemalloc's "allocated" stat, advancing the epoch first so the
    /// counters reflect current state.
    #[cfg(feature = "jemalloc")]
    fn get_allocated() -> usize {
        tikv_jemalloc_ctl::epoch::advance().unwrap();
        tikv_jemalloc_ctl::stats::allocated::read().unwrap_or(0)
    }

    /// Without jemalloc there is nothing to measure: growth is always 0 and
    /// the leak assertions are effectively disabled.
    #[cfg(not(feature = "jemalloc"))]
    fn get_allocated() -> usize {
        0
    }
}
|
||||
82
crates/brightstaff/src/session_cache/memory.rs
Normal file
82
crates/brightstaff/src/session_cache/memory.rs
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
use std::{
|
||||
num::NonZeroUsize,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use lru::LruCache;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::info;
|
||||
|
||||
use super::{CachedRoute, SessionCache};
|
||||
|
||||
// Per-key value is (route, insertion timestamp, per-entry TTL); the LruCache
// bounds total entries, the timestamp/TTL pair drives expiry.
type CacheStore = Mutex<LruCache<String, (CachedRoute, Instant, Duration)>>;

/// In-memory, LRU-bounded session cache with per-entry TTL expiry.
pub struct MemorySessionCache {
    // Arc so the background eviction task can share the store.
    store: Arc<CacheStore>,
}
|
||||
|
||||
impl MemorySessionCache {
|
||||
pub fn new(max_entries: usize) -> Self {
|
||||
let capacity = NonZeroUsize::new(max_entries)
|
||||
.unwrap_or_else(|| NonZeroUsize::new(10_000).expect("10_000 is non-zero"));
|
||||
let store = Arc::new(Mutex::new(LruCache::new(capacity)));
|
||||
|
||||
// Spawn a background task to evict TTL-expired entries every 5 minutes.
|
||||
let store_clone = Arc::clone(&store);
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(300));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
Self::evict_expired(&store_clone).await;
|
||||
}
|
||||
});
|
||||
|
||||
Self { store }
|
||||
}
|
||||
|
||||
async fn evict_expired(store: &CacheStore) {
|
||||
let mut cache = store.lock().await;
|
||||
let expired: Vec<String> = cache
|
||||
.iter()
|
||||
.filter(|(_, (_, inserted_at, ttl))| inserted_at.elapsed() >= *ttl)
|
||||
.map(|(k, _)| k.clone())
|
||||
.collect();
|
||||
let removed = expired.len();
|
||||
for key in &expired {
|
||||
cache.pop(key.as_str());
|
||||
}
|
||||
if removed > 0 {
|
||||
info!(
|
||||
removed = removed,
|
||||
remaining = cache.len(),
|
||||
"cleaned up expired session cache entries"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl SessionCache for MemorySessionCache {
|
||||
async fn get(&self, key: &str) -> Option<CachedRoute> {
|
||||
let mut cache = self.store.lock().await;
|
||||
if let Some((route, inserted_at, ttl)) = cache.get(key) {
|
||||
if inserted_at.elapsed() < *ttl {
|
||||
return Some(route.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
async fn put(&self, key: &str, route: CachedRoute, ttl: Duration) {
|
||||
self.store
|
||||
.lock()
|
||||
.await
|
||||
.put(key.to_string(), (route, Instant::now(), ttl));
|
||||
}
|
||||
|
||||
async fn remove(&self, key: &str) {
|
||||
self.store.lock().await.pop(key);
|
||||
}
|
||||
}
|
||||
70
crates/brightstaff/src/session_cache/mod.rs
Normal file
70
crates/brightstaff/src/session_cache/mod.rs
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use common::configuration::Configuration;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, info};
|
||||
|
||||
pub mod memory;
|
||||
pub mod redis;
|
||||
|
||||
/// Routing decision stored per session key. Serde derives allow it to be
/// stored as JSON by the Redis backend.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct CachedRoute {
    // Name of the model the request was routed to.
    pub model_name: String,
    // Name of the matched routing preference, when one applied.
    // NOTE(review): exact semantics of None inferred — confirm against the
    // orchestrator's usage.
    pub route_name: Option<String>,
}
||||
|
||||
/// Pluggable backend for session-affinity routing decisions; implemented by
/// the in-memory LRU cache and the Redis cache in this module.
#[async_trait]
pub trait SessionCache: Send + Sync {
    /// Look up a cached routing decision by key.
    async fn get(&self, key: &str) -> Option<CachedRoute>;

    /// Store a routing decision in the session cache with the given TTL.
    async fn put(&self, key: &str, route: CachedRoute, ttl: Duration);

    /// Remove a cached routing decision by key.
    async fn remove(&self, key: &str);
}
|
||||
|
||||
/// Initialize the session cache backend from config.
/// Defaults to the in-memory backend when no `session_cache` block is configured.
///
/// # Errors
/// Returns an error when the Redis backend is selected but no URL is
/// configured, or when the Redis connection cannot be established.
pub async fn init_session_cache(
    config: &Configuration,
) -> Result<Arc<dyn SessionCache>, Box<dyn std::error::Error + Send + Sync>> {
    use common::configuration::SessionCacheType;

    let session_max_entries = config.routing.as_ref().and_then(|r| r.session_max_entries);

    // Default and hard cap are currently the same value; the cap bounds
    // memory use even if a larger value is configured.
    const DEFAULT_SESSION_MAX_ENTRIES: usize = 10_000;
    const MAX_SESSION_MAX_ENTRIES: usize = 10_000;

    let max_entries = session_max_entries
        .unwrap_or(DEFAULT_SESSION_MAX_ENTRIES)
        .min(MAX_SESSION_MAX_ENTRIES);

    let cache_config = config
        .routing
        .as_ref()
        .and_then(|r| r.session_cache.as_ref());

    // No session_cache block => in-memory backend.
    let cache_type = cache_config
        .map(|c| &c.cache_type)
        .unwrap_or(&SessionCacheType::Memory);

    match cache_type {
        SessionCacheType::Memory => {
            info!(storage_type = "memory", "initialized session cache");
            Ok(Arc::new(memory::MemorySessionCache::new(max_entries)))
        }
        SessionCacheType::Redis => {
            // Redis requires an explicit connection URL; fail fast otherwise.
            let url = cache_config
                .and_then(|c| c.url.as_ref())
                .ok_or("session_cache.url is required when type is redis")?;
            debug!(storage_type = "redis", url = %url, "initializing session cache");
            let cache = redis::RedisSessionCache::new(url)
                .await
                .map_err(|e| format!("failed to connect to Redis session cache: {e}"))?;
            Ok(Arc::new(cache))
        }
    }
}
|
||||
48
crates/brightstaff/src/session_cache/redis.rs
Normal file
48
crates/brightstaff/src/session_cache/redis.rs
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use redis::aio::MultiplexedConnection;
|
||||
use redis::AsyncCommands;
|
||||
|
||||
use super::{CachedRoute, SessionCache};
|
||||
|
||||
// Namespace prefix for every session-affinity key stored in Redis.
const KEY_PREFIX: &str = "plano:affinity:";

/// Redis-backed session cache. The multiplexed connection is a cheap,
/// cloneable handle, so each operation works on its own clone.
pub struct RedisSessionCache {
    conn: MultiplexedConnection,
}
|
||||
|
||||
impl RedisSessionCache {
|
||||
pub async fn new(url: &str) -> Result<Self, redis::RedisError> {
|
||||
let client = redis::Client::open(url)?;
|
||||
let conn = client.get_multiplexed_async_connection().await?;
|
||||
Ok(Self { conn })
|
||||
}
|
||||
|
||||
fn make_key(key: &str) -> String {
|
||||
format!("{KEY_PREFIX}{key}")
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl SessionCache for RedisSessionCache {
    /// Fetch and deserialize the cached route for `key`. Any Redis or JSON
    /// error is treated as a cache miss (None) rather than surfaced.
    async fn get(&self, key: &str) -> Option<CachedRoute> {
        // MultiplexedConnection clones are cheap handles to the same link.
        let mut conn = self.conn.clone();
        let value: Option<String> = conn.get(Self::make_key(key)).await.ok()?;
        value.and_then(|v| serde_json::from_str(&v).ok())
    }

    /// Serialize and store the route with a TTL (SET ... EX). Best-effort:
    /// errors are deliberately ignored — a failed cache write must not fail
    /// the request being routed.
    async fn put(&self, key: &str, route: CachedRoute, ttl: Duration) {
        let mut conn = self.conn.clone();
        let Ok(json) = serde_json::to_string(&route) else {
            return;
        };
        // Redis expiry is whole seconds and must be >= 1.
        let ttl_secs = ttl.as_secs().max(1);
        let _: Result<(), _> = conn.set_ex(Self::make_key(key), json, ttl_secs).await;
    }

    /// Delete the cached route for `key` (best-effort).
    async fn remove(&self, key: &str) {
        let mut conn = self.conn.clone();
        let _: Result<(), _> = conn.del(Self::make_key(key)).await;
    }
}
|
||||
File diff suppressed because it is too large
Load diff
347
crates/brightstaff/src/signals/environment/exhaustion.rs
Normal file
347
crates/brightstaff/src/signals/environment/exhaustion.rs
Normal file
|
|
@ -0,0 +1,347 @@
|
|||
//! Environment exhaustion detector. Direct port of
|
||||
//! `signals/environment/exhaustion.py`.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use regex::Regex;
|
||||
use serde_json::json;
|
||||
|
||||
use crate::signals::analyzer::ShareGptMessage;
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
|
||||
/// Server-side / 5xx-style failure phrases (case-insensitive; combined into
/// one alternation by `compile`).
pub const API_ERROR_PATTERNS: &[&str] = &[
    r"500\s*(internal\s+)?server\s+error",
    r"502\s*bad\s+gateway",
    r"503\s*service\s+unavailable",
    r"504\s*gateway\s+timeout",
    r"internal\s+server\s+error",
    r"service\s+unavailable",
    r"server\s+error",
    r"backend\s+error",
    r"upstream\s+error",
    r"service\s+temporarily\s+unavailable",
    r"maintenance\s+mode",
    r"under\s+maintenance",
    r"try\s+again\s+later",
    r"temporarily\s+unavailable",
    r"system\s+error",
    r"unexpected\s+error",
    r"unhandled\s+exception",
];

/// Timeout / deadline-exceeded phrases.
pub const TIMEOUT_PATTERNS: &[&str] = &[
    r"timeout",
    r"timed?\s*out",
    r"etimedout",
    r"connection\s+timed?\s*out",
    r"read\s+timed?\s*out",
    r"request\s+timed?\s*out",
    r"gateway\s+timeout",
    r"deadline\s+exceeded",
    r"took\s+too\s+long",
    r"operation\s+timed?\s*out",
    r"socket\s+timeout",
];

/// Rate-limiting / quota phrases (HTTP 429 and friends).
pub const RATE_LIMIT_PATTERNS: &[&str] = &[
    r"rate\s+limit",
    r"rate.limited",
    r"(status|error|http)\s*:?\s*429",
    r"429\s+(too\s+many|rate|limit)",
    r"too\s+many\s+requests?",
    r"quota\s+exceeded",
    r"quota\s+limit",
    r"throttl(ed|ing)",
    r"request\s+limit",
    r"api\s+limit",
    r"calls?\s+per\s+(second|minute|hour|day)",
    r"exceeded\s+.*\s+limit",
    r"slow\s+down",
    r"retry\s+after",
    r"requests?\s+exceeded",
];

/// Connectivity / DNS / TLS failure phrases.
pub const NETWORK_PATTERNS: &[&str] = &[
    r"connection\s+refused",
    r"econnrefused",
    r"econnreset",
    r"connection\s+reset",
    r"enotfound",
    r"dns\s+(error|failure|lookup)",
    r"host\s+not\s+found",
    r"network\s+(error|failure|unreachable)",
    r"no\s+route\s+to\s+host",
    r"socket\s+error",
    r"connection\s+failed",
    r"unable\s+to\s+connect",
    r"cannot\s+connect",
    r"could\s+not\s+connect",
    r"connect\s+error",
    r"ssl\s+(error|handshake|certificate)",
    r"certificate\s+(error|invalid|expired)",
];

/// Unparseable / schema-mismatched response phrases.
pub const MALFORMED_PATTERNS: &[&str] = &[
    r"json\s+parse\s+error",
    r"invalid\s+json",
    r"unexpected\s+token",
    r"syntax\s+error.*json",
    r"malformed\s+(response|json|data)",
    r"unexpected\s+end\s+of",
    r"parse\s+error",
    r"parsing\s+failed",
    r"invalid\s+response",
    r"unexpected\s+response",
    r"response\s+format",
    r"missing\s+field.*response",
    r"unexpected\s+schema",
    r"schema\s+validation",
    r"deserialization\s+error",
    r"failed\s+to\s+decode",
];

/// Context-window / token-limit phrases.
pub const CONTEXT_OVERFLOW_PATTERNS: &[&str] = &[
    r"context\s+(length|limit|overflow|exceeded)",
    r"token\s+(limit|overflow|exceeded)",
    r"max(imum)?\s+tokens?",
    r"input\s+too\s+(long|large)",
    r"exceeds?\s+(context|token|character|input)\s+limit",
    r"message\s+too\s+(long|large)",
    r"content\s+too\s+(long|large)",
    r"truncat(ed|ion)\s+(due\s+to|because|for)\s+(length|size|limit)",
    r"maximum\s+context",
    r"prompt\s+too\s+(long|large)",
];
|
||||
|
||||
fn compile(patterns: &[&str]) -> Regex {
|
||||
let combined = patterns
|
||||
.iter()
|
||||
.map(|p| format!("({})", p))
|
||||
.collect::<Vec<_>>()
|
||||
.join("|");
|
||||
Regex::new(&format!("(?i){}", combined)).expect("exhaustion pattern regex must compile")
|
||||
}
|
||||
|
||||
// Lazily-compiled, process-wide combined regexes. Each is built once on first
// use via `OnceLock` so the (expensive) compile cost is paid at most once.
fn api_error_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(API_ERROR_PATTERNS))
}
fn timeout_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(TIMEOUT_PATTERNS))
}
fn rate_limit_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(RATE_LIMIT_PATTERNS))
}
fn network_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(NETWORK_PATTERNS))
}
fn malformed_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(MALFORMED_PATTERNS))
}
fn context_overflow_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(CONTEXT_OVERFLOW_PATTERNS))
}
|
||||
|
||||
fn snippet_around(text: &str, m: regex::Match<'_>, context: usize) -> String {
|
||||
let start = m.start().saturating_sub(context);
|
||||
let end = (m.end() + context).min(text.len());
|
||||
let start = align_char_boundary(text, start, false);
|
||||
let end = align_char_boundary(text, end, true);
|
||||
let mut snippet = String::new();
|
||||
if start > 0 {
|
||||
snippet.push_str("...");
|
||||
}
|
||||
snippet.push_str(&text[start..end]);
|
||||
if end < text.len() {
|
||||
snippet.push_str("...");
|
||||
}
|
||||
snippet
|
||||
}
|
||||
|
||||
/// Snaps `idx` onto the nearest UTF-8 character boundary of `s`: forward
/// (toward the end) when `forward` is true, backward otherwise. Indices at or
/// past the end clamp to `s.len()`.
fn align_char_boundary(s: &str, idx: usize, forward: bool) -> usize {
    if idx >= s.len() {
        return s.len();
    }
    if forward {
        // s.len() is always a boundary, so this find cannot fail.
        (idx..=s.len())
            .find(|&i| s.is_char_boundary(i))
            .unwrap_or_else(|| s.len())
    } else {
        // 0 is always a boundary, so this find cannot fail.
        (0..=idx).rev().find(|&i| s.is_char_boundary(i)).unwrap_or(0)
    }
}
|
||||
|
||||
pub fn analyze_exhaustion(messages: &[ShareGptMessage<'_>]) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("exhaustion");
|
||||
|
||||
for (i, msg) in messages.iter().enumerate() {
|
||||
if msg.from != "observation" {
|
||||
continue;
|
||||
}
|
||||
let value = msg.value;
|
||||
let lower = value.to_lowercase();
|
||||
|
||||
if let Some(m) = rate_limit_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionRateLimit,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.95,
|
||||
"rate_limit",
|
||||
m.as_str(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = api_error_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionApiError,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.9,
|
||||
"api_error",
|
||||
m.as_str(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = timeout_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionTimeout,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.9,
|
||||
"timeout",
|
||||
m.as_str(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = network_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionNetwork,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.9,
|
||||
"network",
|
||||
m.as_str(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = malformed_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionMalformed,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.85,
|
||||
"malformed_response",
|
||||
m.as_str(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = context_overflow_re().find(&lower) {
|
||||
group.add_signal(emit(
|
||||
SignalType::EnvironmentExhaustionContextOverflow,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
0.9,
|
||||
"context_overflow",
|
||||
m.as_str(),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
fn emit(
|
||||
t: SignalType,
|
||||
idx: usize,
|
||||
snippet: String,
|
||||
confidence: f32,
|
||||
kind: &str,
|
||||
matched: &str,
|
||||
) -> SignalInstance {
|
||||
SignalInstance::new(t, idx, snippet)
|
||||
.with_confidence(confidence)
|
||||
.with_metadata(json!({
|
||||
"exhaustion_type": kind,
|
||||
"matched": matched,
|
||||
}))
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Builds an observation-role message — the only role analyze_exhaustion
    // inspects.
    fn obs(value: &str) -> ShareGptMessage<'_> {
        ShareGptMessage {
            from: "observation",
            value,
        }
    }

    #[test]
    fn detects_rate_limit() {
        let g = analyze_exhaustion(&[obs("HTTP 429: too many requests, retry after 30s")]);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionRateLimit)));
    }

    #[test]
    fn detects_api_error() {
        let g = analyze_exhaustion(&[obs("503 service unavailable - try again later")]);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionApiError)));
    }

    #[test]
    fn detects_timeout() {
        let g = analyze_exhaustion(&[obs("Connection timed out after 30 seconds")]);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionTimeout)));
    }

    #[test]
    fn detects_network_failure() {
        let g = analyze_exhaustion(&[obs("ECONNREFUSED: connection refused by remote host")]);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionNetwork)));
    }

    #[test]
    fn detects_malformed_response() {
        let g = analyze_exhaustion(&[obs("Invalid JSON: unexpected token at position 42")]);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionMalformed)));
    }

    #[test]
    fn detects_context_overflow() {
        let g = analyze_exhaustion(&[obs("Maximum context length exceeded for this model")]);
        assert!(g.signals.iter().any(|s| matches!(
            s.signal_type,
            SignalType::EnvironmentExhaustionContextOverflow
        )));
    }
}
|
||||
3
crates/brightstaff/src/signals/environment/mod.rs
Normal file
3
crates/brightstaff/src/signals/environment/mod.rs
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
//! Environment signals: exhaustion (external system failures and constraints).
|
||||
|
||||
pub mod exhaustion;
|
||||
388
crates/brightstaff/src/signals/execution/failure.rs
Normal file
388
crates/brightstaff/src/signals/execution/failure.rs
Normal file
|
|
@ -0,0 +1,388 @@
|
|||
//! Execution failure detector. Direct port of `signals/execution/failure.py`.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use regex::Regex;
|
||||
use serde_json::json;
|
||||
|
||||
use crate::signals::analyzer::ShareGptMessage;
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
|
||||
/// Phrases indicating a tool was called with bad arguments (case-insensitive;
/// combined into one alternation by `compile`).
pub const INVALID_ARGS_PATTERNS: &[&str] = &[
    r"invalid\s+argument",
    r"invalid\s+parameter",
    r"invalid\s+type",
    r"type\s*error",
    r"expected\s+\w+\s*,?\s*got\s+\w+",
    r"required\s+field",
    r"required\s+parameter",
    r"missing\s+required",
    r"missing\s+argument",
    r"validation\s+failed",
    r"validation\s+error",
    r"invalid\s+value",
    r"invalid\s+format",
    r"must\s+be\s+(a|an)\s+\w+",
    r"cannot\s+be\s+(null|empty|none)",
    r"is\s+not\s+valid",
    r"does\s+not\s+match",
    r"out\s+of\s+range",
    r"invalid\s+date",
    r"invalid\s+json",
    r"malformed\s+request",
];

/// Phrases indicating a malformed query / bad identifier in a tool call.
pub const BAD_QUERY_PATTERNS: &[&str] = &[
    r"invalid\s+query",
    r"query\s+syntax\s+error",
    r"malformed\s+query",
    r"unknown\s+field",
    r"invalid\s+field",
    r"invalid\s+filter",
    r"invalid\s+search",
    r"unknown\s+id",
    r"invalid\s+id",
    r"id\s+format\s+error",
    r"invalid\s+identifier",
    r"query\s+failed",
    r"search\s+error",
    r"invalid\s+operator",
    r"unsupported\s+query",
];

/// Phrases indicating the agent invoked a tool that does not exist.
pub const TOOL_NOT_FOUND_PATTERNS: &[&str] = &[
    r"unknown\s+function",
    r"unknown\s+tool",
    r"function\s+not\s+found",
    r"tool\s+not\s+found",
    r"no\s+such\s+function",
    r"no\s+such\s+tool",
    r"undefined\s+function",
    r"action\s+not\s+supported",
    r"invalid\s+tool",
    r"invalid\s+function",
    r"unrecognized\s+function",
];

/// Authentication / authorization failure phrases (401/403 and friends).
pub const AUTH_MISUSE_PATTERNS: &[&str] = &[
    r"\bunauthorized\b",
    r"(status|error|http|code)\s*:?\s*401",
    r"401\s+unauthorized",
    r"403\s+forbidden",
    r"permission\s+denied",
    r"access\s+denied",
    r"authentication\s+required",
    r"invalid\s+credentials",
    r"invalid\s+token",
    r"token\s+expired",
    r"missing\s+authorization",
    r"\bforbidden\b",
    r"not\s+authorized",
    r"insufficient\s+permissions?",
];

/// Phrases indicating an operation was attempted in the wrong order / state
/// (precondition failures, 409 conflicts).
pub const STATE_ERROR_PATTERNS: &[&str] = &[
    r"invalid\s+state",
    r"illegal\s+state",
    r"must\s+call\s+\w+\s+first",
    r"must\s+\w+\s+before",
    r"cannot\s+\w+\s+before",
    r"already\s+(exists?|created|started|finished)",
    r"not\s+initialized",
    r"not\s+started",
    r"already\s+in\s+progress",
    r"operation\s+in\s+progress",
    r"sequence\s+error",
    r"precondition\s+failed",
    r"(status|error|http)\s*:?\s*409",
    r"409\s+conflict",
    r"\bconflict\b",
];
|
||||
|
||||
fn compile(patterns: &[&str]) -> Regex {
|
||||
// Use `(?i)` flag for case-insensitive matching, matching Python's `re.IGNORECASE`.
|
||||
let combined = patterns
|
||||
.iter()
|
||||
.map(|p| format!("({})", p))
|
||||
.collect::<Vec<_>>()
|
||||
.join("|");
|
||||
Regex::new(&format!("(?i){}", combined)).expect("failure pattern regex must compile")
|
||||
}
|
||||
|
||||
// Lazily-compiled, process-wide combined regexes. Each is built once on first
// use via `OnceLock` so the (expensive) compile cost is paid at most once.
fn invalid_args_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(INVALID_ARGS_PATTERNS))
}
fn bad_query_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(BAD_QUERY_PATTERNS))
}
fn tool_not_found_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(TOOL_NOT_FOUND_PATTERNS))
}
fn auth_misuse_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(AUTH_MISUSE_PATTERNS))
}
fn state_error_re() -> &'static Regex {
    static R: OnceLock<Regex> = OnceLock::new();
    R.get_or_init(|| compile(STATE_ERROR_PATTERNS))
}
|
||||
|
||||
/// Pull tool name + args from a `function_call` message. Mirrors
|
||||
/// `_extract_tool_info` in the reference.
|
||||
pub(crate) fn extract_tool_info(value: &str) -> (String, String) {
|
||||
if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(value) {
|
||||
if let Some(obj) = parsed.as_object() {
|
||||
let name = obj
|
||||
.get("name")
|
||||
.or_else(|| obj.get("function"))
|
||||
.and_then(|v| v.as_str())
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_else(|| "unknown".to_string());
|
||||
let args = match obj.get("arguments").or_else(|| obj.get("args")) {
|
||||
Some(serde_json::Value::Object(o)) => {
|
||||
serde_json::to_string(&serde_json::Value::Object(o.clone())).unwrap_or_default()
|
||||
}
|
||||
Some(other) => other
|
||||
.as_str()
|
||||
.map(|s| s.to_string())
|
||||
.unwrap_or_else(|| serde_json::to_string(other).unwrap_or_default()),
|
||||
None => String::new(),
|
||||
};
|
||||
return (name, args);
|
||||
}
|
||||
}
|
||||
let mut snippet: String = value.chars().take(200).collect();
|
||||
snippet.shrink_to_fit();
|
||||
("unknown".to_string(), snippet)
|
||||
}
|
||||
|
||||
/// Build a context-window snippet around a regex match, with leading/trailing
|
||||
/// ellipses when truncated. Mirrors `_get_snippet`.
|
||||
fn snippet_around(text: &str, m: regex::Match<'_>, context: usize) -> String {
|
||||
let start = m.start().saturating_sub(context);
|
||||
let end = (m.end() + context).min(text.len());
|
||||
// Ensure we cut on UTF-8 boundaries.
|
||||
let start = align_char_boundary(text, start, false);
|
||||
let end = align_char_boundary(text, end, true);
|
||||
let mut snippet = String::new();
|
||||
if start > 0 {
|
||||
snippet.push_str("...");
|
||||
}
|
||||
snippet.push_str(&text[start..end]);
|
||||
if end < text.len() {
|
||||
snippet.push_str("...");
|
||||
}
|
||||
snippet
|
||||
}
|
||||
|
||||
/// Snaps `idx` onto the nearest UTF-8 character boundary of `s`: forward
/// (toward the end) when `forward` is true, backward otherwise. Indices at or
/// past the end clamp to `s.len()`.
fn align_char_boundary(s: &str, idx: usize, forward: bool) -> usize {
    if idx >= s.len() {
        return s.len();
    }
    if forward {
        // s.len() is always a boundary, so this find cannot fail.
        (idx..=s.len())
            .find(|&i| s.is_char_boundary(i))
            .unwrap_or_else(|| s.len())
    } else {
        // 0 is always a boundary, so this find cannot fail.
        (0..=idx).rev().find(|&i| s.is_char_boundary(i)).unwrap_or(0)
    }
}
|
||||
|
||||
pub fn analyze_failure(messages: &[ShareGptMessage<'_>]) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("failure");
|
||||
let mut last_call: Option<(usize, String, String)> = None;
|
||||
|
||||
for (i, msg) in messages.iter().enumerate() {
|
||||
match msg.from {
|
||||
"function_call" => {
|
||||
let (name, args) = extract_tool_info(msg.value);
|
||||
last_call = Some((i, name, args));
|
||||
continue;
|
||||
}
|
||||
"observation" => {}
|
||||
_ => continue,
|
||||
}
|
||||
|
||||
let value = msg.value;
|
||||
let lower = value.to_lowercase();
|
||||
let (call_index, tool_name) = match &last_call {
|
||||
Some((idx, name, _)) => (*idx, name.clone()),
|
||||
None => (i.saturating_sub(1), "unknown".to_string()),
|
||||
};
|
||||
|
||||
if let Some(m) = invalid_args_re().find(&lower) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionFailureInvalidArgs,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
)
|
||||
.with_confidence(0.9)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"call_index": call_index,
|
||||
"error_type": "invalid_args",
|
||||
"matched": m.as_str(),
|
||||
})),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = tool_not_found_re().find(&lower) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionFailureToolNotFound,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
)
|
||||
.with_confidence(0.95)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"call_index": call_index,
|
||||
"error_type": "tool_not_found",
|
||||
"matched": m.as_str(),
|
||||
})),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = auth_misuse_re().find(&lower) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionFailureAuthMisuse,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
)
|
||||
.with_confidence(0.8)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"call_index": call_index,
|
||||
"error_type": "auth_misuse",
|
||||
"matched": m.as_str(),
|
||||
})),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = state_error_re().find(&lower) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionFailureStateError,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
)
|
||||
.with_confidence(0.85)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"call_index": call_index,
|
||||
"error_type": "state_error",
|
||||
"matched": m.as_str(),
|
||||
})),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(m) = bad_query_re().find(&lower) {
|
||||
let confidence = if ["error", "invalid", "failed"]
|
||||
.iter()
|
||||
.any(|w| lower.contains(w))
|
||||
{
|
||||
0.8
|
||||
} else {
|
||||
0.6
|
||||
};
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionFailureBadQuery,
|
||||
i,
|
||||
snippet_around(value, m, 50),
|
||||
)
|
||||
.with_confidence(confidence)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"call_index": call_index,
|
||||
"error_type": "bad_query",
|
||||
"matched": m.as_str(),
|
||||
})),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Builds a function_call-role message (sets up attribution context).
    fn fc(value: &str) -> ShareGptMessage<'_> {
        ShareGptMessage {
            from: "function_call",
            value,
        }
    }
    // Builds an observation-role message — the role analyze_failure inspects.
    fn obs(value: &str) -> ShareGptMessage<'_> {
        ShareGptMessage {
            from: "observation",
            value,
        }
    }

    #[test]
    fn detects_invalid_args() {
        let msgs = vec![
            fc(r#"{"name":"create_user","arguments":{"age":"twelve"}}"#),
            obs("Error: validation failed - expected integer got string for field age"),
        ];
        let g = analyze_failure(&msgs);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureInvalidArgs)));
    }

    #[test]
    fn detects_tool_not_found() {
        let msgs = vec![
            fc(r#"{"name":"send_thought","arguments":{}}"#),
            obs("Error: unknown function 'send_thought'"),
        ];
        let g = analyze_failure(&msgs);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureToolNotFound)));
    }

    #[test]
    fn detects_auth_misuse() {
        let msgs = vec![
            fc(r#"{"name":"get_secret","arguments":{}}"#),
            obs("HTTP 401 Unauthorized"),
        ];
        let g = analyze_failure(&msgs);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureAuthMisuse)));
    }

    #[test]
    fn detects_state_error() {
        let msgs = vec![
            fc(r#"{"name":"commit_tx","arguments":{}}"#),
            obs("must call begin_tx first"),
        ];
        let g = analyze_failure(&msgs);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureStateError)));
    }
}
|
||||
433
crates/brightstaff/src/signals/execution/loops.rs
Normal file
433
crates/brightstaff/src/signals/execution/loops.rs
Normal file
|
|
@ -0,0 +1,433 @@
|
|||
//! Execution loops detector. Direct port of `signals/execution/loops.py`.
|
||||
|
||||
use serde_json::json;
|
||||
|
||||
use crate::signals::analyzer::ShareGptMessage;
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
|
||||
/// Minimum consecutive identical calls (same tool, same args) to flag a retry loop.
pub const RETRY_THRESHOLD: usize = 3;
/// Minimum run of same-tool calls (with >=2 distinct arg sets) to flag parameter drift.
pub const PARAMETER_DRIFT_THRESHOLD: usize = 3;
/// Minimum repetitions of a multi-tool cycle to flag an oscillation.
pub const OSCILLATION_CYCLES_THRESHOLD: usize = 3;
|
||||
|
||||
/// One parsed `function_call` message, normalized for loop comparison.
#[derive(Debug, Clone)]
pub struct ToolCall {
    // Index of the originating message in the conversation.
    pub index: usize,
    // Tool/function name ("unknown" when unparseable).
    pub name: String,
    /// Canonical JSON string of arguments (sorted keys when parseable).
    pub args: String,
    // Parsed argument map when the args were a JSON object; None otherwise.
    pub args_dict: Option<serde_json::Map<String, serde_json::Value>>,
}
|
||||
|
||||
impl ToolCall {
    /// Compares two calls' arguments: structurally when both parsed to JSON
    /// objects, otherwise by the canonical string form.
    pub fn args_equal(&self, other: &ToolCall) -> bool {
        if let (Some(a), Some(b)) = (&self.args_dict, &other.args_dict) {
            a == b
        } else {
            self.args == other.args
        }
    }
}
|
||||
|
||||
/// Parses a `function_call` message into a [`ToolCall`], trying three forms
/// in order: (1) a JSON object with `name`/`function` + `arguments`/`args`
/// keys, (2) a `name(...)` call-syntax string whose parenthesized part may
/// itself be JSON, (3) a bare tool name. Object arguments are canonicalized
/// with sorted keys so equal argument sets compare equal as strings.
/// Returns `None` only for non-`function_call` messages.
fn parse_tool_call(index: usize, msg: &ShareGptMessage<'_>) -> Option<ToolCall> {
    if msg.from != "function_call" {
        return None;
    }
    let value = msg.value;

    // Form 1: a JSON object payload.
    if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(value) {
        if let Some(obj) = parsed.as_object() {
            let name = obj
                .get("name")
                .or_else(|| obj.get("function"))
                .and_then(|v| v.as_str())
                .map(|s| s.to_string())
                .unwrap_or_else(|| "unknown".to_string());
            let raw_args = obj.get("arguments").or_else(|| obj.get("args"));
            let (args_str, args_dict) = match raw_args {
                Some(serde_json::Value::Object(o)) => {
                    // Canonicalize: rebuild the map in sorted-key order so the
                    // serialized string is deterministic.
                    let mut keys: Vec<&String> = o.keys().collect();
                    keys.sort();
                    let mut canon = serde_json::Map::new();
                    for k in keys {
                        canon.insert(k.clone(), o[k].clone());
                    }
                    (
                        serde_json::to_string(&serde_json::Value::Object(canon.clone()))
                            .unwrap_or_default(),
                        Some(canon),
                    )
                }
                Some(other) => (
                    other
                        .as_str()
                        .map(|s| s.to_string())
                        .unwrap_or_else(|| serde_json::to_string(other).unwrap_or_default()),
                    None,
                ),
                None => (String::new(), None),
            };
            return Some(ToolCall {
                index,
                name,
                args: args_str,
                args_dict,
            });
        }
    }

    // Form 2: call syntax like `tool_name({...})` or `tool_name(raw args)`.
    if let Some(paren) = value.find('(') {
        if paren > 0 {
            let name = value[..paren].trim().to_string();
            let args_part = &value[paren..];
            if args_part.starts_with('(') && args_part.ends_with(')') {
                let inner = args_part[1..args_part.len() - 1].trim();
                // Parenthesized part may itself be a JSON object.
                if let Ok(serde_json::Value::Object(o)) =
                    serde_json::from_str::<serde_json::Value>(inner)
                {
                    let mut keys: Vec<&String> = o.keys().collect();
                    keys.sort();
                    let mut canon = serde_json::Map::new();
                    for k in keys {
                        canon.insert(k.clone(), o[k].clone());
                    }
                    return Some(ToolCall {
                        index,
                        name,
                        args: serde_json::to_string(&serde_json::Value::Object(canon.clone()))
                            .unwrap_or_default(),
                        args_dict: Some(canon),
                    });
                }
                return Some(ToolCall {
                    index,
                    name,
                    args: inner.to_string(),
                    args_dict: None,
                });
            }
            // Unbalanced parens: keep the raw tail as the argument string.
            return Some(ToolCall {
                index,
                name,
                args: args_part.to_string(),
                args_dict: None,
            });
        }
    }

    // Form 3: treat the whole value as a bare tool name.
    Some(ToolCall {
        index,
        name: value.trim().to_string(),
        args: String::new(),
        args_dict: None,
    })
}
|
||||
|
||||
fn extract_tool_calls(messages: &[ShareGptMessage<'_>]) -> Vec<ToolCall> {
|
||||
let mut out = Vec::new();
|
||||
for (i, msg) in messages.iter().enumerate() {
|
||||
if let Some(c) = parse_tool_call(i, msg) {
|
||||
out.push(c);
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
fn detect_retry(calls: &[ToolCall]) -> Vec<(usize, usize, String)> {
|
||||
if calls.len() < RETRY_THRESHOLD {
|
||||
return Vec::new();
|
||||
}
|
||||
let mut patterns = Vec::new();
|
||||
let mut i = 0;
|
||||
while i < calls.len() {
|
||||
let current = &calls[i];
|
||||
let mut j = i + 1;
|
||||
let mut run_length = 1;
|
||||
while j < calls.len() {
|
||||
if calls[j].name == current.name && calls[j].args_equal(current) {
|
||||
run_length += 1;
|
||||
j += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if run_length >= RETRY_THRESHOLD {
|
||||
patterns.push((calls[i].index, calls[j - 1].index, current.name.clone()));
|
||||
i = j;
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
patterns
|
||||
}
|
||||
|
||||
fn detect_parameter_drift(calls: &[ToolCall]) -> Vec<(usize, usize, String, usize)> {
|
||||
if calls.len() < PARAMETER_DRIFT_THRESHOLD {
|
||||
return Vec::new();
|
||||
}
|
||||
let mut patterns = Vec::new();
|
||||
let mut i = 0;
|
||||
while i < calls.len() {
|
||||
let current_name = calls[i].name.clone();
|
||||
let mut seen_args: Vec<String> = vec![calls[i].args.clone()];
|
||||
let mut unique_args = 1;
|
||||
let mut j = i + 1;
|
||||
while j < calls.len() {
|
||||
if calls[j].name != current_name {
|
||||
break;
|
||||
}
|
||||
if !seen_args.iter().any(|a| a == &calls[j].args) {
|
||||
seen_args.push(calls[j].args.clone());
|
||||
unique_args += 1;
|
||||
}
|
||||
j += 1;
|
||||
}
|
||||
let run_length = j - i;
|
||||
if run_length >= PARAMETER_DRIFT_THRESHOLD && unique_args >= 2 {
|
||||
patterns.push((
|
||||
calls[i].index,
|
||||
calls[j - 1].index,
|
||||
current_name,
|
||||
unique_args,
|
||||
));
|
||||
i = j;
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
patterns
|
||||
}
|
||||
|
||||
/// Finds repeating multi-tool cycles ("A -> B -> A -> B -> ..."): a pattern of
/// 2..=5 tool names repeated at least `OSCILLATION_CYCLES_THRESHOLD` times.
/// Returns (start message index, end message index, pattern names, cycle
/// count) per detection; overlapping detections are pruned at the end.
fn detect_oscillation(calls: &[ToolCall]) -> Vec<(usize, usize, Vec<String>, usize)> {
    // Need at least two full cycles of the shortest (length-2) pattern.
    let min_calls = 2 * OSCILLATION_CYCLES_THRESHOLD;
    if calls.len() < min_calls {
        return Vec::new();
    }
    let mut patterns = Vec::new();
    let mut i: usize = 0;
    while i + min_calls <= calls.len() {
        let max_pat_len = (5usize).min(calls.len() - i);
        let mut found_for_i = false;
        // Try candidate pattern lengths, shortest first.
        for pat_len in 2..=max_pat_len {
            let pattern_names: Vec<String> =
                (0..pat_len).map(|k| calls[i + k].name.clone()).collect();
            // A pattern of a single repeated tool is a retry, not an
            // oscillation — require at least two distinct names.
            let unique: std::collections::HashSet<&String> = pattern_names.iter().collect();
            if unique.len() < 2 {
                continue;
            }
            // Count back-to-back repetitions of the candidate pattern.
            let mut cycles = 1;
            let mut pos = i + pat_len;
            while pos + pat_len <= calls.len() {
                let mut all_match = true;
                for k in 0..pat_len {
                    if calls[pos + k].name != pattern_names[k] {
                        all_match = false;
                        break;
                    }
                }
                if all_match {
                    cycles += 1;
                    pos += pat_len;
                } else {
                    break;
                }
            }
            if cycles >= OSCILLATION_CYCLES_THRESHOLD {
                let end_idx_in_calls = i + (cycles * pat_len) - 1;
                patterns.push((
                    calls[i].index,
                    calls[end_idx_in_calls].index,
                    pattern_names,
                    cycles,
                ));
                // Mirror Python: `i = end_idx + 1 - pattern_len`. We set `i` so that
                // the next outer iteration begins after we account for overlap.
                // NOTE(review): this restarts one pattern-length *inside* the
                // detected span, so overlapping re-detections are possible —
                // deduplicate_patterns below removes them.
                i = end_idx_in_calls + 1 - pat_len;
                found_for_i = true;
                break;
            }
        }
        if !found_for_i {
            i += 1;
        } else {
            // Match Python's `i = end_idx + 1 - pattern_len; break` then loop.
            // We'll continue; the outer while re-checks i.
        }
    }
    if patterns.len() > 1 {
        patterns = deduplicate_patterns(patterns);
    }
    patterns
}
|
||||
|
||||
/// Drops oscillation detections whose span overlaps an earlier-kept one.
/// Sorted by start index ascending and, on ties, span length descending so
/// the longest detection at a given start wins; a later pattern survives only
/// if it begins strictly after the previously kept pattern's end.
fn deduplicate_patterns(
    mut patterns: Vec<(usize, usize, Vec<String>, usize)>,
) -> Vec<(usize, usize, Vec<String>, usize)> {
    if patterns.is_empty() {
        return patterns;
    }
    patterns.sort_by(|a, b| a.0.cmp(&b.0).then_with(|| (b.1 - b.0).cmp(&(a.1 - a.0))));
    let mut kept: Vec<(usize, usize, Vec<String>, usize)> = Vec::new();
    for pattern in patterns {
        let overlaps = kept.last().map_or(false, |prev| pattern.0 <= prev.1);
        if !overlaps {
            kept.push(pattern);
        }
    }
    kept
}
|
||||
|
||||
pub fn analyze_loops(messages: &[ShareGptMessage<'_>]) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("loops");
|
||||
let calls = extract_tool_calls(messages);
|
||||
if calls.len() < RETRY_THRESHOLD {
|
||||
return group;
|
||||
}
|
||||
|
||||
let retries = detect_retry(&calls);
|
||||
for (start_idx, end_idx, tool_name) in &retries {
|
||||
let call_count = calls
|
||||
.iter()
|
||||
.filter(|c| *start_idx <= c.index && c.index <= *end_idx)
|
||||
.count();
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionLoopsRetry,
|
||||
*start_idx,
|
||||
format!(
|
||||
"Tool '{}' called {} times with identical arguments",
|
||||
tool_name, call_count
|
||||
),
|
||||
)
|
||||
.with_confidence(0.95)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"start_index": start_idx,
|
||||
"end_index": end_idx,
|
||||
"call_count": call_count,
|
||||
"loop_type": "retry",
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
let drifts = detect_parameter_drift(&calls);
|
||||
for (start_idx, end_idx, tool_name, variation_count) in &drifts {
|
||||
let overlaps_retry = retries
|
||||
.iter()
|
||||
.any(|r| !(*end_idx < r.0 || *start_idx > r.1));
|
||||
if overlaps_retry {
|
||||
continue;
|
||||
}
|
||||
let call_count = calls
|
||||
.iter()
|
||||
.filter(|c| *start_idx <= c.index && c.index <= *end_idx)
|
||||
.count();
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionLoopsParameterDrift,
|
||||
*start_idx,
|
||||
format!(
|
||||
"Tool '{}' called {} times with {} different argument variations",
|
||||
tool_name, call_count, variation_count
|
||||
),
|
||||
)
|
||||
.with_confidence(0.85)
|
||||
.with_metadata(json!({
|
||||
"tool_name": tool_name,
|
||||
"start_index": start_idx,
|
||||
"end_index": end_idx,
|
||||
"call_count": call_count,
|
||||
"variation_count": variation_count,
|
||||
"loop_type": "parameter_drift",
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
let oscillations = detect_oscillation(&calls);
|
||||
for (start_idx, end_idx, tool_names, cycle_count) in &oscillations {
|
||||
let pattern_str = tool_names.join(" \u{2192} ");
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::ExecutionLoopsOscillation,
|
||||
*start_idx,
|
||||
format!(
|
||||
"Oscillation pattern [{}] repeated {} times",
|
||||
pattern_str, cycle_count
|
||||
),
|
||||
)
|
||||
.with_confidence(0.9)
|
||||
.with_metadata(json!({
|
||||
"pattern": tool_names,
|
||||
"start_index": start_idx,
|
||||
"end_index": end_idx,
|
||||
"cycle_count": cycle_count,
|
||||
"loop_type": "oscillation",
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a `function_call` message wrapping the given JSON payload.
    fn call_msg(value: &str) -> ShareGptMessage<'_> {
        ShareGptMessage {
            from: "function_call",
            value,
        }
    }

    #[test]
    fn detects_retry_loop() {
        let payload = r#"{"name":"check_status","arguments":{"id":"abc"}}"#;
        let conversation: Vec<_> = (0..4).map(|_| call_msg(payload)).collect();
        let result = analyze_loops(&conversation);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionLoopsRetry)));
    }

    #[test]
    fn detects_parameter_drift() {
        let conversation = vec![
            call_msg(r#"{"name":"search","arguments":{"q":"a"}}"#),
            call_msg(r#"{"name":"search","arguments":{"q":"ab"}}"#),
            call_msg(r#"{"name":"search","arguments":{"q":"abc"}}"#),
            call_msg(r#"{"name":"search","arguments":{"q":"abcd"}}"#),
        ];
        let result = analyze_loops(&conversation);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionLoopsParameterDrift)));
    }

    #[test]
    fn detects_oscillation() {
        let a = r#"{"name":"toolA","arguments":{}}"#;
        let b = r#"{"name":"toolB","arguments":{}}"#;
        let conversation = vec![
            call_msg(a),
            call_msg(b),
            call_msg(a),
            call_msg(b),
            call_msg(a),
            call_msg(b),
        ];
        let result = analyze_loops(&conversation);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::ExecutionLoopsOscillation)));
    }

    #[test]
    fn no_signals_when_few_calls() {
        let conversation = vec![call_msg(r#"{"name":"only_once","arguments":{}}"#)];
        assert!(analyze_loops(&conversation).signals.is_empty());
    }
}
|
||||
5
crates/brightstaff/src/signals/execution/mod.rs
Normal file
5
crates/brightstaff/src/signals/execution/mod.rs
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
//! Execution signals: failure (agent-caused tool errors) and loops
|
||||
//! (repetitive tool-call behavior).
|
||||
|
||||
pub mod failure;
|
||||
pub mod loops;
|
||||
193
crates/brightstaff/src/signals/interaction/constants.rs
Normal file
193
crates/brightstaff/src/signals/interaction/constants.rs
Normal file
|
|
@ -0,0 +1,193 @@
|
|||
//! Shared constants for the interaction layer detectors.
|
||||
//!
|
||||
//! Direct port of `signals/interaction/constants.py`.
|
||||
|
||||
use std::collections::HashSet;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
/// Openers that signal a positive / cooperative user turn; used to suppress
/// false "frustration" hits (e.g. "Yes!! perfect!!!").
pub const POSITIVE_PREFIXES: &[&str] = &[
    "yes", "yeah", "yep", "yup", "sure", "ok", "okay", "great", "awesome",
    "perfect", "thanks", "thank", "wonderful", "excellent", "amazing",
    "nice", "good", "cool", "absolutely", "definitely", "please",
];

/// Openers that signal the user is confirming/agreeing with the assistant.
pub const CONFIRMATION_PREFIXES: &[&str] = &[
    "yes", "yeah", "yep", "yup", "correct", "right",
    "that's correct", "thats correct",
    "that's right", "thats right",
    "that is correct", "that is right",
];

// Standard English stopword list (ported verbatim from the Python source);
// exposed only through `stopwords()`.
const STOPWORD_LIST: &[&str] = &[
    "a", "about", "above", "after", "again", "against", "all", "am", "an",
    "and", "any", "are", "as", "at", "be", "because", "been", "before",
    "being", "below", "between", "both", "but", "by", "can", "could", "did",
    "do", "does", "doing", "down", "during", "each", "few", "for", "from",
    "further", "had", "has", "have", "having", "he", "her", "here", "hers",
    "herself", "him", "himself", "his", "how", "i", "if", "in", "into",
    "is", "it", "its", "itself", "just", "me", "more", "most", "my",
    "myself", "no", "nor", "not", "now", "of", "off", "on", "once", "only",
    "or", "other", "our", "ours", "ourselves", "out", "over", "own", "same",
    "she", "should", "so", "some", "such", "than", "that", "the", "their",
    "theirs", "them", "themselves", "then", "there", "these", "they",
    "this", "those", "through", "to", "too", "under", "until", "up", "very",
    "was", "we", "were", "what", "when", "where", "which", "while", "who",
    "whom", "why", "with", "would", "you", "your", "yours", "yourself",
    "yourselves",
];
|
||||
|
||||
pub fn stopwords() -> &'static HashSet<&'static str> {
|
||||
static SET: OnceLock<HashSet<&'static str>> = OnceLock::new();
|
||||
SET.get_or_init(|| STOPWORD_LIST.iter().copied().collect())
|
||||
}
|
||||
|
||||
/// Returns true if `text` (case-insensitive, leading-whitespace-trimmed)
/// starts with any of the given prefixes treated as **whole tokens or token
/// sequences**: the character immediately after a matched prefix, if any,
/// must not be alphanumeric.
///
/// The previous implementation used a bare `starts_with`, which contradicted
/// this documented intent — `"please"` fired on `"pleased"`. The boundary
/// check below fixes that while leaving all legitimate matches
/// (end-of-string, whitespace, or punctuation after the prefix) intact.
pub fn starts_with_prefix(text: &str, prefixes: &[&str]) -> bool {
    let lowered = text.to_lowercase();
    let trimmed = lowered.trim_start();
    prefixes.iter().any(|prefix| {
        match trimmed.strip_prefix(prefix) {
            // A whole-token match: nothing follows, or the next character is
            // a non-alphanumeric boundary (space, '!', ',', ...).
            Some(rest) => !rest.chars().next().map_or(false, |c| c.is_alphanumeric()),
            None => false,
        }
    })
}
|
||||
445
crates/brightstaff/src/signals/interaction/disengagement.rs
Normal file
445
crates/brightstaff/src/signals/interaction/disengagement.rs
Normal file
|
|
@ -0,0 +1,445 @@
|
|||
//! Disengagement signals: escalation, quit, negative stance.
|
||||
//!
|
||||
//! Direct port of `signals/interaction/disengagement.py`.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use regex::Regex;
|
||||
use serde_json::json;
|
||||
|
||||
use super::constants::{starts_with_prefix, POSITIVE_PREFIXES};
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern};
|
||||
|
||||
/// Phrasings asking to be handed off to a human or support channel.
///
/// Structured as {request form} x {target}: each target below gets the same
/// seven request forms ("speak to", "talk to", "connect me to", "connect me
/// with", "transfer me to", "get me", "chat with"), plus the explicit
/// "escalate this".
const ESCALATION_PATTERN_TEXTS: &[&str] = &[
    // a human
    "speak to a human", "talk to a human", "connect me to a human",
    "connect me with a human", "transfer me to a human", "get me a human",
    "chat with a human",
    // a person
    "speak to a person", "talk to a person", "connect me to a person",
    "connect me with a person", "transfer me to a person", "get me a person",
    "chat with a person",
    // a real person
    "speak to a real person", "talk to a real person",
    "connect me to a real person", "connect me with a real person",
    "transfer me to a real person", "get me a real person",
    "chat with a real person",
    // an actual person
    "speak to an actual person", "talk to an actual person",
    "connect me to an actual person", "connect me with an actual person",
    "transfer me to an actual person", "get me an actual person",
    "chat with an actual person",
    // a supervisor
    "speak to a supervisor", "talk to a supervisor",
    "connect me to a supervisor", "connect me with a supervisor",
    "transfer me to a supervisor", "get me a supervisor",
    "chat with a supervisor",
    // a manager
    "speak to a manager", "talk to a manager", "connect me to a manager",
    "connect me with a manager", "transfer me to a manager",
    "get me a manager", "chat with a manager",
    // customer service
    "speak to customer service", "talk to customer service",
    "connect me to customer service", "connect me with customer service",
    "transfer me to customer service", "get me customer service",
    "chat with customer service",
    // customer support
    "speak to customer support", "talk to customer support",
    "connect me to customer support", "connect me with customer support",
    "transfer me to customer support", "get me customer support",
    "chat with customer support",
    // support
    "speak to support", "talk to support", "connect me to support",
    "connect me with support", "transfer me to support", "get me support",
    "chat with support",
    // tech support
    "speak to tech support", "talk to tech support",
    "connect me to tech support", "connect me with tech support",
    "transfer me to tech support", "get me tech support",
    "chat with tech support",
    // help desk
    "speak to help desk", "talk to help desk", "connect me to help desk",
    "connect me with help desk", "transfer me to help desk",
    "get me help desk", "chat with help desk",
    // explicit escalation
    "escalate this",
];

/// Phrasings signaling the user is abandoning the task entirely.
const QUIT_PATTERN_TEXTS: &[&str] = &[
    "i give up", "i'm giving up", "im giving up", "i'm going to quit",
    "i quit", "forget it", "forget this", "screw it", "screw this",
    "don't bother trying", "don't bother with this", "don't bother with it",
    "don't even bother", "why bother", "not worth it", "this is hopeless",
    "going elsewhere", "try somewhere else", "look elsewhere",
];

/// Generic complaints and frustration directed at the interaction.
const NEGATIVE_STANCE_PATTERN_TEXTS: &[&str] = &[
    "this is useless", "not helpful", "doesn't help", "not helping",
    "you're not helping", "youre not helping", "this doesn't work",
    "this doesnt work", "this isn't working", "this isnt working",
    "still doesn't work", "still doesnt work", "still not working",
    "still isn't working", "still isnt working", "waste of time",
    "wasting my time", "this is ridiculous", "this is absurd",
    "this is insane", "this is stupid", "this is dumb", "this sucks",
    "this is frustrating", "not good enough", "why can't you",
    "why cant you", "same issue", "did that already", "done that already",
    "tried that already", "already tried that", "i've done that",
    "ive done that", "i've tried that", "ive tried that",
    "i'm disappointed", "im disappointed", "disappointed with you",
    "disappointed in you", "useless bot", "dumb bot", "stupid bot",
];

/// Profanity aimed at the agent itself; checked before the generic
/// complaint list so the more specific indicator wins.
const AGENT_DIRECTED_PROFANITY_PATTERN_TEXTS: &[&str] = &[
    "this is bullshit", "what bullshit", "such bullshit", "total bullshit",
    "complete bullshit", "this is crap", "what crap", "this is shit",
    "what the hell is wrong with you", "what the fuck is wrong with you",
    "you're fucking useless", "youre fucking useless",
    "you are fucking useless", "fucking useless", "this bot is shit",
    "this bot is crap", "damn bot", "fucking bot", "stupid fucking",
    "are you fucking kidding", "wtf is wrong with you", "wtf is this",
    "ffs just", "for fucks sake", "for fuck's sake", "what the f**k",
    "what the f*ck", "what the f***", "that's bullsh*t", "thats bullsh*t",
    "that's bull***t", "thats bull***t", "that's bs", "thats bs",
    "this is bullsh*t", "this is bull***t", "this is bs",
];
|
||||
|
||||
fn escalation_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(ESCALATION_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn quit_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(QUIT_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn negative_stance_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(NEGATIVE_STANCE_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn profanity_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(AGENT_DIRECTED_PROFANITY_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn re_consecutive_q() -> &'static Regex {
|
||||
static R: OnceLock<Regex> = OnceLock::new();
|
||||
R.get_or_init(|| Regex::new(r"\?{2,}").unwrap())
|
||||
}
|
||||
fn re_consecutive_e() -> &'static Regex {
|
||||
static R: OnceLock<Regex> = OnceLock::new();
|
||||
R.get_or_init(|| Regex::new(r"!{2,}").unwrap())
|
||||
}
|
||||
fn re_mixed_punct() -> &'static Regex {
|
||||
static R: OnceLock<Regex> = OnceLock::new();
|
||||
R.get_or_init(|| Regex::new(r"[?!]{3,}").unwrap())
|
||||
}
|
||||
|
||||
pub fn analyze_disengagement(
|
||||
normalized_messages: &[(usize, &str, NormalizedMessage)],
|
||||
char_ngram_threshold: f32,
|
||||
token_cosine_threshold: f32,
|
||||
) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("disengagement");
|
||||
|
||||
for (idx, role, norm_msg) in normalized_messages {
|
||||
if *role != "human" {
|
||||
continue;
|
||||
}
|
||||
|
||||
let text = &norm_msg.raw;
|
||||
|
||||
// All-caps shouting check.
|
||||
let alpha_chars: String = text.chars().filter(|c| c.is_alphabetic()).collect();
|
||||
if alpha_chars.chars().count() >= 10 {
|
||||
let upper_count = alpha_chars.chars().filter(|c| c.is_uppercase()).count();
|
||||
let upper_ratio = upper_count as f32 / alpha_chars.chars().count() as f32;
|
||||
if upper_ratio >= 0.8 {
|
||||
let snippet: String = text.chars().take(50).collect();
|
||||
group.add_signal(
|
||||
SignalInstance::new(SignalType::DisengagementNegativeStance, *idx, snippet)
|
||||
.with_metadata(json!({
|
||||
"indicator_type": "all_caps",
|
||||
"upper_ratio": upper_ratio,
|
||||
})),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Excessive consecutive punctuation.
|
||||
let starts_with_positive = starts_with_prefix(text, POSITIVE_PREFIXES);
|
||||
let cq = re_consecutive_q().find_iter(text).count();
|
||||
let ce = re_consecutive_e().find_iter(text).count();
|
||||
let mixed = re_mixed_punct().find_iter(text).count();
|
||||
if !starts_with_positive && (cq >= 1 || ce >= 1 || mixed >= 1) {
|
||||
let snippet: String = text.chars().take(50).collect();
|
||||
group.add_signal(
|
||||
SignalInstance::new(SignalType::DisengagementNegativeStance, *idx, snippet)
|
||||
.with_metadata(json!({
|
||||
"indicator_type": "excessive_punctuation",
|
||||
"consecutive_questions": cq,
|
||||
"consecutive_exclamations": ce,
|
||||
"mixed_punctuation": mixed,
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
// Escalation patterns.
|
||||
let mut found_escalation = false;
|
||||
for pattern in escalation_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::DisengagementEscalation,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "escalation"})),
|
||||
);
|
||||
found_escalation = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Quit patterns (independent of escalation).
|
||||
for pattern in quit_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(SignalType::DisengagementQuit, *idx, pattern.raw.clone())
|
||||
.with_metadata(json!({"pattern_type": "quit"})),
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Profanity (more specific) before generic negative stance.
|
||||
let mut found_profanity = false;
|
||||
for pattern in profanity_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::DisengagementNegativeStance,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({
|
||||
"indicator_type": "profanity",
|
||||
"pattern": pattern.raw,
|
||||
})),
|
||||
);
|
||||
found_profanity = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if !found_escalation && !found_profanity {
|
||||
for pattern in negative_stance_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::DisengagementNegativeStance,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({
|
||||
"indicator_type": "complaint",
|
||||
"pattern": pattern.raw,
|
||||
})),
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for building a normalized message from raw text.
    fn norm(text: &str) -> NormalizedMessage {
        NormalizedMessage::from_text(text, 2000)
    }

    /// A one-turn conversation consisting of a single user message.
    fn single_human(text: &str) -> Vec<(usize, &'static str, NormalizedMessage)> {
        vec![(0, "human", norm(text))]
    }

    #[test]
    fn detects_human_escalation_request() {
        let msgs = single_human("This is taking forever, get me a human");
        let result = analyze_disengagement(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::DisengagementEscalation)));
    }

    #[test]
    fn detects_quit_intent() {
        let msgs = single_human("Forget it, I give up");
        let result = analyze_disengagement(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::DisengagementQuit)));
    }

    #[test]
    fn detects_negative_stance_complaint() {
        let msgs = single_human("This is useless");
        let result = analyze_disengagement(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::DisengagementNegativeStance)));
    }

    #[test]
    fn detects_excessive_punctuation_as_negative_stance() {
        let msgs = single_human("WHY isn't this working???");
        let result = analyze_disengagement(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::DisengagementNegativeStance)));
    }

    #[test]
    fn positive_excitement_is_not_disengagement() {
        let msgs = single_human("Yes!! That's perfect!!!");
        let result = analyze_disengagement(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .all(|s| !matches!(s.signal_type, SignalType::DisengagementNegativeStance)));
    }
}
|
||||
338
crates/brightstaff/src/signals/interaction/misalignment.rs
Normal file
338
crates/brightstaff/src/signals/interaction/misalignment.rs
Normal file
|
|
@ -0,0 +1,338 @@
|
|||
//! Misalignment signals: corrections, rephrases, clarifications.
|
||||
//!
|
||||
//! Direct port of `signals/interaction/misalignment.py`.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use serde_json::json;
|
||||
|
||||
use super::constants::{stopwords, CONFIRMATION_PREFIXES};
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern};
|
||||
|
||||
/// Explicit "you got it wrong" phrasings from the user.
const CORRECTION_PATTERN_TEXTS: &[&str] = &[
    "no, i meant", "no i meant", "no, i said", "no i said", "no, i asked",
    "no i asked", "nah, i meant", "nope, i meant", "not what i said",
    "not what i asked", "that's not what i said", "that's not what i asked",
    "that's not what i meant", "thats not what i said",
    "thats not what i asked", "thats not what i meant",
    "that's not what you", "no that's not what i", "no, that's not what i",
    "you're not quite right", "youre not quite right",
    "you're not exactly right", "youre not exactly right",
    "you're wrong about", "youre wrong about", "i just said",
    "i already said", "i already told you",
];

/// Markers that the user is about to restate their request.
const REPHRASE_PATTERN_TEXTS: &[&str] = &[
    "let me rephrase", "let me explain again", "what i'm trying to say",
    "what i'm saying is", "in other words",
];

/// Phrasings indicating the user did not understand the assistant.
const CLARIFICATION_PATTERN_TEXTS: &[&str] = &[
    "i don't understand", "don't understand", "not understanding",
    "can't understand", "don't get it", "don't follow", "i'm confused",
    "so confused", "makes no sense", "doesn't make sense",
    "not making sense", "what do you mean", "what does that mean",
    "what are you saying", "i'm lost", "totally lost", "lost me",
    "no clue what you", "no idea what you", "no clue what that",
    "no idea what that", "come again", "say that again", "repeat that",
    "trouble following", "hard to follow", "can't follow",
];
|
||||
|
||||
fn correction_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(CORRECTION_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn rephrase_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(REPHRASE_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn clarification_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(CLARIFICATION_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn is_confirmation_message(text: &str) -> bool {
|
||||
let lowered = text.to_lowercase();
|
||||
let trimmed = lowered.trim();
|
||||
CONFIRMATION_PREFIXES.iter().any(|p| trimmed.starts_with(p))
|
||||
}
|
||||
|
||||
/// Detect whether two user messages appear to be rephrases of each other.
|
||||
pub fn is_similar_rephrase(
|
||||
norm_msg1: &NormalizedMessage,
|
||||
norm_msg2: &NormalizedMessage,
|
||||
overlap_threshold: f32,
|
||||
min_meaningful_tokens: usize,
|
||||
max_new_content_ratio: f32,
|
||||
) -> bool {
|
||||
if norm_msg1.tokens.len() < 3 || norm_msg2.tokens.len() < 3 {
|
||||
return false;
|
||||
}
|
||||
if is_confirmation_message(&norm_msg1.raw) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let stops = stopwords();
|
||||
let tokens1: std::collections::HashSet<&str> = norm_msg1
|
||||
.tokens
|
||||
.iter()
|
||||
.filter(|t| !stops.contains(t.as_str()))
|
||||
.map(|s| s.as_str())
|
||||
.collect();
|
||||
let tokens2: std::collections::HashSet<&str> = norm_msg2
|
||||
.tokens
|
||||
.iter()
|
||||
.filter(|t| !stops.contains(t.as_str()))
|
||||
.map(|s| s.as_str())
|
||||
.collect();
|
||||
|
||||
if tokens1.len() < min_meaningful_tokens || tokens2.len() < min_meaningful_tokens {
|
||||
return false;
|
||||
}
|
||||
|
||||
let new_tokens: std::collections::HashSet<&&str> = tokens1.difference(&tokens2).collect();
|
||||
let new_content_ratio = if tokens1.is_empty() {
|
||||
0.0
|
||||
} else {
|
||||
new_tokens.len() as f32 / tokens1.len() as f32
|
||||
};
|
||||
if new_content_ratio > max_new_content_ratio {
|
||||
return false;
|
||||
}
|
||||
|
||||
let intersection = tokens1.intersection(&tokens2).count();
|
||||
let min_size = tokens1.len().min(tokens2.len());
|
||||
if min_size == 0 {
|
||||
return false;
|
||||
}
|
||||
let overlap_ratio = intersection as f32 / min_size as f32;
|
||||
overlap_ratio >= overlap_threshold
|
||||
}
|
||||
|
||||
/// Analyze user messages for misalignment signals.
|
||||
pub fn analyze_misalignment(
|
||||
normalized_messages: &[(usize, &str, NormalizedMessage)],
|
||||
char_ngram_threshold: f32,
|
||||
token_cosine_threshold: f32,
|
||||
) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("misalignment");
|
||||
|
||||
let mut prev_user_idx: Option<usize> = None;
|
||||
let mut prev_user_msg: Option<&NormalizedMessage> = None;
|
||||
|
||||
for (idx, role, norm_msg) in normalized_messages {
|
||||
if *role != "human" {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut found_in_turn = false;
|
||||
|
||||
for pattern in correction_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::MisalignmentCorrection,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "correction"})),
|
||||
);
|
||||
found_in_turn = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found_in_turn {
|
||||
prev_user_idx = Some(*idx);
|
||||
prev_user_msg = Some(norm_msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
for pattern in rephrase_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::MisalignmentRephrase,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "rephrase"})),
|
||||
);
|
||||
found_in_turn = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found_in_turn {
|
||||
prev_user_idx = Some(*idx);
|
||||
prev_user_msg = Some(norm_msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
for pattern in clarification_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::MisalignmentClarification,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "clarification"})),
|
||||
);
|
||||
found_in_turn = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if found_in_turn {
|
||||
prev_user_idx = Some(*idx);
|
||||
prev_user_msg = Some(norm_msg);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Semantic rephrase vs the previous user message (recent only).
|
||||
if let (Some(prev_idx), Some(prev_msg)) = (prev_user_idx, prev_user_msg) {
|
||||
let turns_between = idx.saturating_sub(prev_idx);
|
||||
if turns_between <= 3 && is_similar_rephrase(norm_msg, prev_msg, 0.75, 4, 0.5) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::MisalignmentRephrase,
|
||||
*idx,
|
||||
"[similar rephrase detected]",
|
||||
)
|
||||
.with_confidence(0.8)
|
||||
.with_metadata(json!({
|
||||
"pattern_type": "semantic_rephrase",
|
||||
"compared_to": prev_idx,
|
||||
})),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
prev_user_idx = Some(*idx);
|
||||
prev_user_msg = Some(norm_msg);
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for building a normalized message from raw text.
    fn norm(text: &str) -> NormalizedMessage {
        NormalizedMessage::from_text(text, 2000)
    }

    /// Turn `(role, text)` pairs into the indexed tuples the analyzer expects.
    fn conversation(
        turns: &[(&'static str, &str)],
    ) -> Vec<(usize, &'static str, NormalizedMessage)> {
        turns
            .iter()
            .enumerate()
            .map(|(i, (role, text))| (i, *role, norm(text)))
            .collect()
    }

    #[test]
    fn detects_explicit_correction() {
        let msgs = conversation(&[
            ("human", "Show me my orders"),
            ("gpt", "Sure, here are your invoices"),
            ("human", "No, I meant my recent orders"),
        ]);
        let result = analyze_misalignment(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::MisalignmentCorrection)));
    }

    #[test]
    fn detects_rephrase_marker() {
        let msgs = conversation(&[
            ("human", "Show me X"),
            ("gpt", "Sure"),
            ("human", "Let me rephrase: I want X grouped by date"),
        ]);
        let result = analyze_misalignment(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::MisalignmentRephrase)));
    }

    #[test]
    fn detects_clarification_request() {
        let msgs = conversation(&[
            ("human", "Run the report"),
            ("gpt", "Foobar quux baz."),
            ("human", "I don't understand what you mean"),
        ]);
        let result = analyze_misalignment(&msgs, 0.65, 0.6);
        assert!(result
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::MisalignmentClarification)));
    }

    #[test]
    fn confirmation_is_not_a_rephrase() {
        let first = norm("Yes, that's correct, please proceed with the order");
        let second = norm("please proceed with the order for the same product");
        assert!(!is_similar_rephrase(&first, &second, 0.75, 4, 0.5));
    }
}
|
||||
10
crates/brightstaff/src/signals/interaction/mod.rs
Normal file
10
crates/brightstaff/src/signals/interaction/mod.rs
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
//! Interaction signals: misalignment, stagnation, disengagement, satisfaction.
|
||||
//!
|
||||
//! These signals capture how the dialogue itself unfolds (semantic alignment,
|
||||
//! progress, engagement, closure) independent of tool execution outcomes.
|
||||
|
||||
pub mod constants;
|
||||
pub mod disengagement;
|
||||
pub mod misalignment;
|
||||
pub mod satisfaction;
|
||||
pub mod stagnation;
|
||||
177
crates/brightstaff/src/signals/interaction/satisfaction.rs
Normal file
177
crates/brightstaff/src/signals/interaction/satisfaction.rs
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
//! Satisfaction signals: gratitude, confirmation, success.
|
||||
//!
|
||||
//! Direct port of `signals/interaction/satisfaction.py`.
|
||||
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use serde_json::json;
|
||||
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType};
|
||||
use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern};
|
||||
|
||||
/// Phrases indicating the user is thanking the assistant or explicitly
/// calling the help valuable.
const GRATITUDE_PATTERN_TEXTS: &[&str] = &[
    "that's helpful",
    "that helps",
    "this helps",
    "appreciate it",
    "appreciate that",
    "that's perfect",
    "exactly what i needed",
    "just what i needed",
    "you're the best",
    "you rock",
    "you're awesome",
    "you're amazing",
    "you're great",
];

/// Phrases indicating the user accepts or approves a proposed answer/plan.
const CONFIRMATION_PATTERN_TEXTS: &[&str] = &[
    "that works",
    "this works",
    "that's great",
    "that's amazing",
    "this is great",
    "that's awesome",
    "love it",
    "love this",
    "love that",
];

/// Phrases indicating a suggested fix or action actually succeeded.
const SUCCESS_PATTERN_TEXTS: &[&str] = &[
    "it worked",
    "that worked",
    "this worked",
    "it's working",
    "that's working",
    "this is working",
];
|
||||
|
||||
fn gratitude_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(GRATITUDE_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn confirmation_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(CONFIRMATION_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
fn success_patterns() -> &'static Vec<NormalizedPattern> {
|
||||
static PATS: OnceLock<Vec<NormalizedPattern>> = OnceLock::new();
|
||||
PATS.get_or_init(|| normalize_patterns(SUCCESS_PATTERN_TEXTS))
|
||||
}
|
||||
|
||||
pub fn analyze_satisfaction(
|
||||
normalized_messages: &[(usize, &str, NormalizedMessage)],
|
||||
char_ngram_threshold: f32,
|
||||
token_cosine_threshold: f32,
|
||||
) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("satisfaction");
|
||||
|
||||
for (idx, role, norm_msg) in normalized_messages {
|
||||
if *role != "human" {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut found = false;
|
||||
|
||||
for pattern in gratitude_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::SatisfactionGratitude,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "gratitude"})),
|
||||
);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if found {
|
||||
continue;
|
||||
}
|
||||
|
||||
for pattern in confirmation_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::SatisfactionConfirmation,
|
||||
*idx,
|
||||
pattern.raw.clone(),
|
||||
)
|
||||
.with_metadata(json!({"pattern_type": "confirmation"})),
|
||||
);
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if found {
|
||||
continue;
|
||||
}
|
||||
|
||||
for pattern in success_patterns() {
|
||||
if norm_msg.matches_normalized_pattern(
|
||||
pattern,
|
||||
char_ngram_threshold,
|
||||
token_cosine_threshold,
|
||||
) {
|
||||
group.add_signal(
|
||||
SignalInstance::new(SignalType::SatisfactionSuccess, *idx, pattern.raw.clone())
|
||||
.with_metadata(json!({"pattern_type": "success"})),
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Normalize a raw string with the default 2000-char cap.
    fn nm(s: &str) -> NormalizedMessage {
        NormalizedMessage::from_text(s, 2000)
    }

    #[test]
    fn detects_gratitude() {
        let msgs = vec![(0usize, "human", nm("That's perfect, appreciate it!"))];
        let g = analyze_satisfaction(&msgs, 0.65, 0.6);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::SatisfactionGratitude)));
    }

    #[test]
    fn detects_confirmation() {
        let msgs = vec![(0usize, "human", nm("That works for me, thanks"))];
        let g = analyze_satisfaction(&msgs, 0.65, 0.6);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::SatisfactionConfirmation)));
    }

    #[test]
    fn detects_success() {
        let msgs = vec![(0usize, "human", nm("Great, it worked!"))];
        let g = analyze_satisfaction(&msgs, 0.65, 0.6);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::SatisfactionSuccess)));
    }
}
|
||||
241
crates/brightstaff/src/signals/interaction/stagnation.rs
Normal file
241
crates/brightstaff/src/signals/interaction/stagnation.rs
Normal file
|
|
@ -0,0 +1,241 @@
|
|||
//! Stagnation signals: dragging (turn-count efficiency) and repetition.
|
||||
//!
|
||||
//! Direct port of `signals/interaction/stagnation.py`.
|
||||
|
||||
use serde_json::json;
|
||||
|
||||
use super::constants::{starts_with_prefix, POSITIVE_PREFIXES};
|
||||
use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType, TurnMetrics};
|
||||
use crate::signals::text_processing::NormalizedMessage;
|
||||
|
||||
/// Adapter row used by stagnation::dragging detector. Mirrors the ShareGPT
/// `{"from": role, "value": text}` shape used in the Python reference.
pub struct ShareGptMsg<'a> {
    /// Speaker role: `"human"` or `"gpt"`; any other value is ignored by
    /// `analyze_dragging`'s turn tally.
    pub from: &'a str,
}
|
||||
|
||||
pub fn analyze_dragging(
|
||||
messages: &[ShareGptMsg<'_>],
|
||||
baseline_turns: usize,
|
||||
efficiency_threshold: f32,
|
||||
) -> (SignalGroup, TurnMetrics) {
|
||||
let mut group = SignalGroup::new("stagnation");
|
||||
|
||||
let mut user_turns: usize = 0;
|
||||
let mut assistant_turns: usize = 0;
|
||||
for m in messages {
|
||||
match m.from {
|
||||
"human" => user_turns += 1,
|
||||
"gpt" => assistant_turns += 1,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
let total_turns = user_turns;
|
||||
let efficiency_score: f32 = if total_turns == 0 || total_turns <= baseline_turns {
|
||||
1.0
|
||||
} else {
|
||||
let excess = (total_turns - baseline_turns) as f32;
|
||||
1.0 / (1.0 + excess * 0.25)
|
||||
};
|
||||
|
||||
let is_dragging = efficiency_score < efficiency_threshold;
|
||||
let metrics = TurnMetrics {
|
||||
total_turns,
|
||||
user_turns,
|
||||
assistant_turns,
|
||||
is_dragging,
|
||||
efficiency_score,
|
||||
};
|
||||
|
||||
if is_dragging {
|
||||
let last_idx = messages.len().saturating_sub(1);
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::StagnationDragging,
|
||||
last_idx,
|
||||
format!(
|
||||
"Conversation dragging: {} turns (efficiency: {:.2})",
|
||||
total_turns, efficiency_score
|
||||
),
|
||||
)
|
||||
.with_confidence(1.0 - efficiency_score)
|
||||
.with_metadata(json!({
|
||||
"total_turns": total_turns,
|
||||
"efficiency_score": efficiency_score,
|
||||
"baseline_turns": baseline_turns,
|
||||
})),
|
||||
);
|
||||
}
|
||||
|
||||
(group, metrics)
|
||||
}
|
||||
|
||||
pub fn analyze_repetition(
|
||||
normalized_messages: &[(usize, &str, NormalizedMessage)],
|
||||
lookback: usize,
|
||||
exact_threshold: f32,
|
||||
near_duplicate_threshold: f32,
|
||||
) -> SignalGroup {
|
||||
let mut group = SignalGroup::new("stagnation");
|
||||
|
||||
// We keep references into `normalized_messages`. Since `normalized_messages`
|
||||
// is borrowed for the whole function, this avoids cloning.
|
||||
let mut prev_human: Vec<(usize, &NormalizedMessage)> = Vec::new();
|
||||
let mut prev_gpt: Vec<(usize, &NormalizedMessage)> = Vec::new();
|
||||
|
||||
for (idx, role, norm_msg) in normalized_messages {
|
||||
if *role != "human" && *role != "gpt" {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip human positive-prefix messages; they're naturally repetitive.
|
||||
if *role == "human" && starts_with_prefix(&norm_msg.raw, POSITIVE_PREFIXES) {
|
||||
prev_human.push((*idx, norm_msg));
|
||||
continue;
|
||||
}
|
||||
|
||||
if norm_msg.tokens.len() < 5 {
|
||||
if *role == "human" {
|
||||
prev_human.push((*idx, norm_msg));
|
||||
} else {
|
||||
prev_gpt.push((*idx, norm_msg));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
let prev = if *role == "human" {
|
||||
&prev_human
|
||||
} else {
|
||||
&prev_gpt
|
||||
};
|
||||
let start = prev.len().saturating_sub(lookback);
|
||||
let mut matched = false;
|
||||
for (prev_idx, prev_msg) in &prev[start..] {
|
||||
if prev_msg.tokens.len() < 5 {
|
||||
continue;
|
||||
}
|
||||
let similarity = norm_msg.ngram_similarity_with_message(prev_msg);
|
||||
if similarity >= exact_threshold {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::StagnationRepetition,
|
||||
*idx,
|
||||
format!("Exact repetition with message {}", prev_idx),
|
||||
)
|
||||
.with_confidence(similarity)
|
||||
.with_metadata(json!({
|
||||
"repetition_type": "exact",
|
||||
"compared_to": prev_idx,
|
||||
"similarity": similarity,
|
||||
"role": role,
|
||||
})),
|
||||
);
|
||||
matched = true;
|
||||
break;
|
||||
} else if similarity >= near_duplicate_threshold {
|
||||
group.add_signal(
|
||||
SignalInstance::new(
|
||||
SignalType::StagnationRepetition,
|
||||
*idx,
|
||||
format!("Near-duplicate with message {}", prev_idx),
|
||||
)
|
||||
.with_confidence(similarity)
|
||||
.with_metadata(json!({
|
||||
"repetition_type": "near_duplicate",
|
||||
"compared_to": prev_idx,
|
||||
"similarity": similarity,
|
||||
"role": role,
|
||||
})),
|
||||
);
|
||||
matched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
let _ = matched;
|
||||
|
||||
if *role == "human" {
|
||||
prev_human.push((*idx, norm_msg));
|
||||
} else {
|
||||
prev_gpt.push((*idx, norm_msg));
|
||||
}
|
||||
}
|
||||
|
||||
group
|
||||
}
|
||||
|
||||
/// Combined stagnation analyzer: dragging + repetition.
|
||||
pub fn analyze_stagnation(
|
||||
messages: &[ShareGptMsg<'_>],
|
||||
normalized_messages: &[(usize, &str, NormalizedMessage)],
|
||||
baseline_turns: usize,
|
||||
) -> (SignalGroup, TurnMetrics) {
|
||||
let (dragging_group, metrics) = analyze_dragging(messages, baseline_turns, 0.5);
|
||||
let repetition_group = analyze_repetition(normalized_messages, 2, 0.95, 0.85);
|
||||
|
||||
let mut combined = SignalGroup::new("stagnation");
|
||||
for s in dragging_group.signals.iter().cloned() {
|
||||
combined.add_signal(s);
|
||||
}
|
||||
for s in repetition_group.signals.iter().cloned() {
|
||||
combined.add_signal(s);
|
||||
}
|
||||
(combined, metrics)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Normalize a raw string with the default 2000-char cap.
    fn nm(s: &str) -> NormalizedMessage {
        NormalizedMessage::from_text(s, 2000)
    }

    #[test]
    fn dragging_after_many_user_turns() {
        // 15 user turns against a baseline of 5 drives efficiency below 0.5.
        let msgs: Vec<_> = (0..15)
            .flat_map(|_| [ShareGptMsg { from: "human" }, ShareGptMsg { from: "gpt" }])
            .collect();
        let (g, m) = analyze_dragging(&msgs, 5, 0.5);
        assert!(m.is_dragging);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::StagnationDragging)));
    }

    #[test]
    fn no_dragging_below_baseline() {
        // 2 user turns, baseline 5: efficiency stays at 1.0.
        let msgs = vec![
            ShareGptMsg { from: "human" },
            ShareGptMsg { from: "gpt" },
            ShareGptMsg { from: "human" },
            ShareGptMsg { from: "gpt" },
        ];
        let (g, m) = analyze_dragging(&msgs, 5, 0.5);
        assert!(!m.is_dragging);
        assert!(g.signals.is_empty());
    }

    #[test]
    fn detects_exact_repetition_in_user_messages() {
        // Identical human message repeated within the lookback window.
        let n = vec![
            (
                0usize,
                "human",
                nm("This widget is broken and needs repair right now"),
            ),
            (1, "gpt", nm("Sorry to hear that. Let me look into it.")),
            (
                2,
                "human",
                nm("This widget is broken and needs repair right now"),
            ),
        ];
        let g = analyze_repetition(&n, 2, 0.95, 0.85);
        assert!(g
            .signals
            .iter()
            .any(|s| matches!(s.signal_type, SignalType::StagnationRepetition)));
    }
}
|
||||
|
|
@ -1,3 +1,26 @@
|
|||
mod analyzer;
|
||||
//! Plano signals: behavioral quality indicators for agent interactions.
|
||||
//!
|
||||
//! This is a Rust port of the paper-aligned Python reference implementation at
|
||||
//! `https://github.com/katanemo/signals` (or `/Users/shashmi/repos/signals`).
|
||||
//!
|
||||
//! Three layers of signals are detected from a conversation transcript:
|
||||
//!
|
||||
//! - **Interaction**: misalignment, stagnation, disengagement, satisfaction
|
||||
//! - **Execution**: failure, loops
|
||||
//! - **Environment**: exhaustion
|
||||
//!
|
||||
//! See `SignalType` for the full hierarchy.
|
||||
|
||||
pub use analyzer::*;
|
||||
pub mod analyzer;
|
||||
pub mod environment;
|
||||
pub mod execution;
|
||||
pub mod interaction;
|
||||
pub mod otel;
|
||||
pub mod schemas;
|
||||
pub mod text_processing;
|
||||
|
||||
pub use analyzer::{SignalAnalyzer, FLAG_MARKER};
|
||||
pub use schemas::{
|
||||
EnvironmentSignals, ExecutionSignals, InteractionQuality, InteractionSignals, SignalGroup,
|
||||
SignalInstance, SignalLayer, SignalReport, SignalType, TurnMetrics,
|
||||
};
|
||||
|
|
|
|||
241
crates/brightstaff/src/signals/otel.rs
Normal file
241
crates/brightstaff/src/signals/otel.rs
Normal file
|
|
@ -0,0 +1,241 @@
|
|||
//! Helpers for emitting `SignalReport` data to OpenTelemetry spans.
|
||||
//!
|
||||
//! Two sets of attributes are emitted:
|
||||
//!
|
||||
//! - **Legacy** keys under `signals.*` (e.g. `signals.frustration.count`),
|
||||
//! computed from the new layered counts. Preserved for one release for
|
||||
//! backward compatibility with existing dashboards.
|
||||
//! - **New** layered keys (e.g. `signals.interaction.misalignment.count`),
|
||||
//! one set of `count`/`severity` attributes per category, plus per-instance
|
||||
//! span events named `signal.<dotted_signal_type>`.
|
||||
|
||||
use opentelemetry::trace::SpanRef;
|
||||
use opentelemetry::KeyValue;
|
||||
|
||||
use crate::signals::schemas::{SignalGroup, SignalReport, SignalType};
|
||||
|
||||
/// Emit both legacy and layered OTel attributes/events for a `SignalReport`.
///
/// Returns `true` if any "concerning" signal was found, mirroring the previous
/// behavior used to flag the span operation name.
pub fn emit_signals_to_span(span: &SpanRef<'_>, report: &SignalReport) -> bool {
    // Emission order: overall quality first, then the new layered keys, then
    // the derived legacy keys, and finally one span event per signal.
    emit_overall(span, report);
    emit_layered_attributes(span, report);
    emit_legacy_attributes(span, report);
    emit_signal_events(span, report);

    is_concerning(report)
}
|
||||
|
||||
fn emit_overall(span: &SpanRef<'_>, report: &SignalReport) {
|
||||
span.set_attribute(KeyValue::new(
|
||||
"signals.quality",
|
||||
report.overall_quality.as_str().to_string(),
|
||||
));
|
||||
span.set_attribute(KeyValue::new(
|
||||
"signals.quality_score",
|
||||
report.quality_score as f64,
|
||||
));
|
||||
span.set_attribute(KeyValue::new(
|
||||
"signals.turn_count",
|
||||
report.turn_metrics.total_turns as i64,
|
||||
));
|
||||
span.set_attribute(KeyValue::new(
|
||||
"signals.efficiency_score",
|
||||
report.turn_metrics.efficiency_score as f64,
|
||||
));
|
||||
}
|
||||
|
||||
fn emit_group(span: &SpanRef<'_>, prefix: &str, group: &SignalGroup) {
|
||||
if group.count == 0 {
|
||||
return;
|
||||
}
|
||||
span.set_attribute(KeyValue::new(
|
||||
format!("{}.count", prefix),
|
||||
group.count as i64,
|
||||
));
|
||||
span.set_attribute(KeyValue::new(
|
||||
format!("{}.severity", prefix),
|
||||
group.severity as i64,
|
||||
));
|
||||
}
|
||||
|
||||
fn emit_layered_attributes(span: &SpanRef<'_>, report: &SignalReport) {
|
||||
emit_group(
|
||||
span,
|
||||
"signals.interaction.misalignment",
|
||||
&report.interaction.misalignment,
|
||||
);
|
||||
emit_group(
|
||||
span,
|
||||
"signals.interaction.stagnation",
|
||||
&report.interaction.stagnation,
|
||||
);
|
||||
emit_group(
|
||||
span,
|
||||
"signals.interaction.disengagement",
|
||||
&report.interaction.disengagement,
|
||||
);
|
||||
emit_group(
|
||||
span,
|
||||
"signals.interaction.satisfaction",
|
||||
&report.interaction.satisfaction,
|
||||
);
|
||||
emit_group(span, "signals.execution.failure", &report.execution.failure);
|
||||
emit_group(span, "signals.execution.loops", &report.execution.loops);
|
||||
emit_group(
|
||||
span,
|
||||
"signals.environment.exhaustion",
|
||||
&report.environment.exhaustion,
|
||||
);
|
||||
}
|
||||
|
||||
/// Number of signal instances of exactly type `t` across the whole report.
fn count_of(report: &SignalReport, t: SignalType) -> usize {
    report.iter_signals().filter(|s| s.signal_type == t).count()
}
|
||||
|
||||
/// Emit the legacy attribute keys consumed by existing dashboards. These are
/// derived from the new `SignalReport` so no detector contract is broken.
fn emit_legacy_attributes(span: &SpanRef<'_>, report: &SignalReport) {
    use crate::tracing::signals as legacy;

    // signals.follow_up.repair.{count,ratio} - misalignment proxies repairs.
    let repair_count = report.interaction.misalignment.count;
    // Clamp to 1 so the ratio never divides by zero.
    let user_turns = report.turn_metrics.user_turns.max(1) as f32;
    if repair_count > 0 {
        span.set_attribute(KeyValue::new(legacy::REPAIR_COUNT, repair_count as i64));
        let ratio = repair_count as f32 / user_turns;
        span.set_attribute(KeyValue::new(legacy::REPAIR_RATIO, format!("{:.3}", ratio)));
    }

    // signals.frustration.{count,severity} - disengagement.negative_stance is
    // the closest legacy analog of "frustration".
    let frustration_count = count_of(report, SignalType::DisengagementNegativeStance);
    if frustration_count > 0 {
        span.set_attribute(KeyValue::new(
            legacy::FRUSTRATION_COUNT,
            frustration_count as i64,
        ));
        // Same 0-3 bucketing as `SignalGroup::update_severity`. The `0` arm
        // is unreachable inside this `if` but kept so the match stays total.
        let severity = match frustration_count {
            0 => 0,
            1..=2 => 1,
            3..=4 => 2,
            _ => 3,
        };
        span.set_attribute(KeyValue::new(legacy::FRUSTRATION_SEVERITY, severity as i64));
    }

    // signals.repetition.count - stagnation (repetition + dragging).
    if report.interaction.stagnation.count > 0 {
        span.set_attribute(KeyValue::new(
            legacy::REPETITION_COUNT,
            report.interaction.stagnation.count as i64,
        ));
    }

    // signals.escalation.requested - any escalation/quit signal.
    let escalated = report.interaction.disengagement.signals.iter().any(|s| {
        matches!(
            s.signal_type,
            SignalType::DisengagementEscalation | SignalType::DisengagementQuit
        )
    });
    if escalated {
        span.set_attribute(KeyValue::new(legacy::ESCALATION_REQUESTED, true));
    }

    // signals.positive_feedback.count - satisfaction signals.
    if report.interaction.satisfaction.count > 0 {
        span.set_attribute(KeyValue::new(
            legacy::POSITIVE_FEEDBACK_COUNT,
            report.interaction.satisfaction.count as i64,
        ));
    }
}
|
||||
|
||||
fn emit_signal_events(span: &SpanRef<'_>, report: &SignalReport) {
|
||||
for sig in report.iter_signals() {
|
||||
let event_name = format!("signal.{}", sig.signal_type.as_str());
|
||||
let mut attrs: Vec<KeyValue> = vec![
|
||||
KeyValue::new("signal.type", sig.signal_type.as_str().to_string()),
|
||||
KeyValue::new("signal.message_index", sig.message_index as i64),
|
||||
KeyValue::new("signal.confidence", sig.confidence as f64),
|
||||
];
|
||||
if !sig.snippet.is_empty() {
|
||||
attrs.push(KeyValue::new("signal.snippet", sig.snippet.clone()));
|
||||
}
|
||||
if !sig.metadata.is_null() {
|
||||
attrs.push(KeyValue::new("signal.metadata", sig.metadata.to_string()));
|
||||
}
|
||||
span.add_event(event_name, attrs);
|
||||
}
|
||||
}
|
||||
|
||||
fn is_concerning(report: &SignalReport) -> bool {
|
||||
use crate::signals::schemas::InteractionQuality;
|
||||
if matches!(
|
||||
report.overall_quality,
|
||||
InteractionQuality::Poor | InteractionQuality::Severe
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
if report.interaction.disengagement.count > 0 {
|
||||
return true;
|
||||
}
|
||||
if report.interaction.stagnation.count > 2 {
|
||||
return true;
|
||||
}
|
||||
if report.execution.failure.count > 0 || report.execution.loops.count > 0 {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::signals::schemas::{
        EnvironmentSignals, ExecutionSignals, InteractionQuality, InteractionSignals, SignalGroup,
        SignalInstance, SignalReport, SignalType, TurnMetrics,
    };

    // Minimal report with one disengagement.escalation signal and a
    // "severe" overall quality.
    fn report_with_escalation() -> SignalReport {
        let mut diseng = SignalGroup::new("disengagement");
        diseng.add_signal(SignalInstance::new(
            SignalType::DisengagementEscalation,
            3,
            "get me a human",
        ));
        SignalReport {
            interaction: InteractionSignals {
                disengagement: diseng,
                ..InteractionSignals::default()
            },
            execution: ExecutionSignals::default(),
            environment: EnvironmentSignals::default(),
            overall_quality: InteractionQuality::Severe,
            quality_score: 0.0,
            turn_metrics: TurnMetrics {
                total_turns: 3,
                user_turns: 2,
                assistant_turns: 1,
                is_dragging: false,
                efficiency_score: 1.0,
            },
            summary: String::new(),
        }
    }

    #[test]
    fn is_concerning_flags_disengagement() {
        let r = report_with_escalation();
        assert!(is_concerning(&r));
    }

    #[test]
    fn count_of_returns_per_type_count() {
        let r = report_with_escalation();
        assert_eq!(count_of(&r, SignalType::DisengagementEscalation), 1);
        assert_eq!(count_of(&r, SignalType::DisengagementNegativeStance), 0);
    }
}
|
||||
431
crates/brightstaff/src/signals/schemas.rs
Normal file
431
crates/brightstaff/src/signals/schemas.rs
Normal file
|
|
@ -0,0 +1,431 @@
|
|||
//! Data shapes for the signal analyzer.
|
||||
//!
|
||||
//! Mirrors `signals/schemas.py` from the reference implementation. Where the
|
||||
//! Python library exposes a `Dict[str, SignalGroup]` partitioned by category,
|
||||
//! the Rust port uses strongly-typed sub-structs (`InteractionSignals`,
|
||||
//! `ExecutionSignals`, `EnvironmentSignals`) for the same partitioning.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Hierarchical signal type. The 20 leaf variants mirror the paper taxonomy
/// and the Python reference's `SignalType` string enum.
///
/// NOTE(review): 25 variants are declared below, but the sentence above says
/// "20" — confirm which count is correct and fix the doc accordingly.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum SignalType {
    // Interaction > Misalignment
    MisalignmentCorrection,
    MisalignmentRephrase,
    MisalignmentClarification,

    // Interaction > Stagnation
    StagnationDragging,
    StagnationRepetition,

    // Interaction > Disengagement
    DisengagementEscalation,
    DisengagementQuit,
    DisengagementNegativeStance,

    // Interaction > Satisfaction
    SatisfactionGratitude,
    SatisfactionConfirmation,
    SatisfactionSuccess,

    // Execution > Failure
    ExecutionFailureInvalidArgs,
    ExecutionFailureBadQuery,
    ExecutionFailureToolNotFound,
    ExecutionFailureAuthMisuse,
    ExecutionFailureStateError,

    // Execution > Loops
    ExecutionLoopsRetry,
    ExecutionLoopsParameterDrift,
    ExecutionLoopsOscillation,

    // Environment > Exhaustion
    EnvironmentExhaustionApiError,
    EnvironmentExhaustionTimeout,
    EnvironmentExhaustionRateLimit,
    EnvironmentExhaustionNetwork,
    EnvironmentExhaustionMalformed,
    EnvironmentExhaustionContextOverflow,
}
|
||||
|
||||
impl SignalType {
    /// Dotted hierarchical string identifier, e.g.
    /// `"interaction.misalignment.correction"`. Matches the Python reference's
    /// `SignalType` enum *value* strings byte-for-byte.
    pub fn as_str(&self) -> &'static str {
        match self {
            SignalType::MisalignmentCorrection => "interaction.misalignment.correction",
            SignalType::MisalignmentRephrase => "interaction.misalignment.rephrase",
            SignalType::MisalignmentClarification => "interaction.misalignment.clarification",
            SignalType::StagnationDragging => "interaction.stagnation.dragging",
            SignalType::StagnationRepetition => "interaction.stagnation.repetition",
            SignalType::DisengagementEscalation => "interaction.disengagement.escalation",
            SignalType::DisengagementQuit => "interaction.disengagement.quit",
            SignalType::DisengagementNegativeStance => "interaction.disengagement.negative_stance",
            SignalType::SatisfactionGratitude => "interaction.satisfaction.gratitude",
            SignalType::SatisfactionConfirmation => "interaction.satisfaction.confirmation",
            SignalType::SatisfactionSuccess => "interaction.satisfaction.success",
            SignalType::ExecutionFailureInvalidArgs => "execution.failure.invalid_args",
            SignalType::ExecutionFailureBadQuery => "execution.failure.bad_query",
            SignalType::ExecutionFailureToolNotFound => "execution.failure.tool_not_found",
            SignalType::ExecutionFailureAuthMisuse => "execution.failure.auth_misuse",
            SignalType::ExecutionFailureStateError => "execution.failure.state_error",
            SignalType::ExecutionLoopsRetry => "execution.loops.retry",
            SignalType::ExecutionLoopsParameterDrift => "execution.loops.parameter_drift",
            SignalType::ExecutionLoopsOscillation => "execution.loops.oscillation",
            SignalType::EnvironmentExhaustionApiError => "environment.exhaustion.api_error",
            SignalType::EnvironmentExhaustionTimeout => "environment.exhaustion.timeout",
            SignalType::EnvironmentExhaustionRateLimit => "environment.exhaustion.rate_limit",
            SignalType::EnvironmentExhaustionNetwork => "environment.exhaustion.network",
            SignalType::EnvironmentExhaustionMalformed => {
                "environment.exhaustion.malformed_response"
            }
            SignalType::EnvironmentExhaustionContextOverflow => {
                "environment.exhaustion.context_overflow"
            }
        }
    }

    /// Top-level layer (interaction / execution / environment) this signal
    /// type belongs to. Exhaustive match: adding a variant forces an update
    /// here, keeping the mapping total.
    pub fn layer(&self) -> SignalLayer {
        match self {
            SignalType::MisalignmentCorrection
            | SignalType::MisalignmentRephrase
            | SignalType::MisalignmentClarification
            | SignalType::StagnationDragging
            | SignalType::StagnationRepetition
            | SignalType::DisengagementEscalation
            | SignalType::DisengagementQuit
            | SignalType::DisengagementNegativeStance
            | SignalType::SatisfactionGratitude
            | SignalType::SatisfactionConfirmation
            | SignalType::SatisfactionSuccess => SignalLayer::Interaction,
            SignalType::ExecutionFailureInvalidArgs
            | SignalType::ExecutionFailureBadQuery
            | SignalType::ExecutionFailureToolNotFound
            | SignalType::ExecutionFailureAuthMisuse
            | SignalType::ExecutionFailureStateError
            | SignalType::ExecutionLoopsRetry
            | SignalType::ExecutionLoopsParameterDrift
            | SignalType::ExecutionLoopsOscillation => SignalLayer::Execution,
            SignalType::EnvironmentExhaustionApiError
            | SignalType::EnvironmentExhaustionTimeout
            | SignalType::EnvironmentExhaustionRateLimit
            | SignalType::EnvironmentExhaustionNetwork
            | SignalType::EnvironmentExhaustionMalformed
            | SignalType::EnvironmentExhaustionContextOverflow => SignalLayer::Environment,
        }
    }

    /// Category name within the layer (e.g. `"misalignment"`, `"failure"`),
    /// i.e. the middle segment of the dotted identifier.
    pub fn category(&self) -> &'static str {
        // Strip the layer prefix and take everything before the next dot.
        let s = self.as_str();
        let after_layer = s.split_once('.').map(|(_, rest)| rest).unwrap_or(s);
        after_layer
            .split_once('.')
            .map(|(c, _)| c)
            .unwrap_or(after_layer)
    }
}
|
||||
|
||||
/// Top-level layer of the signal taxonomy.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum SignalLayer {
    Interaction,
    Execution,
    Environment,
}

impl SignalLayer {
    /// Lowercase layer name, matching the first segment of the dotted
    /// `SignalType` identifiers.
    pub fn as_str(&self) -> &'static str {
        match self {
            SignalLayer::Interaction => "interaction",
            SignalLayer::Execution => "execution",
            SignalLayer::Environment => "environment",
        }
    }
}
|
||||
|
||||
/// Overall quality assessment for an agent interaction session.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum InteractionQuality {
    Excellent,
    Good,
    Neutral,
    Poor,
    Severe,
}

impl InteractionQuality {
    /// Lowercase string form, used e.g. for the `signals.quality` OTel
    /// attribute.
    pub fn as_str(&self) -> &'static str {
        match self {
            InteractionQuality::Excellent => "excellent",
            InteractionQuality::Good => "good",
            InteractionQuality::Neutral => "neutral",
            InteractionQuality::Poor => "poor",
            InteractionQuality::Severe => "severe",
        }
    }
}
|
||||
|
||||
/// A single detected signal instance.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalInstance {
    pub signal_type: SignalType,
    /// Absolute index into the original conversation `Vec<Message>`.
    pub message_index: usize,
    // Short excerpt or human-readable description of what triggered the
    // signal.
    pub snippet: String,
    // Detector confidence; `new` defaults this to 1.0.
    pub confidence: f32,
    /// Free-form metadata payload mirroring the Python `Dict[str, Any]`.
    /// Stored as a JSON object so we can faithfully reproduce the reference's
    /// flexible per-detector metadata.
    #[serde(default)]
    pub metadata: serde_json::Value,
}
|
||||
|
||||
impl SignalInstance {
    /// New instance with full confidence (1.0) and an empty metadata object.
    pub fn new(signal_type: SignalType, message_index: usize, snippet: impl Into<String>) -> Self {
        Self {
            signal_type,
            message_index,
            snippet: snippet.into(),
            confidence: 1.0,
            metadata: serde_json::Value::Object(serde_json::Map::new()),
        }
    }

    /// Builder: override the default confidence.
    pub fn with_confidence(mut self, c: f32) -> Self {
        self.confidence = c;
        self
    }

    /// Builder: replace the metadata payload wholesale.
    pub fn with_metadata(mut self, m: serde_json::Value) -> Self {
        self.metadata = m;
        self
    }
}
|
||||
|
||||
/// Aggregated signals for a specific category.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalGroup {
    pub category: String,
    // Kept equal to `signals.len()` by `add_signal`.
    pub count: usize,
    pub signals: Vec<SignalInstance>,
    /// Severity level (0-3: none, mild, moderate, severe).
    pub severity: u8,
}
|
||||
|
||||
impl SignalGroup {
|
||||
pub fn new(category: impl Into<String>) -> Self {
|
||||
Self {
|
||||
category: category.into(),
|
||||
count: 0,
|
||||
signals: Vec::new(),
|
||||
severity: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_signal(&mut self, signal: SignalInstance) {
|
||||
self.signals.push(signal);
|
||||
self.count = self.signals.len();
|
||||
self.update_severity();
|
||||
}
|
||||
|
||||
fn update_severity(&mut self) {
|
||||
self.severity = match self.count {
|
||||
0 => 0,
|
||||
1..=2 => 1,
|
||||
3..=4 => 2,
|
||||
_ => 3,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Turn count and efficiency metrics, used by stagnation.dragging.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct TurnMetrics {
    /// Total turn count across the conversation.
    pub total_turns: usize,
    /// Turns attributed to the user role.
    pub user_turns: usize,
    /// Turns attributed to the assistant role.
    pub assistant_turns: usize,
    /// Whether the conversation was flagged as dragging on.
    pub is_dragging: bool,
    /// Efficiency score; semantics are defined by the analyzer that fills
    /// this in (not visible here — see the dragging detector).
    pub efficiency_score: f32,
}
|
||||
|
||||
/// Signal groups for the interaction layer of the taxonomy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InteractionSignals {
    pub misalignment: SignalGroup,
    pub stagnation: SignalGroup,
    pub disengagement: SignalGroup,
    pub satisfaction: SignalGroup,
}
|
||||
|
||||
impl Default for InteractionSignals {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
misalignment: SignalGroup::new("misalignment"),
|
||||
stagnation: SignalGroup::new("stagnation"),
|
||||
disengagement: SignalGroup::new("disengagement"),
|
||||
satisfaction: SignalGroup::new("satisfaction"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InteractionSignals {
|
||||
/// Ratio of misalignment instances to user turns. Used as a quality
|
||||
/// scoring input and as a threshold for the "high misalignment rate"
|
||||
/// summary callout. Mirrors `misalignment.count / max(user_turns, 1)`
|
||||
/// from the Python reference's `_assess_quality` and `_generate_summary`.
|
||||
pub fn misalignment_ratio(&self, user_turns: usize) -> f32 {
|
||||
let denom = user_turns.max(1) as f32;
|
||||
self.misalignment.count as f32 / denom
|
||||
}
|
||||
}
|
||||
|
||||
/// Signal groups for the execution layer of the taxonomy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutionSignals {
    pub failure: SignalGroup,
    pub loops: SignalGroup,
}
|
||||
|
||||
impl Default for ExecutionSignals {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
failure: SignalGroup::new("failure"),
|
||||
loops: SignalGroup::new("loops"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Signal groups for the environment layer of the taxonomy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnvironmentSignals {
    pub exhaustion: SignalGroup,
}
|
||||
|
||||
impl Default for EnvironmentSignals {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
exhaustion: SignalGroup::new("exhaustion"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Complete signal analysis report for a conversation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalReport {
    /// Interaction-layer signal groups.
    pub interaction: InteractionSignals,
    /// Execution-layer signal groups.
    pub execution: ExecutionSignals,
    /// Environment-layer signal groups.
    pub environment: EnvironmentSignals,
    /// Overall quality verdict for the conversation.
    pub overall_quality: InteractionQuality,
    /// Numeric quality score; `Default` initializes it to 50.0.
    pub quality_score: f32,
    /// Turn counts and efficiency metrics.
    pub turn_metrics: TurnMetrics,
    /// Human-readable summary of the analysis.
    pub summary: String,
}
|
||||
|
||||
impl Default for SignalReport {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interaction: InteractionSignals::default(),
|
||||
execution: ExecutionSignals::default(),
|
||||
environment: EnvironmentSignals::default(),
|
||||
overall_quality: InteractionQuality::Neutral,
|
||||
quality_score: 50.0,
|
||||
turn_metrics: TurnMetrics::default(),
|
||||
summary: String::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SignalReport {
|
||||
/// Iterate over every `SignalInstance` across all layers and groups.
|
||||
pub fn iter_signals(&self) -> impl Iterator<Item = &SignalInstance> {
|
||||
self.interaction
|
||||
.misalignment
|
||||
.signals
|
||||
.iter()
|
||||
.chain(self.interaction.stagnation.signals.iter())
|
||||
.chain(self.interaction.disengagement.signals.iter())
|
||||
.chain(self.interaction.satisfaction.signals.iter())
|
||||
.chain(self.execution.failure.signals.iter())
|
||||
.chain(self.execution.loops.signals.iter())
|
||||
.chain(self.environment.exhaustion.signals.iter())
|
||||
}
|
||||
|
||||
pub fn has_signal_type(&self, t: SignalType) -> bool {
|
||||
self.iter_signals().any(|s| s.signal_type == t)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // The string labels must match the layered taxonomy from the reference
    // paper / Python implementation: "layer.category.kind".
    #[test]
    fn signal_type_strings_match_paper_taxonomy() {
        assert_eq!(
            SignalType::MisalignmentCorrection.as_str(),
            "interaction.misalignment.correction"
        );
        assert_eq!(
            SignalType::ExecutionFailureInvalidArgs.as_str(),
            "execution.failure.invalid_args"
        );
        assert_eq!(
            SignalType::EnvironmentExhaustionMalformed.as_str(),
            "environment.exhaustion.malformed_response"
        );
    }

    // Each signal type must report the layer and category label that match
    // its taxonomy string.
    #[test]
    fn signal_type_layer_and_category() {
        assert_eq!(
            SignalType::MisalignmentRephrase.layer(),
            SignalLayer::Interaction
        );
        assert_eq!(SignalType::MisalignmentRephrase.category(), "misalignment");
        assert_eq!(
            SignalType::ExecutionLoopsRetry.layer(),
            SignalLayer::Execution
        );
        assert_eq!(SignalType::ExecutionLoopsRetry.category(), "loops");
        assert_eq!(
            SignalType::EnvironmentExhaustionTimeout.layer(),
            SignalLayer::Environment
        );
        assert_eq!(
            SignalType::EnvironmentExhaustionTimeout.category(),
            "exhaustion"
        );
    }

    // Severity buckets must mirror the Python reference:
    // 0 -> 0, 1-2 -> 1, 3-4 -> 2, 5+ -> 3.
    #[test]
    fn signal_group_severity_buckets_match_python() {
        let mut g = SignalGroup::new("misalignment");
        assert_eq!(g.severity, 0);
        for n in 1..=2 {
            g.add_signal(SignalInstance::new(
                SignalType::MisalignmentCorrection,
                n,
                "x",
            ));
        }
        assert_eq!(g.severity, 1);
        for n in 3..=4 {
            g.add_signal(SignalInstance::new(
                SignalType::MisalignmentCorrection,
                n,
                "x",
            ));
        }
        assert_eq!(g.severity, 2);
        for n in 5..=6 {
            g.add_signal(SignalInstance::new(
                SignalType::MisalignmentCorrection,
                n,
                "x",
            ));
        }
        assert_eq!(g.severity, 3);
    }
}
|
||||
401
crates/brightstaff/src/signals/text_processing.rs
Normal file
401
crates/brightstaff/src/signals/text_processing.rs
Normal file
|
|
@ -0,0 +1,401 @@
|
|||
//! Text normalization and similarity primitives.
|
||||
//!
|
||||
//! Direct Rust port of `signals/text_processing.py` from the reference. The
|
||||
//! shapes (`NormalizedMessage`, `NormalizedPattern`) and similarity formulas
|
||||
//! match the Python implementation exactly so that pattern matching produces
|
||||
//! the same results on the same inputs.
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
/// Size of character n-grams used for fuzzy similarity (3 = trigrams).
pub const NGRAM_SIZE: usize = 3;

/// ASCII punctuation trimmed from token boundaries (same character set as
/// Python's `string.punctuation`); punctuation inside a token is preserved.
const PUNCT_TRIM: &[char] = &[
    '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=',
    '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~',
];
|
||||
|
||||
/// Pre-processed message with normalized text and tokens for efficient matching.
#[derive(Debug, Clone, Default)]
pub struct NormalizedMessage {
    /// Possibly-truncated original text (head + tail when over `max_length`).
    pub raw: String,
    /// Lowercased tokens with boundary punctuation trimmed.
    pub tokens: Vec<String>,
    /// Unique tokens, for O(1) membership tests.
    pub token_set: HashSet<String>,
    /// Adjacent token pairs joined with a single space.
    pub bigram_set: HashSet<String>,
    /// Character n-grams (`NGRAM_SIZE`) over the space-joined tokens.
    pub char_ngram_set: HashSet<String>,
    /// Token -> occurrence count, feeding the cosine similarity.
    pub token_frequency: HashMap<String, usize>,
}
|
||||
|
||||
impl NormalizedMessage {
    /// Create a normalized message from raw text. Mirrors
    /// `NormalizedMessage.from_text` in the reference, including the
    /// head-20%/tail-80% truncation strategy when text exceeds `max_length`.
    pub fn from_text(text: &str, max_length: usize) -> Self {
        let char_count = text.chars().count();

        let raw: String = if char_count <= max_length {
            text.to_string()
        } else {
            let head_len = max_length / 5;
            // Reserve one char for the joining space.
            let tail_len = max_length.saturating_sub(head_len + 1);
            let head: String = text.chars().take(head_len).collect();
            let tail: String = text
                .chars()
                .skip(char_count.saturating_sub(tail_len))
                .collect();
            format!("{} {}", head, tail)
        };

        // Normalize unicode punctuation to ASCII equivalents.
        let normalized_unicode = raw
            .replace(['\u{2019}', '\u{2018}'], "'")
            .replace(['\u{201c}', '\u{201d}'], "\"")
            .replace(['\u{2013}', '\u{2014}'], "-");

        // Lowercase + collapse whitespace (matches Python's `" ".join(s.split())`).
        let normalized: String = normalized_unicode
            .to_lowercase()
            .split_whitespace()
            .collect::<Vec<_>>()
            .join(" ");

        // Tokenize: trim boundary punctuation, keep internal punctuation.
        let mut tokens: Vec<String> = Vec::new();
        for word in normalized.split_whitespace() {
            let stripped: String = word.trim_matches(PUNCT_TRIM).to_string();
            if !stripped.is_empty() {
                tokens.push(stripped);
            }
        }

        let token_set: HashSet<String> = tokens.iter().cloned().collect();

        // Adjacent-token bigrams for phrase-level overlap checks.
        let mut bigram_set: HashSet<String> = HashSet::new();
        for i in 0..tokens.len().saturating_sub(1) {
            bigram_set.insert(format!("{} {}", tokens[i], tokens[i + 1]));
        }

        // Character n-grams are computed over the space-joined tokens, not
        // the raw text, so punctuation trimming affects fuzzy matching too.
        let tokens_text = tokens.join(" ");
        let char_ngram_set = char_ngrams(&tokens_text, NGRAM_SIZE);

        let mut token_frequency: HashMap<String, usize> = HashMap::new();
        for t in &tokens {
            *token_frequency.entry(t.clone()).or_insert(0) += 1;
        }

        Self {
            raw,
            tokens,
            token_set,
            bigram_set,
            char_ngram_set,
            token_frequency,
        }
    }

    /// True when `token` occurs as a whole normalized token.
    pub fn contains_token(&self, token: &str) -> bool {
        self.token_set.contains(token)
    }

    /// True when `phrase`'s whitespace-split tokens occur as a consecutive
    /// run of this message's tokens. Note: the stored tokens are lowercase,
    /// so a non-lowercase `phrase` will never match.
    pub fn contains_phrase(&self, phrase: &str) -> bool {
        let phrase_tokens: Vec<&str> = phrase.split_whitespace().collect();
        if phrase_tokens.is_empty() {
            return false;
        }
        if phrase_tokens.len() == 1 {
            return self.contains_token(phrase_tokens[0]);
        }
        if phrase_tokens.len() > self.tokens.len() {
            return false;
        }
        let n = phrase_tokens.len();
        // Safe subtraction: the length guard above ensures tokens.len() >= n.
        for i in 0..=self.tokens.len() - n {
            if self.tokens[i..i + n]
                .iter()
                .zip(phrase_tokens.iter())
                .all(|(a, b)| a == b)
            {
                return true;
            }
        }
        false
    }

    /// Character n-gram (Jaccard) similarity vs another normalized message.
    pub fn ngram_similarity_with_message(&self, other: &NormalizedMessage) -> f32 {
        jaccard(&self.char_ngram_set, &other.char_ngram_set)
    }

    /// Character n-gram (Jaccard) similarity vs a raw pattern string.
    pub fn ngram_similarity_with_pattern(&self, pattern: &str) -> f32 {
        let normalized = strip_non_word_chars(&pattern.to_lowercase());
        let pattern_ngrams = char_ngrams(&normalized, NGRAM_SIZE);
        jaccard(&self.char_ngram_set, &pattern_ngrams)
    }

    /// Fraction of pattern's ngrams contained in this message's ngram set.
    /// Returns 0.0 when the pattern produces no ngrams (e.g. too short).
    pub fn char_ngram_containment(&self, pattern: &str) -> f32 {
        let normalized = strip_non_word_chars(&pattern.to_lowercase());
        let pattern_ngrams = char_ngrams(&normalized, NGRAM_SIZE);
        if pattern_ngrams.is_empty() {
            return 0.0;
        }
        let contained = pattern_ngrams
            .iter()
            .filter(|ng| self.char_ngram_set.contains(*ng))
            .count();
        contained as f32 / pattern_ngrams.len() as f32
    }

    /// Token-frequency cosine similarity vs a raw pattern string.
    /// The pattern is tokenized the same way as the message (lowercase,
    /// boundary punctuation trimmed) before comparison.
    pub fn token_cosine_similarity(&self, pattern: &str) -> f32 {
        let mut pattern_freq: HashMap<String, usize> = HashMap::new();
        for word in pattern.to_lowercase().split_whitespace() {
            let stripped = word.trim_matches(PUNCT_TRIM);
            if !stripped.is_empty() {
                *pattern_freq.entry(stripped.to_string()).or_insert(0) += 1;
            }
        }
        cosine_freq(&self.token_frequency, &pattern_freq)
    }

    /// Layered match against a pre-normalized pattern. Mirrors
    /// `matches_normalized_pattern` from the reference: exact phrase ->
    /// char-ngram Jaccard -> token cosine.
    pub fn matches_normalized_pattern(
        &self,
        pattern: &NormalizedPattern,
        char_ngram_threshold: f32,
        token_cosine_threshold: f32,
    ) -> bool {
        // Layer 0: exact phrase match using pre-tokenized message.
        let plen = pattern.tokens.len();
        let slen = self.tokens.len();
        if plen > 0 && plen <= slen {
            for i in 0..=slen - plen {
                if self.tokens[i..i + plen] == pattern.tokens[..] {
                    return true;
                }
            }
        }

        // Layer 1: character n-gram Jaccard similarity.
        if !self.char_ngram_set.is_empty() && !pattern.char_ngram_set.is_empty() {
            let inter = self
                .char_ngram_set
                .intersection(&pattern.char_ngram_set)
                .count();
            let union = self.char_ngram_set.union(&pattern.char_ngram_set).count();
            if union > 0 {
                let sim = inter as f32 / union as f32;
                if sim >= char_ngram_threshold {
                    return true;
                }
            }
        }

        // Layer 2: token frequency cosine similarity.
        if !self.token_frequency.is_empty() && !pattern.token_frequency.is_empty() {
            let sim = cosine_freq(&self.token_frequency, &pattern.token_frequency);
            if sim >= token_cosine_threshold {
                return true;
            }
        }

        false
    }
}
|
||||
|
||||
/// Pre-processed pattern with normalized text and pre-computed n-grams/tokens.
#[derive(Debug, Clone, Default)]
pub struct NormalizedPattern {
    /// The original, unmodified pattern string.
    pub raw: String,
    /// Lowercased tokens with boundary punctuation trimmed (matches
    /// `NormalizedMessage` tokenization for exact phrase matching).
    pub tokens: Vec<String>,
    /// Character n-grams over the fully punctuation-stripped pattern.
    pub char_ngram_set: HashSet<String>,
    /// Token -> occurrence count over the punctuation-stripped pattern.
    pub token_frequency: HashMap<String, usize>,
}
|
||||
|
||||
impl NormalizedPattern {
    /// Normalize a raw pattern string: lowercase, fold smart punctuation to
    /// ASCII, collapse whitespace, then pre-compute the tokens, character
    /// n-grams and token frequencies used by the layered matcher.
    pub fn from_text(pattern: &str) -> Self {
        let normalized = pattern
            .to_lowercase()
            .replace(['\u{2019}', '\u{2018}'], "'")
            .replace(['\u{201c}', '\u{201d}'], "\"")
            .replace(['\u{2013}', '\u{2014}'], "-");
        let normalized: String = normalized.split_whitespace().collect::<Vec<_>>().join(" ");

        // Tokenize the same way as NormalizedMessage (trim boundary punctuation,
        // keep internal punctuation).
        let mut tokens: Vec<String> = Vec::new();
        for word in normalized.split_whitespace() {
            let stripped = word.trim_matches(PUNCT_TRIM);
            if !stripped.is_empty() {
                tokens.push(stripped.to_string());
            }
        }

        // For ngrams + cosine, strip ALL punctuation (matches Python's
        // `re.sub(r"[^\w\s]", "", normalized)`).
        let normalized_for_ngrams = strip_non_word_chars(&normalized);
        let char_ngram_set = char_ngrams(&normalized_for_ngrams, NGRAM_SIZE);

        let tokens_no_punct: Vec<&str> = normalized_for_ngrams.split_whitespace().collect();
        let mut token_frequency: HashMap<String, usize> = HashMap::new();
        for t in &tokens_no_punct {
            *token_frequency.entry((*t).to_string()).or_insert(0) += 1;
        }

        Self {
            raw: pattern.to_string(),
            tokens,
            char_ngram_set,
            token_frequency,
        }
    }
}
|
||||
|
||||
/// Convenience: normalize a list of raw pattern strings into `NormalizedPattern`s.
|
||||
pub fn normalize_patterns(patterns: &[&str]) -> Vec<NormalizedPattern> {
|
||||
patterns
|
||||
.iter()
|
||||
.map(|p| NormalizedPattern::from_text(p))
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Similarity primitives
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Set of every length-`n` character window of `s`; empty when `s` holds
/// fewer than `n` characters. Windows are taken by character index, not byte,
/// to mirror Python's string slicing.
fn char_ngrams(s: &str, n: usize) -> HashSet<String> {
    let chars: Vec<char> = s.chars().collect();
    match chars.len().checked_sub(n) {
        // Fewer than `n` characters: no complete n-gram exists.
        None => HashSet::new(),
        Some(last) => (0..=last)
            .map(|i| chars[i..i + n].iter().collect::<String>())
            .collect(),
    }
}
|
||||
|
||||
/// Jaccard similarity `|a ∩ b| / |a ∪ b|` between two string sets.
///
/// Edge cases mirror the Python reference: two empty sets are defined as
/// identical (1.0) and one-sided emptiness as fully dissimilar (0.0).
fn jaccard(a: &HashSet<String>, b: &HashSet<String>) -> f32 {
    if a.is_empty() && b.is_empty() {
        return 1.0;
    }
    if a.is_empty() || b.is_empty() {
        return 0.0;
    }
    // Both sets are non-empty here, so the union is at least 1; the previous
    // `union == 0` guard was unreachable dead code and has been removed.
    let inter = a.intersection(b).count();
    let union = a.union(b).count();
    inter as f32 / union as f32
}
|
||||
|
||||
/// Cosine similarity between two token-frequency maps.
/// Edge cases mirror the Python reference: two empty maps count as identical
/// (1.0), one-sided emptiness as fully dissimilar (0.0).
fn cosine_freq(a: &HashMap<String, usize>, b: &HashMap<String, usize>) -> f32 {
    match (a.is_empty(), b.is_empty()) {
        (true, true) => return 1.0,
        (true, _) | (_, true) => return 0.0,
        (false, false) => {}
    }
    // The dot product only needs tokens present in `b`; anything missing
    // from `a` contributes a zero term.
    let dot: f64 = b
        .iter()
        .map(|(token, &f2)| (a.get(token).copied().unwrap_or(0) * f2) as f64)
        .sum();
    let norm_a = a.values().map(|&f| (f * f) as f64).sum::<f64>().sqrt();
    let norm_b = b.values().map(|&f| (f * f) as f64).sum::<f64>().sqrt();
    if norm_a == 0.0 || norm_b == 0.0 {
        0.0
    } else {
        (dot / (norm_a * norm_b)) as f32
    }
}
|
||||
|
||||
/// Python equivalent: `re.sub(r"[^\w\s]", "", text)` followed by whitespace
/// collapse. Python's `\w` is `[A-Za-z0-9_]` plus unicode word characters; we
/// use Rust's `char::is_alphanumeric()` plus `_` for an equivalent definition.
fn strip_non_word_chars(text: &str) -> String {
    // Drop everything that is neither a word character nor whitespace…
    let kept: String = text
        .chars()
        .filter(|&c| c.is_alphanumeric() || c == '_' || c.is_whitespace())
        .collect();
    // …then collapse runs of whitespace to single spaces.
    kept.split_whitespace().collect::<Vec<_>>().join(" ")
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn normalize_lowercases_and_strips_punctuation() {
        let m = NormalizedMessage::from_text("Hello, World!", 2000);
        assert_eq!(m.tokens, vec!["hello".to_string(), "world".to_string()]);
    }

    // Unicode apostrophes must fold to ASCII so patterns like "don't" match.
    #[test]
    fn normalizes_smart_quotes() {
        let m = NormalizedMessage::from_text("don\u{2019}t", 2000);
        assert!(m.tokens.contains(&"don't".to_string()));
    }

    #[test]
    fn truncates_long_text_with_head_tail() {
        let long = "a".repeat(3000);
        let m = NormalizedMessage::from_text(&long, 2000);
        // raw should be ~ 2000 chars (head + space + tail)
        assert!(m.raw.chars().count() <= 2001);
        assert!(m.raw.starts_with("aa"));
        assert!(m.raw.ends_with("aa"));
    }

    #[test]
    fn contains_phrase_matches_consecutive_tokens() {
        let m = NormalizedMessage::from_text("I think this is great work", 2000);
        assert!(m.contains_phrase("this is great"));
        assert!(!m.contains_phrase("great this"));
    }

    #[test]
    fn matches_pattern_via_exact_phrase() {
        let m = NormalizedMessage::from_text("No, I meant the second one", 2000);
        let p = NormalizedPattern::from_text("no i meant");
        assert!(m.matches_normalized_pattern(&p, 0.65, 0.6));
    }

    #[test]
    fn matches_pattern_via_char_ngram_fuzziness() {
        // Typo in "meant" -> "ment" so layer 0 (exact phrase) cannot match,
        // forcing the matcher to fall back to layer 1 (char n-gram Jaccard).
        let m = NormalizedMessage::from_text("No I ment", 2000);
        let p = NormalizedPattern::from_text("no i meant");
        assert!(m.matches_normalized_pattern(&p, 0.4, 0.6));
    }

    #[test]
    fn jaccard_identical_sets_is_one() {
        let a: HashSet<String> = ["abc", "bcd"].iter().map(|s| s.to_string()).collect();
        assert!((jaccard(&a, &a) - 1.0).abs() < 1e-6);
    }

    // No shared tokens -> zero dot product -> zero similarity.
    #[test]
    fn cosine_freq_orthogonal_is_zero() {
        let mut a: HashMap<String, usize> = HashMap::new();
        a.insert("hello".to_string(), 1);
        let mut b: HashMap<String, usize> = HashMap::new();
        b.insert("world".to_string(), 1);
        assert_eq!(cosine_freq(&a, &b), 0.0);
    }
}
|
||||
|
|
@ -16,10 +16,134 @@ use tracing_opentelemetry::OpenTelemetrySpanExt;
|
|||
use crate::handlers::agents::pipeline::{PipelineError, PipelineProcessor};
|
||||
|
||||
const STREAM_BUFFER_SIZE: usize = 16;
|
||||
use crate::signals::{InteractionQuality, SignalAnalyzer, TextBasedSignalAnalyzer, FLAG_MARKER};
|
||||
use crate::tracing::{llm, set_service_name, signals as signal_constants};
|
||||
/// Cap on accumulated response bytes kept for usage extraction.
|
||||
/// Most chat responses are well under this; pathological ones are dropped without
|
||||
/// affecting pass-through streaming to the client.
|
||||
const USAGE_BUFFER_MAX: usize = 2 * 1024 * 1024;
|
||||
use crate::metrics as bs_metrics;
|
||||
use crate::metrics::labels as metric_labels;
|
||||
use crate::signals::otel::emit_signals_to_span;
|
||||
use crate::signals::{SignalAnalyzer, FLAG_MARKER};
|
||||
use crate::tracing::{llm, set_service_name};
|
||||
use hermesllm::apis::openai::Message;
|
||||
|
||||
/// Parsed usage + resolved-model details from a provider response.
#[derive(Debug, Default, Clone)]
struct ExtractedUsage {
    /// Input/prompt token count (OpenAI `prompt_tokens` / Anthropic `input_tokens`).
    prompt_tokens: Option<i64>,
    /// Output/completion token count (OpenAI `completion_tokens` / Anthropic `output_tokens`).
    completion_tokens: Option<i64>,
    /// Total token count; derived from prompt + completion when absent.
    total_tokens: Option<i64>,
    /// Cached-prompt tokens (OpenAI `prompt_tokens_details.cached_tokens`,
    /// Anthropic `cache_read_input_tokens`, or `cached_content_token_count`).
    cached_input_tokens: Option<i64>,
    /// Anthropic `cache_creation_input_tokens`.
    cache_creation_tokens: Option<i64>,
    /// Reasoning tokens (OpenAI `completion_tokens_details.reasoning_tokens`
    /// or `thoughts_token_count`).
    reasoning_tokens: Option<i64>,
    /// The model the upstream actually used. For router aliases (e.g.
    /// `router:software-engineering`), this differs from the request model.
    resolved_model: Option<String>,
}
|
||||
|
||||
impl ExtractedUsage {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.prompt_tokens.is_none()
|
||||
&& self.completion_tokens.is_none()
|
||||
&& self.total_tokens.is_none()
|
||||
&& self.resolved_model.is_none()
|
||||
}
|
||||
|
||||
fn from_json(value: &serde_json::Value) -> Self {
|
||||
let mut out = Self::default();
|
||||
if let Some(model) = value.get("model").and_then(|v| v.as_str()) {
|
||||
if !model.is_empty() {
|
||||
out.resolved_model = Some(model.to_string());
|
||||
}
|
||||
}
|
||||
if let Some(u) = value.get("usage") {
|
||||
// OpenAI-shape usage
|
||||
out.prompt_tokens = u.get("prompt_tokens").and_then(|v| v.as_i64());
|
||||
out.completion_tokens = u.get("completion_tokens").and_then(|v| v.as_i64());
|
||||
out.total_tokens = u.get("total_tokens").and_then(|v| v.as_i64());
|
||||
out.cached_input_tokens = u
|
||||
.get("prompt_tokens_details")
|
||||
.and_then(|d| d.get("cached_tokens"))
|
||||
.and_then(|v| v.as_i64());
|
||||
out.reasoning_tokens = u
|
||||
.get("completion_tokens_details")
|
||||
.and_then(|d| d.get("reasoning_tokens"))
|
||||
.and_then(|v| v.as_i64());
|
||||
|
||||
// Anthropic-shape fallbacks
|
||||
if out.prompt_tokens.is_none() {
|
||||
out.prompt_tokens = u.get("input_tokens").and_then(|v| v.as_i64());
|
||||
}
|
||||
if out.completion_tokens.is_none() {
|
||||
out.completion_tokens = u.get("output_tokens").and_then(|v| v.as_i64());
|
||||
}
|
||||
if out.total_tokens.is_none() {
|
||||
if let (Some(p), Some(c)) = (out.prompt_tokens, out.completion_tokens) {
|
||||
out.total_tokens = Some(p + c);
|
||||
}
|
||||
}
|
||||
if out.cached_input_tokens.is_none() {
|
||||
out.cached_input_tokens = u.get("cache_read_input_tokens").and_then(|v| v.as_i64());
|
||||
}
|
||||
if out.cached_input_tokens.is_none() {
|
||||
out.cached_input_tokens =
|
||||
u.get("cached_content_token_count").and_then(|v| v.as_i64());
|
||||
}
|
||||
out.cache_creation_tokens = u
|
||||
.get("cache_creation_input_tokens")
|
||||
.and_then(|v| v.as_i64());
|
||||
if out.reasoning_tokens.is_none() {
|
||||
out.reasoning_tokens = u.get("thoughts_token_count").and_then(|v| v.as_i64());
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
/// Try to pull usage out of an accumulated response body.
|
||||
/// Handles both a single JSON object (non-streaming) and SSE streams where the
|
||||
/// final `data: {...}` event carries the `usage` field.
|
||||
fn extract_usage_from_bytes(buf: &[u8]) -> ExtractedUsage {
|
||||
if buf.is_empty() {
|
||||
return ExtractedUsage::default();
|
||||
}
|
||||
|
||||
// Fast path: full-body JSON (non-streaming).
|
||||
if let Ok(value) = serde_json::from_slice::<serde_json::Value>(buf) {
|
||||
let u = ExtractedUsage::from_json(&value);
|
||||
if !u.is_empty() {
|
||||
return u;
|
||||
}
|
||||
}
|
||||
|
||||
// SSE path: scan from the end for a `data:` line containing a usage object.
|
||||
let text = match std::str::from_utf8(buf) {
|
||||
Ok(t) => t,
|
||||
Err(_) => return ExtractedUsage::default(),
|
||||
};
|
||||
for line in text.lines().rev() {
|
||||
let trimmed = line.trim_start();
|
||||
let payload = match trimmed.strip_prefix("data:") {
|
||||
Some(p) => p.trim_start(),
|
||||
None => continue,
|
||||
};
|
||||
if payload == "[DONE]" || payload.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if !payload.contains("\"usage\"") {
|
||||
continue;
|
||||
}
|
||||
if let Ok(value) = serde_json::from_str::<serde_json::Value>(payload) {
|
||||
let u = ExtractedUsage::from_json(&value);
|
||||
if !u.is_empty() {
|
||||
return u;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ExtractedUsage::default()
|
||||
}
|
||||
|
||||
/// Trait for processing streaming chunks
|
||||
/// Implementors can inject custom logic during streaming (e.g., hallucination detection, logging)
|
||||
pub trait StreamProcessor: Send + 'static {
|
||||
|
|
@ -51,6 +175,18 @@ impl StreamProcessor for Box<dyn StreamProcessor> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Optional Prometheus-metric context for an LLM upstream call. When present,
/// [`ObservableStreamProcessor`] emits `brightstaff_llm_*` metrics at
/// first-byte / complete / error callbacks.
#[derive(Debug, Clone)]
pub struct LlmMetricsCtx {
    /// Upstream provider label used as a metric dimension.
    pub provider: String,
    /// Model label used as a metric dimension.
    pub model: String,
    /// HTTP status of the upstream response. Used to pick `status_class` and
    /// `error_class` on `on_complete`.
    pub upstream_status: u16,
}
|
||||
|
||||
/// A processor that tracks streaming metrics
|
||||
pub struct ObservableStreamProcessor {
|
||||
service_name: String,
|
||||
|
|
@ -60,6 +196,12 @@ pub struct ObservableStreamProcessor {
|
|||
start_time: Instant,
|
||||
time_to_first_token: Option<u128>,
|
||||
messages: Option<Vec<Message>>,
|
||||
/// Accumulated response bytes used only for best-effort usage extraction
|
||||
/// on `on_complete`. Capped at `USAGE_BUFFER_MAX`; excess chunks are dropped
|
||||
/// from the buffer (they still pass through to the client).
|
||||
response_buffer: Vec<u8>,
|
||||
llm_metrics: Option<LlmMetricsCtx>,
|
||||
metrics_recorded: bool,
|
||||
}
|
||||
|
||||
impl ObservableStreamProcessor {
|
||||
|
|
@ -93,21 +235,42 @@ impl ObservableStreamProcessor {
|
|||
start_time,
|
||||
time_to_first_token: None,
|
||||
messages,
|
||||
response_buffer: Vec::new(),
|
||||
llm_metrics: None,
|
||||
metrics_recorded: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Attach LLM upstream metric context so the processor emits
|
||||
/// `brightstaff_llm_*` metrics on first-byte / complete / error.
|
||||
pub fn with_llm_metrics(mut self, ctx: LlmMetricsCtx) -> Self {
|
||||
self.llm_metrics = Some(ctx);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl StreamProcessor for ObservableStreamProcessor {
|
||||
fn process_chunk(&mut self, chunk: Bytes) -> Result<Option<Bytes>, String> {
|
||||
self.total_bytes += chunk.len();
|
||||
self.chunk_count += 1;
|
||||
// Accumulate for best-effort usage extraction; drop further chunks once
|
||||
// the cap is reached so we don't retain huge response bodies in memory.
|
||||
if self.response_buffer.len() < USAGE_BUFFER_MAX {
|
||||
let remaining = USAGE_BUFFER_MAX - self.response_buffer.len();
|
||||
let take = chunk.len().min(remaining);
|
||||
self.response_buffer.extend_from_slice(&chunk[..take]);
|
||||
}
|
||||
Ok(Some(chunk))
|
||||
}
|
||||
|
||||
fn on_first_bytes(&mut self) {
|
||||
// Record time to first token (only for streaming)
|
||||
if self.time_to_first_token.is_none() {
|
||||
self.time_to_first_token = Some(self.start_time.elapsed().as_millis());
|
||||
let elapsed = self.start_time.elapsed();
|
||||
self.time_to_first_token = Some(elapsed.as_millis());
|
||||
if let Some(ref ctx) = self.llm_metrics {
|
||||
bs_metrics::record_llm_ttft(&ctx.provider, &ctx.model, elapsed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -124,77 +287,98 @@ impl StreamProcessor for ObservableStreamProcessor {
|
|||
);
|
||||
}
|
||||
|
||||
// Analyze signals if messages are available and record as span attributes
|
||||
if let Some(ref messages) = self.messages {
|
||||
let analyzer: Box<dyn SignalAnalyzer> = Box::new(TextBasedSignalAnalyzer::new());
|
||||
let report = analyzer.analyze(messages);
|
||||
// Record total duration on the span for the observability console.
|
||||
let duration_ms = self.start_time.elapsed().as_millis() as i64;
|
||||
{
|
||||
let span = tracing::Span::current();
|
||||
let otel_context = span.context();
|
||||
let otel_span = otel_context.span();
|
||||
otel_span.set_attribute(KeyValue::new(llm::DURATION_MS, duration_ms));
|
||||
otel_span.set_attribute(KeyValue::new(llm::RESPONSE_BYTES, self.total_bytes as i64));
|
||||
}
|
||||
|
||||
// Best-effort usage extraction + emission (works for both streaming
|
||||
// SSE and non-streaming JSON responses that include a `usage` object).
|
||||
let usage = extract_usage_from_bytes(&self.response_buffer);
|
||||
if !usage.is_empty() {
|
||||
let span = tracing::Span::current();
|
||||
let otel_context = span.context();
|
||||
let otel_span = otel_context.span();
|
||||
if let Some(v) = usage.prompt_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::PROMPT_TOKENS, v));
|
||||
}
|
||||
if let Some(v) = usage.completion_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::COMPLETION_TOKENS, v));
|
||||
}
|
||||
if let Some(v) = usage.total_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::TOTAL_TOKENS, v));
|
||||
}
|
||||
if let Some(v) = usage.cached_input_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::CACHED_INPUT_TOKENS, v));
|
||||
}
|
||||
if let Some(v) = usage.cache_creation_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::CACHE_CREATION_TOKENS, v));
|
||||
}
|
||||
if let Some(v) = usage.reasoning_tokens {
|
||||
otel_span.set_attribute(KeyValue::new(llm::REASONING_TOKENS, v));
|
||||
}
|
||||
// Override `llm.model` with the model the upstream actually ran
|
||||
// (e.g. `openai-gpt-5.4` resolved from `router:software-engineering`).
|
||||
// Cost lookup keys off the real model, not the alias.
|
||||
if let Some(resolved) = usage.resolved_model.clone() {
|
||||
otel_span.set_attribute(KeyValue::new(llm::MODEL_NAME, resolved));
|
||||
}
|
||||
}
|
||||
|
||||
// Emit LLM upstream prometheus metrics (duration + tokens) if wired.
|
||||
// The upstream responded (we have a status), so status_class alone
|
||||
// carries the non-2xx signal — error_class stays "none".
|
||||
if let Some(ref ctx) = self.llm_metrics {
|
||||
bs_metrics::record_llm_upstream(
|
||||
&ctx.provider,
|
||||
&ctx.model,
|
||||
ctx.upstream_status,
|
||||
metric_labels::LLM_ERR_NONE,
|
||||
self.start_time.elapsed(),
|
||||
);
|
||||
if let Some(v) = usage.prompt_tokens {
|
||||
bs_metrics::record_llm_tokens(
|
||||
&ctx.provider,
|
||||
&ctx.model,
|
||||
metric_labels::TOKEN_KIND_PROMPT,
|
||||
v.max(0) as u64,
|
||||
);
|
||||
}
|
||||
if let Some(v) = usage.completion_tokens {
|
||||
bs_metrics::record_llm_tokens(
|
||||
&ctx.provider,
|
||||
&ctx.model,
|
||||
metric_labels::TOKEN_KIND_COMPLETION,
|
||||
v.max(0) as u64,
|
||||
);
|
||||
}
|
||||
if usage.prompt_tokens.is_none() && usage.completion_tokens.is_none() {
|
||||
bs_metrics::record_llm_tokens_usage_missing(&ctx.provider, &ctx.model);
|
||||
}
|
||||
self.metrics_recorded = true;
|
||||
}
|
||||
// Release the buffered bytes early; nothing downstream needs them.
|
||||
self.response_buffer.clear();
|
||||
self.response_buffer.shrink_to_fit();
|
||||
|
||||
// Analyze signals if messages are available and record as span
|
||||
// attributes + per-signal events. We dual-emit legacy aggregate keys
|
||||
// and the new layered taxonomy so existing dashboards keep working
|
||||
// while new consumers can opt into the richer hierarchy.
|
||||
if let Some(ref messages) = self.messages {
|
||||
let analyzer = SignalAnalyzer::default();
|
||||
let report = analyzer.analyze_openai(messages);
|
||||
|
||||
// Get the current OTel span to set signal attributes
|
||||
let span = tracing::Span::current();
|
||||
let otel_context = span.context();
|
||||
let otel_span = otel_context.span();
|
||||
|
||||
// Add overall quality
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::QUALITY,
|
||||
format!("{:?}", report.overall_quality),
|
||||
));
|
||||
|
||||
// Add repair/follow-up metrics if concerning
|
||||
if report.follow_up.is_concerning || report.follow_up.repair_count > 0 {
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::REPAIR_COUNT,
|
||||
report.follow_up.repair_count as i64,
|
||||
));
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::REPAIR_RATIO,
|
||||
format!("{:.3}", report.follow_up.repair_ratio),
|
||||
));
|
||||
}
|
||||
|
||||
// Add frustration metrics
|
||||
if report.frustration.has_frustration {
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::FRUSTRATION_COUNT,
|
||||
report.frustration.frustration_count as i64,
|
||||
));
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::FRUSTRATION_SEVERITY,
|
||||
report.frustration.severity as i64,
|
||||
));
|
||||
}
|
||||
|
||||
// Add repetition metrics
|
||||
if report.repetition.has_looping {
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::REPETITION_COUNT,
|
||||
report.repetition.repetition_count as i64,
|
||||
));
|
||||
}
|
||||
|
||||
// Add escalation metrics
|
||||
if report.escalation.escalation_requested {
|
||||
otel_span
|
||||
.set_attribute(KeyValue::new(signal_constants::ESCALATION_REQUESTED, true));
|
||||
}
|
||||
|
||||
// Add positive feedback metrics
|
||||
if report.positive_feedback.has_positive_feedback {
|
||||
otel_span.set_attribute(KeyValue::new(
|
||||
signal_constants::POSITIVE_FEEDBACK_COUNT,
|
||||
report.positive_feedback.positive_count as i64,
|
||||
));
|
||||
}
|
||||
|
||||
// Flag the span name if any concerning signal is detected
|
||||
let should_flag = report.frustration.has_frustration
|
||||
|| report.repetition.has_looping
|
||||
|| report.escalation.escalation_requested
|
||||
|| matches!(
|
||||
report.overall_quality,
|
||||
InteractionQuality::Poor | InteractionQuality::Severe
|
||||
);
|
||||
|
||||
let should_flag = emit_signals_to_span(&otel_span, &report);
|
||||
if should_flag {
|
||||
otel_span.update_name(format!("{} {}", self.operation_name, FLAG_MARKER));
|
||||
}
|
||||
|
|
@ -217,6 +401,18 @@ impl StreamProcessor for ObservableStreamProcessor {
|
|||
duration_ms = self.start_time.elapsed().as_millis(),
|
||||
"stream error"
|
||||
);
|
||||
if let Some(ref ctx) = self.llm_metrics {
|
||||
if !self.metrics_recorded {
|
||||
bs_metrics::record_llm_upstream(
|
||||
&ctx.provider,
|
||||
&ctx.model,
|
||||
ctx.upstream_status,
|
||||
metric_labels::LLM_ERR_STREAM,
|
||||
self.start_time.elapsed(),
|
||||
);
|
||||
self.metrics_recorded = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -404,3 +600,55 @@ pub fn truncate_message(message: &str, max_length: usize) -> String {
|
|||
message.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod usage_extraction_tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn non_streaming_openai_with_cached() {
|
||||
let body = br#"{"id":"x","model":"gpt-4o","choices":[],"usage":{"prompt_tokens":12,"completion_tokens":34,"total_tokens":46,"prompt_tokens_details":{"cached_tokens":5}}}"#;
|
||||
let u = extract_usage_from_bytes(body);
|
||||
assert_eq!(u.prompt_tokens, Some(12));
|
||||
assert_eq!(u.completion_tokens, Some(34));
|
||||
assert_eq!(u.total_tokens, Some(46));
|
||||
assert_eq!(u.cached_input_tokens, Some(5));
|
||||
assert_eq!(u.reasoning_tokens, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn non_streaming_anthropic_with_cache_creation() {
|
||||
let body = br#"{"id":"x","model":"claude","usage":{"input_tokens":100,"output_tokens":50,"cache_creation_input_tokens":20,"cache_read_input_tokens":30}}"#;
|
||||
let u = extract_usage_from_bytes(body);
|
||||
assert_eq!(u.prompt_tokens, Some(100));
|
||||
assert_eq!(u.completion_tokens, Some(50));
|
||||
assert_eq!(u.total_tokens, Some(150));
|
||||
assert_eq!(u.cached_input_tokens, Some(30));
|
||||
assert_eq!(u.cache_creation_tokens, Some(20));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn streaming_openai_final_chunk_has_usage() {
|
||||
let sse = b"data: {\"choices\":[{\"delta\":{\"content\":\"hi\"}}]}
|
||||
|
||||
data: {\"choices\":[{\"delta\":{}, \"finish_reason\":\"stop\"}],\"usage\":{\"prompt_tokens\":7,\"completion_tokens\":3,\"total_tokens\":10}}
|
||||
|
||||
data: [DONE]
|
||||
|
||||
";
|
||||
let u = extract_usage_from_bytes(sse);
|
||||
assert_eq!(u.prompt_tokens, Some(7));
|
||||
assert_eq!(u.completion_tokens, Some(3));
|
||||
assert_eq!(u.total_tokens, Some(10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_returns_default() {
|
||||
assert!(extract_usage_from_bytes(b"").is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_usage_in_body_returns_default() {
|
||||
assert!(extract_usage_from_bytes(br#"{"ok":true}"#).is_empty());
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -80,6 +80,18 @@ pub mod llm {
|
|||
/// Total tokens used (prompt + completion)
|
||||
pub const TOTAL_TOKENS: &str = "llm.usage.total_tokens";
|
||||
|
||||
/// Tokens served from a prompt cache read
|
||||
/// (OpenAI `prompt_tokens_details.cached_tokens`, Anthropic `cache_read_input_tokens`,
|
||||
/// Google `cached_content_token_count`)
|
||||
pub const CACHED_INPUT_TOKENS: &str = "llm.usage.cached_input_tokens";
|
||||
|
||||
/// Tokens used to write a prompt cache entry (Anthropic `cache_creation_input_tokens`)
|
||||
pub const CACHE_CREATION_TOKENS: &str = "llm.usage.cache_creation_tokens";
|
||||
|
||||
/// Reasoning tokens for reasoning models
|
||||
/// (OpenAI `completion_tokens_details.reasoning_tokens`, Google `thoughts_token_count`)
|
||||
pub const REASONING_TOKENS: &str = "llm.usage.reasoning_tokens";
|
||||
|
||||
/// Temperature parameter used
|
||||
pub const TEMPERATURE: &str = "llm.temperature";
|
||||
|
||||
|
|
@ -119,6 +131,22 @@ pub mod routing {
|
|||
pub const SELECTION_REASON: &str = "routing.selection_reason";
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Span Attributes - Plano-specific
|
||||
// =============================================================================
|
||||
|
||||
/// Attributes specific to Plano (session affinity, routing decisions).
|
||||
pub mod plano {
|
||||
/// Session identifier propagated via the `x-model-affinity` header.
|
||||
/// Absent when the client did not send the header.
|
||||
pub const SESSION_ID: &str = "plano.session_id";
|
||||
|
||||
/// Matched route name from routing (e.g. "code", "summarization",
|
||||
/// "software-engineering"). Absent when the client routed directly
|
||||
/// to a concrete model.
|
||||
pub const ROUTE_NAME: &str = "plano.route.name";
|
||||
}
|
||||
|
||||
// =============================================================================
|
||||
// Span Attributes - Error Handling
|
||||
// =============================================================================
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ mod init;
|
|||
mod service_name_exporter;
|
||||
|
||||
pub use constants::{
|
||||
error, http, llm, operation_component, routing, signals, OperationNameBuilder,
|
||||
error, http, llm, operation_component, plano, routing, signals, OperationNameBuilder,
|
||||
};
|
||||
pub use custom_attributes::collect_custom_trace_attributes;
|
||||
pub use init::init_tracer;
|
||||
|
|
|
|||
|
|
@ -7,6 +7,34 @@ use crate::api::open_ai::{
|
|||
ChatCompletionTool, FunctionDefinition, FunctionParameter, FunctionParameters, ParameterType,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum SessionCacheType {
|
||||
#[default]
|
||||
Memory,
|
||||
Redis,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct SessionCacheConfig {
|
||||
#[serde(rename = "type", default)]
|
||||
pub cache_type: SessionCacheType,
|
||||
/// Redis URL, e.g. `redis://localhost:6379`. Required when `type` is `redis`.
|
||||
pub url: Option<String>,
|
||||
/// Optional HTTP header name whose value is used as a tenant prefix in the cache key.
|
||||
/// When set, keys are scoped as `plano:affinity:{tenant_id}:{session_id}`.
|
||||
pub tenant_header: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Routing {
|
||||
pub llm_provider: Option<String>,
|
||||
pub model: Option<String>,
|
||||
pub session_ttl_seconds: Option<u64>,
|
||||
pub session_max_entries: Option<usize>,
|
||||
pub session_cache: Option<SessionCacheConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ModelAlias {
|
||||
pub target: String,
|
||||
|
|
@ -182,6 +210,7 @@ pub struct Configuration {
|
|||
pub model_providers: Vec<LlmProvider>,
|
||||
pub model_aliases: Option<HashMap<String, ModelAlias>>,
|
||||
pub overrides: Option<Overrides>,
|
||||
pub routing: Option<Routing>,
|
||||
pub system_prompt: Option<String>,
|
||||
pub prompt_guards: Option<PromptGuards>,
|
||||
pub prompt_targets: Option<Vec<PromptTarget>>,
|
||||
|
|
@ -204,6 +233,8 @@ pub struct Overrides {
|
|||
pub use_agent_orchestrator: Option<bool>,
|
||||
pub llm_routing_model: Option<String>,
|
||||
pub agent_orchestration_model: Option<String>,
|
||||
pub orchestrator_model_context_length: Option<usize>,
|
||||
pub disable_signals: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
|
|
@ -339,6 +370,8 @@ pub enum LlmProviderType {
|
|||
Mistral,
|
||||
#[serde(rename = "openai")]
|
||||
OpenAI,
|
||||
#[serde(rename = "xiaomi")]
|
||||
Xiaomi,
|
||||
#[serde(rename = "gemini")]
|
||||
Gemini,
|
||||
#[serde(rename = "xai")]
|
||||
|
|
@ -359,6 +392,10 @@ pub enum LlmProviderType {
|
|||
AmazonBedrock,
|
||||
#[serde(rename = "plano")]
|
||||
Plano,
|
||||
#[serde(rename = "chatgpt")]
|
||||
ChatGPT,
|
||||
#[serde(rename = "digitalocean")]
|
||||
DigitalOcean,
|
||||
}
|
||||
|
||||
impl Display for LlmProviderType {
|
||||
|
|
@ -370,6 +407,7 @@ impl Display for LlmProviderType {
|
|||
LlmProviderType::Gemini => write!(f, "gemini"),
|
||||
LlmProviderType::Mistral => write!(f, "mistral"),
|
||||
LlmProviderType::OpenAI => write!(f, "openai"),
|
||||
LlmProviderType::Xiaomi => write!(f, "xiaomi"),
|
||||
LlmProviderType::XAI => write!(f, "xai"),
|
||||
LlmProviderType::TogetherAI => write!(f, "together_ai"),
|
||||
LlmProviderType::AzureOpenAI => write!(f, "azure_openai"),
|
||||
|
|
@ -379,6 +417,8 @@ impl Display for LlmProviderType {
|
|||
LlmProviderType::Qwen => write!(f, "qwen"),
|
||||
LlmProviderType::AmazonBedrock => write!(f, "amazon_bedrock"),
|
||||
LlmProviderType::Plano => write!(f, "plano"),
|
||||
LlmProviderType::ChatGPT => write!(f, "chatgpt"),
|
||||
LlmProviderType::DigitalOcean => write!(f, "digitalocean"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -445,6 +485,7 @@ pub struct LlmProvider {
|
|||
pub base_url_path_prefix: Option<String>,
|
||||
pub internal: Option<bool>,
|
||||
pub passthrough_auth: Option<bool>,
|
||||
pub headers: Option<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
pub trait IntoModels {
|
||||
|
|
@ -488,6 +529,7 @@ impl Default for LlmProvider {
|
|||
base_url_path_prefix: None,
|
||||
internal: None,
|
||||
passthrough_auth: None,
|
||||
headers: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -614,7 +656,7 @@ mod test {
|
|||
.expect("reference config file not found");
|
||||
|
||||
let config: super::Configuration = serde_yaml::from_str(&ref_config).unwrap();
|
||||
assert_eq!(config.version, "v0.3.0");
|
||||
assert_eq!(config.version, "v0.4.0");
|
||||
|
||||
if let Some(prompt_targets) = &config.prompt_targets {
|
||||
assert!(
|
||||
|
|
@ -697,13 +739,6 @@ mod test {
|
|||
internal: None,
|
||||
..Default::default()
|
||||
},
|
||||
LlmProvider {
|
||||
name: "arch-router".to_string(),
|
||||
provider_interface: LlmProviderType::Plano,
|
||||
model: Some("Arch-Router".to_string()),
|
||||
internal: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
LlmProvider {
|
||||
name: "plano-orchestrator".to_string(),
|
||||
provider_interface: LlmProviderType::Plano,
|
||||
|
|
@ -715,13 +750,35 @@ mod test {
|
|||
|
||||
let models = providers.into_models();
|
||||
|
||||
// Should only have 1 model: openai-gpt4
|
||||
assert_eq!(models.data.len(), 1);
|
||||
|
||||
// Verify internal models are excluded from /v1/models
|
||||
let model_ids: Vec<String> = models.data.iter().map(|m| m.id.clone()).collect();
|
||||
assert!(model_ids.contains(&"openai-gpt4".to_string()));
|
||||
assert!(!model_ids.contains(&"arch-router".to_string()));
|
||||
assert!(!model_ids.contains(&"plano-orchestrator".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_overrides_disable_signals_default_none() {
|
||||
let overrides = super::Overrides::default();
|
||||
assert_eq!(overrides.disable_signals, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_overrides_disable_signals_deserialize() {
|
||||
let yaml = r#"
|
||||
disable_signals: true
|
||||
"#;
|
||||
let overrides: super::Overrides = serde_yaml::from_str(yaml).unwrap();
|
||||
assert_eq!(overrides.disable_signals, Some(true));
|
||||
|
||||
let yaml_false = r#"
|
||||
disable_signals: false
|
||||
"#;
|
||||
let overrides: super::Overrides = serde_yaml::from_str(yaml_false).unwrap();
|
||||
assert_eq!(overrides.disable_signals, Some(false));
|
||||
|
||||
let yaml_missing = "{}";
|
||||
let overrides: super::Overrides = serde_yaml::from_str(yaml_missing).unwrap();
|
||||
assert_eq!(overrides.disable_signals, None);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@ pub const X_ARCH_TOOL_CALL: &str = "x-arch-tool-call-message";
|
|||
pub const X_ARCH_FC_MODEL_RESPONSE: &str = "x-arch-fc-model-response";
|
||||
pub const ARCH_FC_MODEL_NAME: &str = "Arch-Function";
|
||||
pub const REQUEST_ID_HEADER: &str = "x-request-id";
|
||||
pub const MODEL_AFFINITY_HEADER: &str = "x-model-affinity";
|
||||
pub const ENVOY_ORIGINAL_PATH_HEADER: &str = "x-envoy-original-path";
|
||||
pub const TRACE_PARENT_HEADER: &str = "traceparent";
|
||||
pub const ARCH_INTERNAL_CLUSTER_NAME: &str = "arch_internal";
|
||||
|
|
|
|||
|
|
@ -277,6 +277,7 @@ mod tests {
|
|||
internal: None,
|
||||
stream: None,
|
||||
passthrough_auth: None,
|
||||
headers: None,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -435,6 +435,12 @@ impl TokenUsage for MessagesResponse {
|
|||
fn total_tokens(&self) -> usize {
|
||||
(self.usage.input_tokens + self.usage.output_tokens) as usize
|
||||
}
|
||||
fn cached_input_tokens(&self) -> Option<usize> {
|
||||
self.usage.cache_read_input_tokens.map(|t| t as usize)
|
||||
}
|
||||
fn cache_creation_tokens(&self) -> Option<usize> {
|
||||
self.usage.cache_creation_input_tokens.map(|t| t as usize)
|
||||
}
|
||||
}
|
||||
|
||||
impl ProviderResponse for MessagesResponse {
|
||||
|
|
@ -572,7 +578,9 @@ impl ProviderRequest for MessagesRequest {
|
|||
let mut regular_messages = Vec::new();
|
||||
|
||||
for msg in messages {
|
||||
if msg.role == crate::apis::openai::Role::System {
|
||||
if msg.role == crate::apis::openai::Role::System
|
||||
|| msg.role == crate::apis::openai::Role::Developer
|
||||
{
|
||||
system_messages.push(msg.clone());
|
||||
} else {
|
||||
regular_messages.push(msg.clone());
|
||||
|
|
|
|||
|
|
@ -150,6 +150,7 @@ pub enum Role {
|
|||
User,
|
||||
Assistant,
|
||||
Tool,
|
||||
Developer,
|
||||
}
|
||||
|
||||
#[skip_serializing_none]
|
||||
|
|
@ -595,6 +596,18 @@ impl TokenUsage for Usage {
|
|||
fn total_tokens(&self) -> usize {
|
||||
self.total_tokens as usize
|
||||
}
|
||||
|
||||
fn cached_input_tokens(&self) -> Option<usize> {
|
||||
self.prompt_tokens_details
|
||||
.as_ref()
|
||||
.and_then(|d| d.cached_tokens.map(|t| t as usize))
|
||||
}
|
||||
|
||||
fn reasoning_tokens(&self) -> Option<usize> {
|
||||
self.completion_tokens_details
|
||||
.as_ref()
|
||||
.and_then(|d| d.reasoning_tokens.map(|t| t as usize))
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation of ProviderRequest for ChatCompletionsRequest
|
||||
|
|
@ -736,6 +749,7 @@ impl ProviderStreamResponse for ChatCompletionsStreamResponse {
|
|||
Role::User => "user",
|
||||
Role::Assistant => "assistant",
|
||||
Role::Tool => "tool",
|
||||
Role::Developer => "developer",
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -710,6 +710,18 @@ impl crate::providers::response::TokenUsage for ResponseUsage {
|
|||
fn total_tokens(&self) -> usize {
|
||||
self.total_tokens as usize
|
||||
}
|
||||
|
||||
fn cached_input_tokens(&self) -> Option<usize> {
|
||||
self.input_tokens_details
|
||||
.as_ref()
|
||||
.map(|d| d.cached_tokens.max(0) as usize)
|
||||
}
|
||||
|
||||
fn reasoning_tokens(&self) -> Option<usize> {
|
||||
self.output_tokens_details
|
||||
.as_ref()
|
||||
.map(|d| d.reasoning_tokens.max(0) as usize)
|
||||
}
|
||||
}
|
||||
|
||||
/// Token details
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
use crate::apis::anthropic::MessagesStreamEvent;
|
||||
use crate::apis::anthropic::{
|
||||
MessagesMessageDelta, MessagesStopReason, MessagesStreamEvent, MessagesUsage,
|
||||
};
|
||||
use crate::apis::streaming_shapes::sse::{SseEvent, SseStreamBufferTrait};
|
||||
use crate::providers::streaming_response::ProviderStreamResponseType;
|
||||
use log::warn;
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// SSE Stream Buffer for Anthropic Messages API streaming.
|
||||
|
|
@ -11,13 +14,24 @@ use std::collections::HashSet;
|
|||
///
|
||||
/// When converting from OpenAI to Anthropic format, this buffer injects the required
|
||||
/// ContentBlockStart and ContentBlockStop events to maintain proper Anthropic protocol.
|
||||
///
|
||||
/// Guarantees (Anthropic Messages API contract):
|
||||
/// 1. `message_stop` is never emitted unless a matching `message_start` was emitted first.
|
||||
/// 2. `message_stop` is emitted at most once per stream (no double-close).
|
||||
/// 3. If upstream terminates with no content (empty/filtered/errored response), a
|
||||
/// minimal but well-formed envelope is synthesized so the client's state machine
|
||||
/// stays consistent.
|
||||
pub struct AnthropicMessagesStreamBuffer {
|
||||
/// Buffered SSE events ready to be written to wire
|
||||
buffered_events: Vec<SseEvent>,
|
||||
|
||||
/// Track if we've seen a message_start event
|
||||
/// Track if we've emitted a message_start event
|
||||
message_started: bool,
|
||||
|
||||
/// Track if we've emitted a terminal message_stop event (for idempotency /
|
||||
/// double-close protection).
|
||||
message_stopped: bool,
|
||||
|
||||
/// Track content block indices that have received ContentBlockStart events
|
||||
content_block_start_indices: HashSet<i32>,
|
||||
|
||||
|
|
@ -42,6 +56,7 @@ impl AnthropicMessagesStreamBuffer {
|
|||
Self {
|
||||
buffered_events: Vec::new(),
|
||||
message_started: false,
|
||||
message_stopped: false,
|
||||
content_block_start_indices: HashSet::new(),
|
||||
needs_content_block_stop: false,
|
||||
seen_message_delta: false,
|
||||
|
|
@ -49,6 +64,66 @@ impl AnthropicMessagesStreamBuffer {
|
|||
}
|
||||
}
|
||||
|
||||
/// Inject a `message_start` event into the buffer if one hasn't been emitted yet.
|
||||
/// This is the single source of truth for opening a message — every handler
|
||||
/// that can legitimately be the first event on the wire must call this before
|
||||
/// pushing its own event.
|
||||
fn ensure_message_started(&mut self) {
|
||||
if self.message_started {
|
||||
return;
|
||||
}
|
||||
let model = self.model.as_deref().unwrap_or("unknown");
|
||||
let message_start = AnthropicMessagesStreamBuffer::create_message_start_event(model);
|
||||
self.buffered_events.push(message_start);
|
||||
self.message_started = true;
|
||||
}
|
||||
|
||||
/// Inject a synthetic `message_delta` with `end_turn` / zero usage.
|
||||
/// Used when we must close a message but upstream never produced a terminal
|
||||
/// event (e.g. `[DONE]` arrives with no prior `finish_reason`).
|
||||
fn push_synthetic_message_delta(&mut self) {
|
||||
let event = MessagesStreamEvent::MessageDelta {
|
||||
delta: MessagesMessageDelta {
|
||||
stop_reason: MessagesStopReason::EndTurn,
|
||||
stop_sequence: None,
|
||||
},
|
||||
usage: MessagesUsage {
|
||||
input_tokens: 0,
|
||||
output_tokens: 0,
|
||||
cache_creation_input_tokens: None,
|
||||
cache_read_input_tokens: None,
|
||||
},
|
||||
};
|
||||
let sse_string: String = event.clone().into();
|
||||
self.buffered_events.push(SseEvent {
|
||||
data: None,
|
||||
event: Some("message_delta".to_string()),
|
||||
raw_line: sse_string.clone(),
|
||||
sse_transformed_lines: sse_string,
|
||||
provider_stream_response: Some(ProviderStreamResponseType::MessagesStreamEvent(event)),
|
||||
});
|
||||
self.seen_message_delta = true;
|
||||
}
|
||||
|
||||
/// Inject a `message_stop` event into the buffer, marking the stream as closed.
|
||||
/// Idempotent — subsequent calls are no-ops.
|
||||
fn push_message_stop(&mut self) {
|
||||
if self.message_stopped {
|
||||
return;
|
||||
}
|
||||
let message_stop = MessagesStreamEvent::MessageStop;
|
||||
let sse_string: String = message_stop.into();
|
||||
self.buffered_events.push(SseEvent {
|
||||
data: None,
|
||||
event: Some("message_stop".to_string()),
|
||||
raw_line: sse_string.clone(),
|
||||
sse_transformed_lines: sse_string,
|
||||
provider_stream_response: None,
|
||||
});
|
||||
self.message_stopped = true;
|
||||
self.seen_message_delta = false;
|
||||
}
|
||||
|
||||
/// Check if a content_block_start event has been sent for the given index
|
||||
fn has_content_block_start_been_sent(&self, index: i32) -> bool {
|
||||
self.content_block_start_indices.contains(&index)
|
||||
|
|
@ -149,6 +224,27 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
// We match on a reference first to determine the type, then move the event
|
||||
match &event.provider_stream_response {
|
||||
Some(ProviderStreamResponseType::MessagesStreamEvent(evt)) => {
|
||||
// If the message has already been closed, drop any trailing events
|
||||
// to avoid emitting data after `message_stop` (protocol violation).
|
||||
// This typically indicates a duplicate `[DONE]` from upstream or a
|
||||
// replay of previously-buffered bytes — worth surfacing so we can
|
||||
// spot misbehaving providers.
|
||||
if self.message_stopped {
|
||||
warn!(
|
||||
"anthropic stream buffer: dropping event after message_stop (variant={})",
|
||||
match evt {
|
||||
MessagesStreamEvent::MessageStart { .. } => "message_start",
|
||||
MessagesStreamEvent::ContentBlockStart { .. } => "content_block_start",
|
||||
MessagesStreamEvent::ContentBlockDelta { .. } => "content_block_delta",
|
||||
MessagesStreamEvent::ContentBlockStop { .. } => "content_block_stop",
|
||||
MessagesStreamEvent::MessageDelta { .. } => "message_delta",
|
||||
MessagesStreamEvent::MessageStop => "message_stop",
|
||||
MessagesStreamEvent::Ping => "ping",
|
||||
}
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
match evt {
|
||||
MessagesStreamEvent::MessageStart { .. } => {
|
||||
// Add the message_start event
|
||||
|
|
@ -157,14 +253,7 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
}
|
||||
MessagesStreamEvent::ContentBlockStart { index, .. } => {
|
||||
let index = *index as i32;
|
||||
// Inject message_start if needed
|
||||
if !self.message_started {
|
||||
let model = self.model.as_deref().unwrap_or("unknown");
|
||||
let message_start =
|
||||
AnthropicMessagesStreamBuffer::create_message_start_event(model);
|
||||
self.buffered_events.push(message_start);
|
||||
self.message_started = true;
|
||||
}
|
||||
self.ensure_message_started();
|
||||
|
||||
// Add the content_block_start event (from tool calls or other sources)
|
||||
self.buffered_events.push(event);
|
||||
|
|
@ -173,14 +262,7 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
}
|
||||
MessagesStreamEvent::ContentBlockDelta { index, .. } => {
|
||||
let index = *index as i32;
|
||||
// Inject message_start if needed
|
||||
if !self.message_started {
|
||||
let model = self.model.as_deref().unwrap_or("unknown");
|
||||
let message_start =
|
||||
AnthropicMessagesStreamBuffer::create_message_start_event(model);
|
||||
self.buffered_events.push(message_start);
|
||||
self.message_started = true;
|
||||
}
|
||||
self.ensure_message_started();
|
||||
|
||||
// Check if ContentBlockStart was sent for this index
|
||||
if !self.has_content_block_start_been_sent(index) {
|
||||
|
|
@ -196,6 +278,11 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
self.buffered_events.push(event);
|
||||
}
|
||||
MessagesStreamEvent::MessageDelta { usage, .. } => {
|
||||
// `message_delta` is only meaningful inside an open message.
|
||||
// Upstream can send it with no prior content (empty completion,
|
||||
// content filter, etc.), so we must open a message first.
|
||||
self.ensure_message_started();
|
||||
|
||||
// Inject ContentBlockStop before message_delta
|
||||
if self.needs_content_block_stop {
|
||||
let content_block_stop =
|
||||
|
|
@ -230,15 +317,52 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
}
|
||||
MessagesStreamEvent::ContentBlockStop { .. } => {
|
||||
// ContentBlockStop received from upstream (e.g., Bedrock)
|
||||
self.ensure_message_started();
|
||||
// Clear the flag so we don't inject another one
|
||||
self.needs_content_block_stop = false;
|
||||
self.buffered_events.push(event);
|
||||
}
|
||||
MessagesStreamEvent::MessageStop => {
|
||||
// MessageStop received from upstream (e.g., OpenAI via [DONE])
|
||||
// Clear the flag so we don't inject another one
|
||||
self.seen_message_delta = false;
|
||||
// MessageStop received from upstream (e.g., OpenAI via [DONE]).
|
||||
//
|
||||
// The Anthropic protocol requires the full envelope
|
||||
// message_start → [content blocks] → message_delta → message_stop
|
||||
// so we must not emit a bare `message_stop`. Synthesize whatever
|
||||
// is missing to keep the client's state machine consistent.
|
||||
self.ensure_message_started();
|
||||
|
||||
if self.needs_content_block_stop {
|
||||
let content_block_stop =
|
||||
AnthropicMessagesStreamBuffer::create_content_block_stop_event();
|
||||
self.buffered_events.push(content_block_stop);
|
||||
self.needs_content_block_stop = false;
|
||||
}
|
||||
|
||||
// If no message_delta has been emitted yet (empty/filtered upstream
|
||||
// response), synthesize a minimal one carrying `end_turn`.
|
||||
if !self.seen_message_delta {
|
||||
// If we also never opened a content block, open and close one
|
||||
// so clients that expect at least one block are happy.
|
||||
if self.content_block_start_indices.is_empty() {
|
||||
let content_block_start =
|
||||
AnthropicMessagesStreamBuffer::create_content_block_start_event(
|
||||
);
|
||||
self.buffered_events.push(content_block_start);
|
||||
self.set_content_block_start_sent(0);
|
||||
let content_block_stop =
|
||||
AnthropicMessagesStreamBuffer::create_content_block_stop_event(
|
||||
);
|
||||
self.buffered_events.push(content_block_stop);
|
||||
}
|
||||
self.push_synthetic_message_delta();
|
||||
}
|
||||
|
||||
// Push the upstream-provided message_stop and mark closed.
|
||||
// `push_message_stop` is idempotent but we want to reuse the
|
||||
// original SseEvent so raw passthrough semantics are preserved.
|
||||
self.buffered_events.push(event);
|
||||
self.message_stopped = true;
|
||||
self.seen_message_delta = false;
|
||||
}
|
||||
_ => {
|
||||
// Other Anthropic event types (Ping, etc.), just accumulate
|
||||
|
|
@ -254,24 +378,23 @@ impl SseStreamBufferTrait for AnthropicMessagesStreamBuffer {
|
|||
}
|
||||
|
||||
fn to_bytes(&mut self) -> Vec<u8> {
|
||||
// Convert all accumulated events to bytes and clear buffer
|
||||
// Convert all accumulated events to bytes and clear buffer.
|
||||
//
|
||||
// NOTE: We do NOT inject ContentBlockStop here because it's injected when we see MessageDelta
|
||||
// or MessageStop. Injecting it here causes premature ContentBlockStop in the middle of streaming.
|
||||
|
||||
// Inject MessageStop after MessageDelta if we've seen one
|
||||
// This completes the Anthropic Messages API event sequence
|
||||
if self.seen_message_delta {
|
||||
let message_stop = MessagesStreamEvent::MessageStop;
|
||||
let sse_string: String = message_stop.into();
|
||||
let message_stop_event = SseEvent {
|
||||
data: None,
|
||||
event: Some("message_stop".to_string()),
|
||||
raw_line: sse_string.clone(),
|
||||
sse_transformed_lines: sse_string,
|
||||
provider_stream_response: None,
|
||||
};
|
||||
self.buffered_events.push(message_stop_event);
|
||||
self.seen_message_delta = false;
|
||||
//
|
||||
// Inject a synthetic `message_stop` only when:
|
||||
// 1. A `message_delta` has been seen (otherwise we'd violate the Anthropic
|
||||
// protocol by emitting `message_stop` without a preceding `message_delta`), AND
|
||||
// 2. We haven't already emitted `message_stop` (either synthetic from a
|
||||
// previous flush, or real from an upstream `[DONE]`).
|
||||
//
|
||||
// Without the `!message_stopped` guard, a stream whose `finish_reason` chunk
|
||||
// and `[DONE]` marker land in separate HTTP body chunks would receive two
|
||||
// `message_stop` events, triggering Claude Code's "Received message_stop
|
||||
// without a current message" error.
|
||||
if self.seen_message_delta && !self.message_stopped {
|
||||
self.push_message_stop();
|
||||
}
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
|
|
@ -615,4 +738,133 @@ data: [DONE]"#;
|
|||
println!("✓ Stop reason: tool_use");
|
||||
println!("✓ Proper Anthropic tool_use protocol\n");
|
||||
}
|
||||
|
||||
/// Regression test for:
|
||||
/// Claude Code CLI error: "Received message_stop without a current message"
|
||||
///
|
||||
/// Reproduces the *double-close* scenario: OpenAI's final `finish_reason`
|
||||
/// chunk and the `[DONE]` marker arrive in **separate** HTTP body chunks, so
|
||||
/// `to_bytes()` is called between them. Before the fix, this produced two
|
||||
/// `message_stop` events on the wire (one synthetic, one from `[DONE]`).
|
||||
#[test]
|
||||
fn test_openai_to_anthropic_emits_single_message_stop_across_chunk_boundary() {
|
||||
let client_api = SupportedAPIsFromClient::AnthropicMessagesAPI(AnthropicApi::Messages);
|
||||
let upstream_api = SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
|
||||
let mut buffer = AnthropicMessagesStreamBuffer::new();
|
||||
|
||||
// --- HTTP chunk 1: content + finish_reason (no [DONE] yet) -----------
|
||||
let chunk_1 = r#"data: {"id":"c1","object":"chat.completion.chunk","created":1,"model":"gpt-4o","choices":[{"index":0,"delta":{"role":"assistant","content":"Hi"},"finish_reason":null}]}
|
||||
|
||||
data: {"id":"c1","object":"chat.completion.chunk","created":1,"model":"gpt-4o","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}"#;
|
||||
|
||||
for raw in SseStreamIter::try_from(chunk_1.as_bytes()).unwrap() {
|
||||
let e = SseEvent::try_from((raw, &client_api, &upstream_api)).unwrap();
|
||||
buffer.add_transformed_event(e);
|
||||
}
|
||||
let out_1 = String::from_utf8(buffer.to_bytes()).unwrap();
|
||||
|
||||
// --- HTTP chunk 2: just the [DONE] marker ----------------------------
|
||||
let chunk_2 = "data: [DONE]";
|
||||
for raw in SseStreamIter::try_from(chunk_2.as_bytes()).unwrap() {
|
||||
let e = SseEvent::try_from((raw, &client_api, &upstream_api)).unwrap();
|
||||
buffer.add_transformed_event(e);
|
||||
}
|
||||
let out_2 = String::from_utf8(buffer.to_bytes()).unwrap();
|
||||
|
||||
let combined = format!("{}{}", out_1, out_2);
|
||||
let start_count = combined.matches("event: message_start").count();
|
||||
let stop_count = combined.matches("event: message_stop").count();
|
||||
|
||||
assert_eq!(
|
||||
start_count, 1,
|
||||
"Must emit exactly one message_start across chunks, got {start_count}. Output:\n{combined}"
|
||||
);
|
||||
assert_eq!(
|
||||
stop_count, 1,
|
||||
"Must emit exactly one message_stop across chunks (no double-close), got {stop_count}. Output:\n{combined}"
|
||||
);
|
||||
// Every message_stop must be preceded by a message_start earlier in the stream.
|
||||
let start_pos = combined.find("event: message_start").unwrap();
|
||||
let stop_pos = combined.find("event: message_stop").unwrap();
|
||||
assert!(
|
||||
start_pos < stop_pos,
|
||||
"message_start must come before message_stop. Output:\n{combined}"
|
||||
);
|
||||
}
|
||||
|
||||
/// Regression test for:
|
||||
/// "Received message_stop without a current message" on empty upstream responses.
|
||||
///
|
||||
/// OpenAI returns only `[DONE]` with no content deltas and no `finish_reason`
|
||||
/// (this happens with content filters, truncated upstream streams, and some
|
||||
/// 5xx recoveries). Before the fix, the buffer emitted a bare `message_stop`
|
||||
/// with no preceding `message_start`. After the fix, it synthesizes a
|
||||
/// minimal but well-formed envelope.
|
||||
#[test]
|
||||
fn test_openai_done_only_stream_synthesizes_valid_envelope() {
|
||||
let client_api = SupportedAPIsFromClient::AnthropicMessagesAPI(AnthropicApi::Messages);
|
||||
let upstream_api = SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
|
||||
let mut buffer = AnthropicMessagesStreamBuffer::new();
|
||||
|
||||
let raw_input = "data: [DONE]";
|
||||
for raw in SseStreamIter::try_from(raw_input.as_bytes()).unwrap() {
|
||||
let e = SseEvent::try_from((raw, &client_api, &upstream_api)).unwrap();
|
||||
buffer.add_transformed_event(e);
|
||||
}
|
||||
let out = String::from_utf8(buffer.to_bytes()).unwrap();
|
||||
|
||||
assert!(
|
||||
out.contains("event: message_start"),
|
||||
"Empty upstream must still produce message_start. Output:\n{out}"
|
||||
);
|
||||
assert!(
|
||||
out.contains("event: message_delta"),
|
||||
"Empty upstream must produce a synthesized message_delta. Output:\n{out}"
|
||||
);
|
||||
assert_eq!(
|
||||
out.matches("event: message_stop").count(),
|
||||
1,
|
||||
"Empty upstream must produce exactly one message_stop. Output:\n{out}"
|
||||
);
|
||||
|
||||
// Protocol ordering: start < delta < stop.
|
||||
let p_start = out.find("event: message_start").unwrap();
|
||||
let p_delta = out.find("event: message_delta").unwrap();
|
||||
let p_stop = out.find("event: message_stop").unwrap();
|
||||
assert!(
|
||||
p_start < p_delta && p_delta < p_stop,
|
||||
"Bad ordering. Output:\n{out}"
|
||||
);
|
||||
}
|
||||
|
||||
/// Regression test: events arriving after `message_stop` (e.g. a stray `[DONE]`
|
||||
/// echo, or late-arriving deltas from a racing upstream) must be dropped
|
||||
/// rather than written after the terminal frame.
|
||||
#[test]
|
||||
fn test_events_after_message_stop_are_dropped() {
|
||||
let client_api = SupportedAPIsFromClient::AnthropicMessagesAPI(AnthropicApi::Messages);
|
||||
let upstream_api = SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
|
||||
let mut buffer = AnthropicMessagesStreamBuffer::new();
|
||||
|
||||
let first = r#"data: {"id":"c1","object":"chat.completion.chunk","created":1,"model":"gpt-4o","choices":[{"index":0,"delta":{"content":"ok"},"finish_reason":"stop"}]}
|
||||
|
||||
data: [DONE]"#;
|
||||
for raw in SseStreamIter::try_from(first.as_bytes()).unwrap() {
|
||||
let e = SseEvent::try_from((raw, &client_api, &upstream_api)).unwrap();
|
||||
buffer.add_transformed_event(e);
|
||||
}
|
||||
let _ = buffer.to_bytes();
|
||||
|
||||
// Simulate a duplicate / late `[DONE]` after the stream was already closed.
|
||||
let late = "data: [DONE]";
|
||||
for raw in SseStreamIter::try_from(late.as_bytes()).unwrap() {
|
||||
let e = SseEvent::try_from((raw, &client_api, &upstream_api)).unwrap();
|
||||
buffer.add_transformed_event(e);
|
||||
}
|
||||
let tail = String::from_utf8(buffer.to_bytes()).unwrap();
|
||||
assert!(
|
||||
tail.is_empty(),
|
||||
"No bytes should be emitted after message_stop, got: {tail:?}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue