diff --git a/crates/Cargo.lock b/crates/Cargo.lock index 56fc260c..4ca39964 100644 --- a/crates/Cargo.lock +++ b/crates/Cargo.lock @@ -358,6 +358,7 @@ dependencies = [ "pretty_assertions", "rand 0.9.4", "redis", + "regex", "reqwest", "serde", "serde_json", diff --git a/crates/brightstaff/Cargo.toml b/crates/brightstaff/Cargo.toml index b9718e44..edbcb994 100644 --- a/crates/brightstaff/Cargo.toml +++ b/crates/brightstaff/Cargo.toml @@ -26,6 +26,7 @@ opentelemetry-stdout = "0.31" opentelemetry_sdk = { version = "0.31", features = ["rt-tokio"] } pretty_assertions = "1.4.1" rand = "0.9.2" +regex = "1.10" lru = "0.12" metrics = "0.23" metrics-exporter-prometheus = { version = "0.15", default-features = false, features = ["http-listener"] } diff --git a/crates/brightstaff/src/signals/analyzer.rs b/crates/brightstaff/src/signals/analyzer.rs index 8dffdd96..f334ed04 100644 --- a/crates/brightstaff/src/signals/analyzer.rs +++ b/crates/brightstaff/src/signals/analyzer.rs @@ -1,3255 +1,510 @@ -//! Agentic Signals - Behavioral quality indicators for agent interactions +//! Top-level signal analyzer. //! -//! This module implements various signals that serve as early warning indicators -//! of brilliant successes or failures in agentic interactions. These signals are -//! derived from conversation patterns and can be computed algorithmically from -//! message arrays. - -use serde::{Deserialize, Serialize}; -use std::collections::{HashMap, HashSet}; -use std::sync::LazyLock; +//! Direct port of `signals/analyzer.py`. Orchestrates all detectors across +//! the three layers (interaction / execution / environment) and produces a +//! `SignalReport`. 
use hermesllm::apis::openai::{Message, Role}; +use hermesllm::transforms::ExtractText; -// ============================================================================ -// Constants -// ============================================================================ +use super::environment::exhaustion::analyze_exhaustion; +use super::execution::failure::analyze_failure; +use super::execution::loops::analyze_loops; +use super::interaction::disengagement::analyze_disengagement; +use super::interaction::misalignment::analyze_misalignment; +use super::interaction::satisfaction::analyze_satisfaction; +use super::interaction::stagnation::{analyze_stagnation, ShareGptMsg}; +use super::schemas::{ + EnvironmentSignals, ExecutionSignals, InteractionQuality, InteractionSignals, SignalReport, + SignalType, TurnMetrics, +}; +use super::text_processing::NormalizedMessage; -/// Flag emoji for marking spans/operations worth investigating -pub const FLAG_MARKER: &str = "\u{1F6A9}"; +/// Marker appended to the span operation name when concerning signals are +/// detected. Kept in sync with the previous implementation for backward +/// compatibility with downstream consumers. +pub const FLAG_MARKER: &str = "[!]"; -/// Size of character n-grams for similarity matching (3 = trigrams) -const NGRAM_SIZE: usize = 3; +/// ShareGPT-shaped row used as the canonical input to the analyzer's +/// detectors. `from` is one of `"human"`, `"gpt"`, `"function_call"`, +/// `"observation"`. `value` is the raw message body. +#[derive(Debug, Clone, Copy)] +pub struct ShareGptMessage<'a> { + pub from: &'a str, + pub value: &'a str, +} -// ============================================================================ -// Normalized Message Processing -// ============================================================================ - -/// Pre-processed message with normalized text and tokens for efficient matching +/// Configuration knobs for the analyzer. 
Defaults match +/// `signals/analyzer.py:SignalAnalyzer.__init__`. #[derive(Debug, Clone)] -struct NormalizedMessage { - /// Original raw text - raw: String, - /// Tokens (words) extracted from the message - tokens: Vec, - /// Token set for fast lookup - token_set: HashSet, - /// Bigram set for fast similarity computation - bigram_set: HashSet, - /// Character ngram set for robust similarity matching - char_ngram_set: HashSet, - /// Token frequency map for multiset cosine similarity - token_frequency: HashMap, +pub struct SignalAnalyzerConfig { + pub baseline_turns: usize, + pub char_ngram_threshold: f32, + pub token_cosine_threshold: f32, + pub max_message_length: usize, + pub max_messages: usize, } -impl NormalizedMessage { - #[allow(dead_code)] // Used in tests for algorithm validation - fn from_text(text: &str) -> Self { - Self::from_text_with_limit(text, usize::MAX) - } - - fn from_text_with_limit(text: &str, max_length: usize) -> Self { - // Truncate to max_length characters to prevent unbounded computation - // Keep head (20%) + tail (80%) to preserve both context and intent - - let char_count = text.chars().count(); - - let raw = if char_count <= max_length { - text.to_string() - } else { - // Split: 20% head, 79% tail, 1 char space delimiter - let head_len = max_length / 5; - let tail_len = max_length - head_len - 1; - - let head: String = text.chars().take(head_len).collect(); - let tail: String = text.chars().skip(char_count - tail_len).collect(); - - format!("{} {}", head, tail) - }; - - // Normalize unicode punctuation to ASCII equivalents - let normalized_unicode = raw - .replace(['\u{2019}', '\u{2018}'], "'") // U+2019/U+2018 SINGLE QUOTATION MARKs - .replace(['\u{201C}', '\u{201D}'], "\"") // U+201C/U+201D DOUBLE QUOTATION MARKs - .replace(['\u{2013}', '\u{2014}'], "-"); // U+2013/U+2014 EN/EM DASHes - - // Normalize: lowercase, collapse whitespace - let normalized = normalized_unicode - .to_lowercase() - .split_whitespace() - .collect::>() - 
.join(" "); - - // Tokenize: split on whitespace and strip punctuation from boundaries - let tokens: Vec = normalized - .split_whitespace() - .map(|word| { - // Strip leading/trailing punctuation but keep internal punctuation - word.trim_matches(|c: char| c.is_ascii_punctuation()) - .to_string() - }) - .filter(|w| !w.is_empty()) - .collect(); - - let token_set: HashSet = tokens.iter().cloned().collect(); - - // Generate bigram set directly for similarity matching - let bigram_set: HashSet = tokens - .windows(2) - .map(|w| format!("{} {}", w[0], w[1])) - .collect(); - - // Generate character ngram set for robust similarity matching - // Uses tokens (with punctuation stripped) for consistency with pattern matching - let tokens_text = tokens.join(" "); - let char_ngram_set: HashSet = tokens_text - .chars() - .collect::>() - .windows(NGRAM_SIZE) - .map(|w| w.iter().collect::()) - .collect(); - - // Compute token frequency map for cosine similarity - let mut token_frequency: HashMap = HashMap::new(); - for token in &tokens { - *token_frequency.entry(token.clone()).or_insert(0) += 1; - } - - Self { - raw, - tokens, - token_set, - bigram_set, - char_ngram_set, - token_frequency, - } - } - - /// Check if a single token exists in the message (word boundary aware) - fn contains_token(&self, token: &str) -> bool { - self.token_set.contains(token) - } - - /// Check if a phrase (sequence of tokens) exists in the message - fn contains_phrase(&self, phrase: &str) -> bool { - let phrase_tokens: Vec<&str> = phrase.split_whitespace().collect(); - if phrase_tokens.is_empty() { - return false; - } - - if phrase_tokens.len() == 1 { - return self.contains_token(phrase_tokens[0]); - } - - // Multi-word phrase: check for sequence in tokens - self.tokens.windows(phrase_tokens.len()).any(|window| { - window - .iter() - .zip(phrase_tokens.iter()) - .all(|(token, phrase_token)| token == phrase_token) - }) - } - - /// Calculate character ngram similarity between this message and a pattern - 
/// Returns a similarity score between 0.0 and 1.0 - /// This is robust to typos, small edits, and word insertions - #[allow(dead_code)] // Used in tests for algorithm validation - fn char_ngram_similarity(&self, pattern: &str) -> f64 { - // Normalize the pattern: lowercase and remove ALL punctuation - // This makes "doesn't" → "doesnt" for robust typo matching - let normalized_pattern = pattern - .to_lowercase() - .chars() - .filter(|c| c.is_alphanumeric() || c.is_whitespace()) - .collect::() - .split_whitespace() - .collect::>() - .join(" "); - - // Generate ngrams for the pattern - let pattern_ngrams: HashSet = normalized_pattern - .chars() - .collect::>() - .windows(NGRAM_SIZE) - .map(|w| w.iter().collect::()) - .collect(); - - if self.char_ngram_set.is_empty() && pattern_ngrams.is_empty() { - return 1.0; // Both empty = identical - } - - if self.char_ngram_set.is_empty() || pattern_ngrams.is_empty() { - return 0.0; - } - - // Compute Jaccard similarity (intersection / union) - let intersection = self.char_ngram_set.intersection(&pattern_ngrams).count(); - let union = self.char_ngram_set.union(&pattern_ngrams).count(); - - if union == 0 { - return 0.0; - } - - intersection as f64 / union as f64 - } - - /// Calculate token-based cosine similarity using term frequencies - /// Returns a similarity score between 0.0 and 1.0 - /// This handles word frequency and is stable for longer messages - #[allow(dead_code)] // Used in tests for algorithm validation - fn token_cosine_similarity(&self, pattern: &str) -> f64 { - // Tokenize and compute frequencies for the pattern - let pattern_tokens: Vec = pattern - .to_lowercase() - .split_whitespace() - .map(|word| { - word.trim_matches(|c: char| c.is_ascii_punctuation()) - .to_string() - }) - .filter(|w| !w.is_empty()) - .collect(); - - let mut pattern_frequency: HashMap = HashMap::new(); - for token in &pattern_tokens { - *pattern_frequency.entry(token.clone()).or_insert(0) += 1; - } - - if self.token_frequency.is_empty() && 
pattern_frequency.is_empty() { - return 1.0; - } - - if self.token_frequency.is_empty() || pattern_frequency.is_empty() { - return 0.0; - } - - // Compute cosine similarity - // cosine_sim = dot_product / (norm1 * norm2) - - let mut dot_product = 0.0; - let mut norm1_squared = 0.0; - let mut norm2_squared = 0.0; - - // Collect all unique tokens from both sets - let all_tokens: HashSet = self - .token_frequency - .keys() - .chain(pattern_frequency.keys()) - .cloned() - .collect(); - - for token in all_tokens { - let freq1 = *self.token_frequency.get(&token).unwrap_or(&0) as f64; - let freq2 = *pattern_frequency.get(&token).unwrap_or(&0) as f64; - - dot_product += freq1 * freq2; - norm1_squared += freq1 * freq1; - norm2_squared += freq2 * freq2; - } - - let norm1 = norm1_squared.sqrt(); - let norm2 = norm2_squared.sqrt(); - - if norm1 == 0.0 || norm2 == 0.0 { - return 0.0; - } - - dot_product / (norm1 * norm2) - } - - /// Layered phrase matching: exact → character ngram → token cosine - /// Returns true if the pattern matches using any layer - #[allow(dead_code)] // Kept for reference; production uses matches_normalized_pattern - fn layered_contains_phrase( - &self, - pattern: &str, - char_ngram_threshold: f64, - token_cosine_threshold: f64, - ) -> bool { - // Layer 0: Exact phrase match (fastest) - if self.contains_phrase(pattern) { - return true; - } - - // Layer 1: Character ngram similarity (typo/edit robustness) - // Check whole message first (for short messages) - if self.char_ngram_similarity(pattern) >= char_ngram_threshold { - return true; - } - - // ngram containment check for patterns buried in longer messages - // If ALL of the pattern's ngrams exist in the message, the pattern must be - // present (possibly with minor variations like missing apostrophes). - // This is O(pattern_ngrams) lookups vs expensive window sliding. 
- if self.char_ngram_containment(pattern) >= 1.0 { - return true; - } - - // Layer 2: Token cosine similarity (semantic stability for long messages) - if self.token_cosine_similarity(pattern) >= token_cosine_threshold { - return true; - } - - false - } - - fn char_ngram_containment(&self, pattern: &str) -> f64 { - // Normalize the pattern the same way as char_ngram_similarity - let normalized_pattern = pattern - .to_lowercase() - .chars() - .filter(|c| c.is_alphanumeric() || c.is_whitespace()) - .collect::() - .split_whitespace() - .collect::>() - .join(" "); - - // Generate ngrams for the pattern - let pattern_ngrams: HashSet = normalized_pattern - .chars() - .collect::>() - .windows(NGRAM_SIZE) - .map(|w| w.iter().collect::()) - .collect(); - - if pattern_ngrams.is_empty() { - return 0.0; - } - - // Count how many pattern ngrams exist in the message - let contained = pattern_ngrams - .iter() - .filter(|t| self.char_ngram_set.contains(*t)) - .count(); - - contained as f64 / pattern_ngrams.len() as f64 - } - - /// Fast matching against a pre-normalized pattern - /// This avoids re-normalizing and re-computing ngrams for each pattern - fn matches_normalized_pattern( - &self, - pattern: &NormalizedPattern, - char_ngram_threshold: f64, - token_cosine_threshold: f64, - ) -> bool { - // Layer 0: Exact phrase match (fastest) - if self.contains_phrase(&pattern.raw) { - return true; - } - - // Layer 1: Character ngram similarity using pre-computed ngrams - if !self.char_ngram_set.is_empty() && !pattern.char_ngram_set.is_empty() { - let intersection = self - .char_ngram_set - .intersection(&pattern.char_ngram_set) - .count(); - let union = self.char_ngram_set.union(&pattern.char_ngram_set).count(); - if union > 0 { - let similarity = intersection as f64 / union as f64; - if similarity >= char_ngram_threshold { - return true; - } - } - } - - // Ngram containment check using pre-computed ngrams - if !pattern.char_ngram_set.is_empty() { - let contained = pattern - 
.char_ngram_set - .iter() - .filter(|t| self.char_ngram_set.contains(*t)) - .count(); - let containment = contained as f64 / pattern.char_ngram_set.len() as f64; - if containment >= 1.0 { - return true; - } - } - - // Layer 2: Token cosine similarity using pre-computed frequencies - if !self.token_frequency.is_empty() && !pattern.token_frequency.is_empty() { - let mut dot_product = 0.0; - let mut norm1_squared = 0.0; - let mut norm2_squared = 0.0; - - // Iterate over pattern tokens (usually smaller set) - for (token, &freq2) in &pattern.token_frequency { - let freq1 = *self.token_frequency.get(token).unwrap_or(&0) as f64; - let freq2 = freq2 as f64; - dot_product += freq1 * freq2; - norm2_squared += freq2 * freq2; - } - - // Add self tokens not in pattern for norm1 - for &freq1 in self.token_frequency.values() { - norm1_squared += (freq1 as f64) * (freq1 as f64); - } - - let norm1 = norm1_squared.sqrt(); - let norm2 = norm2_squared.sqrt(); - - if norm1 > 0.0 && norm2 > 0.0 { - let similarity = dot_product / (norm1 * norm2); - if similarity >= token_cosine_threshold { - return true; - } - } - } - - false - } -} - -// ============================================================================ -// Normalized Pattern (pre-computed for performance) -// ============================================================================ - -/// Pre-processed pattern with normalized text and pre-computed ngrams/tokens -/// This avoids redundant computation when matching against many messages -#[derive(Debug, Clone)] -struct NormalizedPattern { - /// Original raw pattern text - raw: String, - /// Character ngram set for similarity matching - char_ngram_set: HashSet, - /// Token frequency map for cosine similarity - token_frequency: HashMap, -} - -impl NormalizedPattern { - fn new(pattern: &str) -> Self { - // Normalize: lowercase and remove ALL punctuation - let normalized = pattern - .to_lowercase() - .chars() - .filter(|c| c.is_alphanumeric() || c.is_whitespace()) - .collect::() 
- .split_whitespace() - .collect::>() - .join(" "); - - // Generate ngrams - let char_ngram_set: HashSet = normalized - .chars() - .collect::>() - .windows(NGRAM_SIZE) - .map(|w| w.iter().collect::()) - .collect(); - - // Compute token frequency map - let tokens: Vec = normalized - .split_whitespace() - .map(|s| s.to_string()) - .collect(); - let mut token_frequency: HashMap = HashMap::new(); - for token in tokens { - *token_frequency.entry(token).or_insert(0) += 1; - } - - Self { - raw: pattern.to_string(), - char_ngram_set, - token_frequency, - } - } -} - -/// Helper to create a static slice of normalized patterns -fn normalize_patterns(patterns: &[&str]) -> Vec { - patterns.iter().map(|p| NormalizedPattern::new(p)).collect() -} - -// ============================================================================ -// Pre-computed Pattern Caches (initialized once at startup) -// ============================================================================ - -static REPAIR_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Explicit corrections - "i meant", - "i mean", - "sorry, i meant", - "what i meant was", - "what i actually meant", - "i was trying to say", - "let me correct that", - "correction", - "i misspoke", - // Negations and disagreements - "no, i", - "no i", - "nah i", - "nope i", - "not what i", - "that's not", - "that's not what", - "that isn't what", - "not quite", - "not exactly", - // Rephrasing indicators - "let me rephrase", - "let me try again", - "let me clarify", - "to clarify", - "to be clear", - "let me explain", - "what i'm trying to", - "what i'm saying", - "in other words", - // Actual/really emphasis - "actually i", - "actually no", - "what i actually", - "i actually", - "i really meant", - // Mistake acknowledgment - "i was wrong", - "my mistake", - "my bad", - "i should have said", - "i should clarify", - // Wait/hold indicators - "wait, i", - "wait no", - "hold on", - "hang on", - ]) -}); - -static COMPLAINT_PATTERNS: 
LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Useless/unhelpful (multi-word only) - "this is useless", - "not helpful", - "doesn't help", - "not helping", - "you're not helping", - "no help", - "unhelpful", - // Not working - "this doesn't work", - "doesn't work", - "not working", - "isn't working", - "won't work", - "still doesn't work", - "still not working", - // Not fixing/solving - "doesn't fix", - "not fixing", - "doesn't solve", - "doesn't seem to work", - "doesn't seem to fix", - "not resolving", - // Waste/pointless - "waste of time", - "wasting my time", - // Ridiculous/absurd - "this is ridiculous", - "ridiculous", - "this is absurd", - "absurd", - "this is insane", - "insane", - // Stupid/dumb (as adjectives, not as standalone tokens) - "this is stupid", - "this is dumb", - // Quality complaints (multi-word) - "this sucks", - "not good enough", - // Capability questions - "why can't you", - "can't you", - // Frustration - "this is frustrating", - "frustrated", - "incomplete", - "overwhelm", - "overwhelmed", - "overwhelming", - "exhausted", - "struggled", - // same issue - "same issue", - // polite dissatisfaction - "i'm disappointed", - "thanks, but", - "appreciate it, but", - "good, but", - // Fed up/done - "i give up", - "give up", - "fed up", - "had enough", - "can't take", - // Bot-specific complaints - "useless bot", - "dumb bot", - "stupid bot", - ]) -}); - -static CONFUSION_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Don't understand - "i don't understand", - "don't understand", - "not understanding", - "can't understand", - "don't get it", - "don't follow", - // Confused state - "i'm confused", - "so confused", - // Makes no sense - "makes no sense", - "doesn't make sense", - "not making sense", - // What do you mean (keep multi-word) - "what do you mean", - "what does that mean", - "what are you saying", - // Lost/unclear - "i'm lost", - "totally lost", - "lost me", - // No clue - "no clue", - "no idea", 
- // Come again - "come again", - "say that again", - "repeat that", - ]) -}); - -static GRATITUDE_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Standard gratitude - "thank you", - "thanks", - "thank u", - "thankyou", - "thx", - "ty", - "tyvm", - "tysm", - "thnx", - "thnks", - // Strong gratitude - "thanks so much", - "thank you so much", - "thanks a lot", - "thanks a bunch", - "much appreciated", - "really appreciate", - "greatly appreciate", - "appreciate it", - "appreciate that", - "i appreciate", - "grateful", - "so grateful", - // Helpfulness acknowledgment - "that's helpful", - "very helpful", - "super helpful", - "really helpful", - "that helps", - "this helps", - "helpful", - // Perfection expressions - "perfect", - "that's perfect", - "just perfect", - "exactly what i needed", - "exactly right", - "just what i needed", - "that's exactly", - // Informal positive - "you're the best", - "you rock", - "you're awesome", - "awesome sauce", - "legend", - ]) -}); - -static SATISFACTION_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Works/functions - "that works", - "this works", - "works great", - "works perfectly", - "works for me", - // Great variations - "that's great", - "that's amazing", - "this is great", - "sounds great", - "looks great", - "great job", - // Excellent/perfect - "excellent", - "outstanding", - "superb", - "spectacular", - // Awesome/amazing - "awesome", - "that's awesome", - "amazing", - "incredible", - // Love expressions - "love it", - "love this", - "i love", - "loving it", - "love that", - // Brilliant/wonderful - "brilliant", - "wonderful", - "fantastic", - "fabulous", - "marvelous", - ]) -}); - -static SUCCESS_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Understanding confirmation - "got it", - "i got it", - "understand", - "understood", - "i understand", - "makes sense", - "clear now", - "i see", - // Success/completion - "success", - "successful", - "it worked", 
- "that worked", - "this worked", - "worked", - // Problem resolution - "solved", - "resolved", - "fixed", - "fixed it", - "issue resolved", - "problem solved", - // Working state - "working now", - "it's working", - "works now", - "working fine", - "working great", - // Completion - "all set", - "all good", - "we're good", - "i'm good", - "all done", - "done", - "complete", - "finished", - // Perfect fit - "spot on", - "nailed it", - "bingo", - "exactly", - "just right", - ]) -}); - -static HUMAN_AGENT_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Speak to human - "speak to a human", - "speak to human", - "speak with a human", - "speak with human", - "talk to a human", - "talk to human", - "talk to a person", - "talk to person", - "talk to someone", - // Human/real agent - "human agent", - "real agent", - "actual agent", - "live agent", - "human support", - // Real/actual person - "real person", - "actual person", - "real human", - "actual human", - "someone real", - // Need/want human - "need a human", - "need human", - "want a human", - "want human", - "get me a human", - "get me human", - "get me someone", - // Transfer/connect - "transfer me", - "connect me", - "escalate this", - // Representative (removed standalone "rep" - too many false positives) - "representative", - "customer service rep", - "customer service representative", - // Not a bot - "not a bot", - "not talking to a bot", - "tired of bots", - ]) -}); - -static SUPPORT_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Contact support - "contact support", - "call support", - "reach support", - "get support", - // Customer support - "customer support", - "customer service", - "tech support", - "technical support", - // Help desk - "help desk", - "helpdesk", - "support desk", - // Talk to support - "talk to support", - "speak to support", - "speak with support", - "chat with support", - // Need help - "need real help", - "need actual help", - "help me 
now", - ]) -}); - -static QUIT_PATTERNS: LazyLock> = LazyLock::new(|| { - normalize_patterns(&[ - // Give up - "i give up", - "give up", - "giving up", - // Quit/leaving - "i'm going to quit", - "i quit", - "quitting", - "i'm leaving", - "i'm done", - "i'm out", - // Forget it - "forget it", - "forget this", - "screw it", - "screw this", - // Never mind - "never mind", - "nevermind", - "don't bother", - "not worth it", - // Hopeless - "this is hopeless", - // Going elsewhere - "going elsewhere", - "try somewhere else", - "look elsewhere", - "find another", - ]) -}); - -// ============================================================================ -// Core Signal Types -// ============================================================================ - -/// Overall quality assessment for an agent interaction session -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum InteractionQuality { - /// Excellent interaction with strong positive signals - Excellent, - /// Good interaction with mostly positive signals - Good, - /// Neutral interaction with mixed signals - Neutral, - /// Poor interaction with concerning signals - Poor, - /// Critical interaction with severe negative signals - Severe, -} - -/// Container for all computed signals for a conversation -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SignalReport { - /// Turn count and efficiency metrics - pub turn_count: TurnCountSignal, - /// Follow-up and repair frequency - pub follow_up: FollowUpSignal, - /// User frustration indicators - pub frustration: FrustrationSignal, - /// Repetition and looping behavior - pub repetition: RepetitionSignal, - /// Positive feedback indicators - pub positive_feedback: PositiveFeedbackSignal, - /// User escalation requests - pub escalation: EscalationSignal, - /// Overall quality assessment - pub overall_quality: InteractionQuality, - /// Human-readable summary - pub summary: String, -} - -// 
============================================================================ -// Individual Signal Types -// ============================================================================ - -/// Turn count and efficiency metrics -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TurnCountSignal { - /// Total number of turns (user-agent exchanges) - pub total_turns: usize, - /// Number of user messages - pub user_turns: usize, - /// Number of assistant messages - pub assistant_turns: usize, - /// Whether the turn count is concerning (> 7) - pub is_concerning: bool, - /// Whether the turn count is excessive (> 12) - pub is_excessive: bool, - /// Efficiency score (0.0-1.0, lower turns = higher score) - pub efficiency_score: f64, -} - -/// Follow-up and repair frequency signal -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FollowUpSignal { - /// Number of detected repair attempts - pub repair_count: usize, - /// Ratio of repairs to total user turns - pub repair_ratio: f64, - /// Whether repair ratio is concerning (> 0.3) - pub is_concerning: bool, - /// List of detected repair phrases - pub repair_phrases: Vec, -} - -/// User frustration indicators -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FrustrationSignal { - /// Number of frustration indicators detected - pub frustration_count: usize, - /// Whether frustration is detected - pub has_frustration: bool, - /// Severity level (0-3: none, mild, moderate, severe) - pub severity: u8, - /// List of detected frustration indicators - pub indicators: Vec, -} - -/// Individual frustration indicator -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct FrustrationIndicator { - /// Type of frustration detected - pub indicator_type: FrustrationType, - /// Message index where detected - pub message_index: usize, - /// Relevant text snippet - pub snippet: String, -} - -/// Types of frustration indicators -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum 
FrustrationType { - /// Negative sentiment detected - NegativeSentiment, - /// All caps typing - AllCaps, - /// Excessive punctuation - ExcessivePunctuation, - /// Profanity detected - Profanity, - /// Direct complaint - DirectComplaint, - /// Expression of confusion - Confusion, -} - -/// Repetition and looping behavior signal -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RepetitionSignal { - /// Number of repetitions detected - pub repetition_count: usize, - /// Whether significant looping detected (> 2 repetitions) - pub has_looping: bool, - /// Severity level (0-3: none, mild, moderate, severe) - pub severity: u8, - /// List of detected repetitions - pub repetitions: Vec, -} - -/// Individual repetition instance -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RepetitionInstance { - /// Message indices involved in repetition - pub message_indices: Vec, - /// Similarity score (0.0-1.0) - pub similarity: f64, - /// Type of repetition - pub repetition_type: RepetitionType, -} - -/// Types of repetition -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum RepetitionType { - /// Exact repetition - Exact, - /// Near-duplicate (high similarity) - NearDuplicate, - /// Semantic repetition (similar meaning) - Semantic, -} - -/// Positive feedback indicators -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PositiveFeedbackSignal { - /// Number of positive indicators detected - pub positive_count: usize, - /// Whether positive feedback is present - pub has_positive_feedback: bool, - /// Confidence score (0.0-1.0) - pub confidence: f64, - /// List of detected positive indicators - pub indicators: Vec, -} - -/// Individual positive indicator -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PositiveIndicator { - /// Type of positive feedback - pub indicator_type: PositiveType, - /// Message index where detected - pub message_index: usize, - /// Relevant text snippet - pub snippet: String, -} - -/// Types of 
positive indicators -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum PositiveType { - /// Expression of gratitude - Gratitude, - /// Explicit satisfaction - Satisfaction, - /// Confirmation of success - Success, - /// Positive sentiment - PositiveSentiment, - /// Natural topic transition - TopicTransition, -} - -/// User escalation signal -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EscalationSignal { - /// Whether escalation was requested - pub escalation_requested: bool, - /// Number of escalation requests - pub escalation_count: usize, - /// List of detected escalation requests - pub requests: Vec, -} - -/// Individual escalation request -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EscalationRequest { - /// Message index where detected - pub message_index: usize, - /// Relevant text snippet - pub snippet: String, - /// Type of escalation - pub escalation_type: EscalationType, -} - -/// Types of escalation -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum EscalationType { - /// Request for human agent - HumanAgent, - /// Request for support - Support, - /// Threat to quit/leave - ThreatToQuit, - /// General help request - HelpRequest, -} - -// ============================================================================ -// Signal Analyzer -// ============================================================================ - -/// Trait for analyzing conversation signals -pub trait SignalAnalyzer { - /// Analyze a conversation and generate a complete signal report - fn analyze(&self, messages: &[Message]) -> SignalReport; -} - -/// Text-based implementation of signal analyzer that computes all signals from a message array -pub struct TextBasedSignalAnalyzer { - /// Baseline expected turns for normal interactions - baseline_turns: usize, - /// Threshold for character ngram similarity (0.0-1.0) - char_ngram_threshold: f64, - /// Threshold for token cosine similarity (0.0-1.0) - 
token_cosine_threshold: f64, - /// Maximum message length in characters (prevents unbounded computation) - max_message_length: usize, - /// Maximum number of messages to process (prevents unbounded computation) - max_messages: usize, - /// Maximum window size for repetition detection (prevents O(n²) explosion) - max_repetition_window: usize, -} - -impl TextBasedSignalAnalyzer { - /// Extract text content from MessageContent, skipping non-text content - fn extract_text(content: &Option) -> Option { - match content { - Some(hermesllm::apis::openai::MessageContent::Text(text)) => Some(text.clone()), - // Tool calls and other structured content are skipped - _ => None, - } - } - - /// Create a new signal analyzer with default settings - pub fn new() -> Self { +impl Default for SignalAnalyzerConfig { + fn default() -> Self { Self { baseline_turns: 5, - char_ngram_threshold: 0.50, // Lowered to handle typos and small edits realistically - token_cosine_threshold: 0.60, // Lowered for better semantic match in varied contexts - max_message_length: 2000, // Prevent unbounded ngram generation - max_messages: 100, // Prevent unbounded message processing - max_repetition_window: 20, // Prevent O(n²) explosion in repetition detection - } - } - - /// Create a new signal analyzer with custom baseline - pub fn with_baseline(baseline_turns: usize) -> Self { - Self { - baseline_turns, - char_ngram_threshold: 0.50, + char_ngram_threshold: 0.65, token_cosine_threshold: 0.60, max_message_length: 2000, max_messages: 100, - max_repetition_window: 20, } } - - /// Create a new signal analyzer with custom settings - /// - /// # Arguments - /// * `baseline_turns` - Expected baseline turns for normal interactions - /// * `char_ngram_threshold` - Threshold for character ngram similarity (0.0-1.0) - /// * `token_cosine_threshold` - Threshold for token cosine similarity (0.0-1.0) - pub fn with_settings( - baseline_turns: usize, - char_ngram_threshold: f64, - token_cosine_threshold: f64, - ) -> 
Self { - Self { - baseline_turns, - char_ngram_threshold, - token_cosine_threshold, - max_message_length: 2000, - max_messages: 100, - max_repetition_window: 20, - } - } - - /// Create a new signal analyzer with full custom settings including computation limits - /// - /// # Arguments - /// * `baseline_turns` - Expected baseline turns for normal interactions - /// * `char_ngram_threshold` - Threshold for character ngram similarity (0.0-1.0) - /// * `token_cosine_threshold` - Threshold for token cosine similarity (0.0-1.0) - /// * `max_message_length` - Maximum characters per message to process - /// * `max_messages` - Maximum number of messages to process - /// * `max_repetition_window` - Maximum messages to compare for repetition detection - pub fn with_full_settings( - baseline_turns: usize, - char_ngram_threshold: f64, - token_cosine_threshold: f64, - max_message_length: usize, - max_messages: usize, - max_repetition_window: usize, - ) -> Self { - Self { - baseline_turns, - char_ngram_threshold, - token_cosine_threshold, - max_message_length, - max_messages, - max_repetition_window, - } - } - - // ======================================================================== - // Individual Signal Analyzers - // ======================================================================== - - /// Analyze turn count and efficiency - fn analyze_turn_count(&self, messages: &[Message]) -> TurnCountSignal { - let mut user_turns = 0; - let mut assistant_turns = 0; - - for message in messages { - match message.role { - Role::User => user_turns += 1, - Role::Assistant => assistant_turns += 1, - _ => {} - } - } - - let total_turns = user_turns + assistant_turns; - let is_concerning = total_turns > 7; - let is_excessive = total_turns > 12; - - // Calculate efficiency score (exponential decay after baseline) - let efficiency_score = if total_turns == 0 || total_turns <= self.baseline_turns { - 1.0 - } else { - let excess = total_turns - self.baseline_turns; - 1.0 / (1.0 + (excess as 
f64 * 0.3)) - }; - - TurnCountSignal { - total_turns, - user_turns, - assistant_turns, - is_concerning, - is_excessive, - efficiency_score, - } - } - - /// Analyze follow-up and repair frequency - fn analyze_follow_up( - &self, - normalized_messages: &[(usize, Role, NormalizedMessage)], - ) -> FollowUpSignal { - let mut repair_count = 0; - let mut repair_phrases = Vec::new(); - let mut user_turn_count = 0; - - for (pos, (i, role, norm_msg)) in normalized_messages.iter().enumerate() { - if *role != Role::User { - continue; - } - - user_turn_count += 1; - - // Use per-turn boolean to prevent double-counting - let mut found_in_turn = false; - - // Use pre-computed patterns for fast matching - for pattern in REPAIR_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - repair_count += 1; - repair_phrases.push(format!("Turn {}: '{}'", i + 1, pattern.raw)); - found_in_turn = true; - break; - } - } - - // Only check for semantic similarity if no pattern matched. Walk - // backwards through the *normalized* list (not the original - // conversation indices, which may be non-contiguous because - // messages without extractable text are filtered out) to find the - // most recent prior user message. 
- if !found_in_turn && pos >= 1 { - for j in (0..pos).rev() { - let (_, prev_role, prev_norm_msg) = &normalized_messages[j]; - if *prev_role == Role::User { - if self.is_similar_rephrase(norm_msg, prev_norm_msg) { - repair_count += 1; - repair_phrases - .push(format!("Turn {}: Similar rephrase detected", i + 1)); - } - break; - } - } - } - } - - let repair_ratio = if user_turn_count == 0 { - 0.0 - } else { - repair_count as f64 / user_turn_count as f64 - }; - - let is_concerning = repair_ratio > 0.3; - - FollowUpSignal { - repair_count, - repair_ratio, - is_concerning, - repair_phrases, - } - } - - /// Analyze user frustration indicators - fn analyze_frustration( - &self, - normalized_messages: &[(usize, Role, NormalizedMessage)], - ) -> FrustrationSignal { - let mut indicators = Vec::new(); - - // Profanity list - only as standalone tokens, not substrings - let profanity_tokens = [ - "damn", "damnit", "crap", "wtf", "ffs", "bullshit", "shit", "fuck", "fucking", - ]; - - for (i, role, norm_msg) in normalized_messages { - if *role != Role::User { - continue; - } - - let text = &norm_msg.raw; - - // Check for all caps (at least 10 chars and 80% uppercase) - let alpha_chars: String = text.chars().filter(|c| c.is_alphabetic()).collect(); - if alpha_chars.len() >= 10 { - let upper_count = alpha_chars.chars().filter(|c| c.is_uppercase()).count(); - let upper_ratio = upper_count as f64 / alpha_chars.len() as f64; - if upper_ratio >= 0.8 { - indicators.push(FrustrationIndicator { - indicator_type: FrustrationType::AllCaps, - message_index: *i, - snippet: text.chars().take(50).collect(), - }); - } - } - - // Check for excessive punctuation - let question_marks = text.matches('?').count(); - let exclamation_marks = text.matches('!').count(); - if question_marks >= 3 || exclamation_marks >= 3 { - indicators.push(FrustrationIndicator { - indicator_type: FrustrationType::ExcessivePunctuation, - message_index: *i, - snippet: text.chars().take(50).collect(), - }); - } - - // 
Check for complaint patterns using pre-computed patterns - for pattern in COMPLAINT_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - indicators.push(FrustrationIndicator { - indicator_type: FrustrationType::DirectComplaint, - message_index: *i, - snippet: pattern.raw.clone(), - }); - break; - } - } - - // Check for confusion patterns using pre-computed patterns - for pattern in CONFUSION_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - indicators.push(FrustrationIndicator { - indicator_type: FrustrationType::Confusion, - message_index: *i, - snippet: pattern.raw.clone(), - }); - break; - } - } - - // Check for profanity (token-based, not substring) - for token in &profanity_tokens { - if norm_msg.contains_token(token) { - indicators.push(FrustrationIndicator { - indicator_type: FrustrationType::Profanity, - message_index: *i, - snippet: token.to_string(), - }); - break; - } - } - } - - let frustration_count = indicators.len(); - let has_frustration = frustration_count > 0; - - // Calculate severity - let severity = if frustration_count == 0 { - 0 - } else if frustration_count <= 2 { - 1 - } else if frustration_count <= 4 { - 2 - } else { - 3 - }; - - FrustrationSignal { - frustration_count, - has_frustration, - severity, - indicators, - } - } - - /// Analyze repetition and looping behavior - fn analyze_repetition( - &self, - normalized_messages: &[(usize, Role, NormalizedMessage)], - ) -> RepetitionSignal { - let mut repetitions = Vec::new(); - - // Collect assistant messages with normalized content - let assistant_messages: Vec<(usize, &NormalizedMessage)> = normalized_messages - .iter() - .filter(|(_, role, _)| *role == Role::Assistant) - .map(|(i, _, norm_msg)| (*i, norm_msg)) - .collect(); - - // Limit the window size to prevent O(n²) explosion - // Only compare messages within the 
max_repetition_window - let window_size = self.max_repetition_window.min(assistant_messages.len()); - - // Check for exact or near-duplicate responses using bigram similarity - // Only compare within the sliding window - for i in 0..assistant_messages.len() { - let window_start = i + 1; - let window_end = (i + 1 + window_size).min(assistant_messages.len()); - - for j in window_start..window_end { - let (idx_i, norm_msg_i) = &assistant_messages[i]; - let (idx_j, norm_msg_j) = &assistant_messages[j]; - - // Skip if messages are too short - if norm_msg_i.tokens.len() < 5 || norm_msg_j.tokens.len() < 5 { - continue; - } - - // Calculate bigram-based similarity (more accurate for near-duplicates) - let similarity = self.calculate_bigram_similarity(norm_msg_i, norm_msg_j); - - // Exact match - lowered from 0.95 to 0.85 for bigram similarity - if similarity >= 0.85 { - repetitions.push(RepetitionInstance { - message_indices: vec![*idx_i, *idx_j], - similarity, - repetition_type: RepetitionType::Exact, - }); - } - // Near duplicate - lowered from 0.75 to 0.50 to catch subtle repetitions - else if similarity >= 0.50 { - repetitions.push(RepetitionInstance { - message_indices: vec![*idx_i, *idx_j], - similarity, - repetition_type: RepetitionType::NearDuplicate, - }); - } - } - } - - let repetition_count = repetitions.len(); - let has_looping = repetition_count > 2; - - let severity = if repetition_count == 0 { - 0 - } else if repetition_count <= 2 { - 1 - } else if repetition_count <= 4 { - 2 - } else { - 3 - }; - - RepetitionSignal { - repetition_count, - has_looping, - severity, - repetitions, - } - } - - /// Calculate bigram similarity using cached bigram sets - fn calculate_bigram_similarity( - &self, - norm_msg1: &NormalizedMessage, - norm_msg2: &NormalizedMessage, - ) -> f64 { - // Use pre-cached bigram sets for O(1) lookups - let set1 = &norm_msg1.bigram_set; - let set2 = &norm_msg2.bigram_set; - - if set1.is_empty() && set2.is_empty() { - return 1.0; // Both empty = 
identical - } - - if set1.is_empty() || set2.is_empty() { - return 0.0; - } - - let intersection = set1.intersection(set2).count(); - let union = set1.union(set2).count(); - - if union == 0 { - return 0.0; - } - - intersection as f64 / union as f64 - } - - /// Analyze positive feedback indicators - fn analyze_positive_feedback( - &self, - normalized_messages: &[(usize, Role, NormalizedMessage)], - ) -> PositiveFeedbackSignal { - let mut indicators = Vec::new(); - - for (i, role, norm_msg) in normalized_messages { - if *role != Role::User { - continue; - } - - // Use per-turn boolean to prevent double-counting - let mut found_in_turn = false; - - // Check gratitude using pre-computed patterns - for pattern in GRATITUDE_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - indicators.push(PositiveIndicator { - indicator_type: PositiveType::Gratitude, - message_index: *i, - snippet: pattern.raw.clone(), - }); - found_in_turn = true; - break; - } - } - - if found_in_turn { - continue; - } - - // Check satisfaction using pre-computed patterns - for pattern in SATISFACTION_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - indicators.push(PositiveIndicator { - indicator_type: PositiveType::Satisfaction, - message_index: *i, - snippet: pattern.raw.clone(), - }); - found_in_turn = true; - break; - } - } - - if found_in_turn { - continue; - } - - // Check success confirmation using pre-computed patterns - for pattern in SUCCESS_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - indicators.push(PositiveIndicator { - indicator_type: PositiveType::Success, - message_index: *i, - snippet: pattern.raw.clone(), - }); - break; - } - } - } - - let positive_count = indicators.len(); - let has_positive_feedback = positive_count 
> 0; - - // Calculate confidence based on number and diversity of indicators - let confidence = if positive_count == 0 { - 0.0 - } else if positive_count == 1 { - 0.6 - } else if positive_count == 2 { - 0.8 - } else { - 0.95 - }; - - PositiveFeedbackSignal { - positive_count, - has_positive_feedback, - confidence, - indicators, - } - } - - /// Analyze user escalation requests - fn analyze_escalation( - &self, - normalized_messages: &[(usize, Role, NormalizedMessage)], - ) -> EscalationSignal { - let mut requests = Vec::new(); - - for (i, role, norm_msg) in normalized_messages { - if *role != Role::User { - continue; - } - - let mut found_human_agent = false; - - // Check for human agent request using pre-computed patterns - for pattern in HUMAN_AGENT_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - requests.push(EscalationRequest { - message_index: *i, - snippet: pattern.raw.clone(), - escalation_type: EscalationType::HumanAgent, - }); - found_human_agent = true; - break; - } - } - - // Check for support request (only if no human agent request found) - // HumanAgent and Support are too similar and often match the same phrase - if !found_human_agent { - for pattern in SUPPORT_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - requests.push(EscalationRequest { - message_index: *i, - snippet: pattern.raw.clone(), - escalation_type: EscalationType::Support, - }); - break; - } - } - } - - // Check for quit threats (independent of HumanAgent/Support) - // A message can contain both "give up" (quit) and "speak to human" (escalation) - for pattern in QUIT_PATTERNS.iter() { - if norm_msg.matches_normalized_pattern( - pattern, - self.char_ngram_threshold, - self.token_cosine_threshold, - ) { - requests.push(EscalationRequest { - message_index: *i, - snippet: pattern.raw.clone(), - escalation_type: 
EscalationType::ThreatToQuit, - }); - break; - } - } - } - - let escalation_count = requests.len(); - let escalation_requested = escalation_count > 0; - - EscalationSignal { - escalation_requested, - escalation_count, - requests, - } - } - - // ======================================================================== - // Helper Methods - // ======================================================================== - - /// Check if two messages are similar rephrases - fn is_similar_rephrase( - &self, - norm_msg1: &NormalizedMessage, - norm_msg2: &NormalizedMessage, - ) -> bool { - // Skip if too short - if norm_msg1.tokens.len() < 3 || norm_msg2.tokens.len() < 3 { - return false; - } - - // Common stopwords to downweight - let stopwords: HashSet<&str> = [ - "i", "me", "my", "you", "the", "a", "an", "is", "are", "was", "were", "to", "with", - "for", "of", "at", "by", "in", "on", "it", "this", "that", "can", "could", "do", - "does", "did", "will", "would", "should", "be", - ] - .iter() - .cloned() - .collect(); - - // Filter out stopwords for meaningful overlap - let tokens1: HashSet<_> = norm_msg1 - .tokens - .iter() - .filter(|t| !stopwords.contains(t.as_str())) - .collect(); - let tokens2: HashSet<_> = norm_msg2 - .tokens - .iter() - .filter(|t| !stopwords.contains(t.as_str())) - .collect(); - - // Need at least 2 non-stopword tokens - if tokens1.len() < 2 || tokens2.len() < 2 { - return false; - } - - let intersection = tokens1.intersection(&tokens2).count(); - let min_size = tokens1.len().min(tokens2.len()); - - // High overlap suggests rephrase - let overlap_ratio = intersection as f64 / min_size as f64; - overlap_ratio >= 0.6 - } - - /// Assess overall interaction quality based on all signals - fn assess_overall_quality( - &self, - turn_count: &TurnCountSignal, - follow_up: &FollowUpSignal, - frustration: &FrustrationSignal, - repetition: &RepetitionSignal, - positive: &PositiveFeedbackSignal, - escalation: &EscalationSignal, - ) -> InteractionQuality { - // 
Critical conditions - immediate fail - if escalation.escalation_requested - || frustration.severity >= 3 - || repetition.severity >= 3 - || turn_count.is_excessive - { - return InteractionQuality::Severe; - } - - // Calculate quality score - let mut score = 50.0; // Start at neutral - - // Positive factors - if positive.has_positive_feedback { - score += 20.0 * positive.confidence; - } - score += turn_count.efficiency_score * 10.0; - - // Negative factors - if frustration.has_frustration { - score -= frustration.severity as f64 * 10.00; - } - if follow_up.is_concerning { - score -= 15.0; - } - if repetition.has_looping { - score -= repetition.severity as f64 * 8.0; - } - if turn_count.is_concerning { - score -= 10.0; - } - - // Map score to quality level - if score >= 75.0 { - InteractionQuality::Excellent - } else if score >= 60.0 { - InteractionQuality::Good - } else if score >= 40.0 { - InteractionQuality::Neutral - } else if score >= 25.0 { - InteractionQuality::Poor - } else { - InteractionQuality::Severe - } - } - - /// Generate human-readable summary - #[allow(clippy::too_many_arguments)] - fn generate_summary( - &self, - turn_count: &TurnCountSignal, - follow_up: &FollowUpSignal, - frustration: &FrustrationSignal, - repetition: &RepetitionSignal, - positive: &PositiveFeedbackSignal, - escalation: &EscalationSignal, - quality: &InteractionQuality, - ) -> String { - let mut summary_parts = Vec::new(); - - summary_parts.push(format!("Overall Quality: {:?}", quality)); - - summary_parts.push(format!( - "Turn Count: {} turns (efficiency: {:.1}%)", - turn_count.total_turns, - turn_count.efficiency_score * 100.0 - )); - - if follow_up.is_concerning { - summary_parts.push(format!( - "⚠️ High repair rate: {:.1}% of user turns", - follow_up.repair_ratio * 100.0 - )); - } - - if frustration.has_frustration { - summary_parts.push(format!( - "⚠️ Frustration detected: {} indicators (severity: {})", - frustration.frustration_count, frustration.severity - )); - } - - if 
repetition.has_looping { - summary_parts.push(format!( - "⚠️ Looping detected: {} repetitions", - repetition.repetition_count - )); - } - - if positive.has_positive_feedback { - summary_parts.push(format!( - "✓ Positive feedback: {} indicators", - positive.positive_count - )); - } - - if escalation.escalation_requested { - summary_parts.push(format!( - "⚠️ Escalation requested: {} requests", - escalation.escalation_count - )); - } - - summary_parts.join(" | ") - } } -impl SignalAnalyzer for TextBasedSignalAnalyzer { - fn analyze(&self, messages: &[Message]) -> SignalReport { - // Limit the number of messages to process (take most recent messages) - let messages_to_process = if messages.len() > self.max_messages { - &messages[messages.len() - self.max_messages..] +/// Top-level analyzer. +pub struct SignalAnalyzer { + cfg: SignalAnalyzerConfig, +} + +impl Default for SignalAnalyzer { + fn default() -> Self { + Self::new(SignalAnalyzerConfig::default()) + } +} + +impl SignalAnalyzer { + pub fn new(cfg: SignalAnalyzerConfig) -> Self { + Self { cfg } + } + + /// Run the full multi-layer analysis on a ShareGPT-shaped conversation. + pub fn analyze_sharegpt(&self, messages: &[ShareGptMessage<'_>]) -> SignalReport { + // Truncate to the last `max_messages` (last-N is what the Python does). + let slice: &[ShareGptMessage<'_>] = if messages.len() > self.cfg.max_messages { + &messages[messages.len() - self.cfg.max_messages..] } else { messages }; + let offset = messages.len().saturating_sub(slice.len()); - // Preprocess all messages once, filtering out non-text content (tool calls, etc.) - // and truncating long messages - let normalized_messages: Vec<(usize, Role, NormalizedMessage)> = messages_to_process + // Preprocess to absolute-indexed normalized human/gpt messages. 
+ let normalized_owned: Vec<(usize, &str, NormalizedMessage)> = slice .iter() .enumerate() - .filter_map(|(i, msg)| { - Self::extract_text(&msg.content).map(|text| { - ( - i, - msg.role.clone(), - NormalizedMessage::from_text_with_limit(&text, self.max_message_length), - ) - }) + .filter_map(|(i, m)| { + if (m.from == "human" || m.from == "gpt") && !m.value.is_empty() { + Some(( + offset + i, + m.from, + NormalizedMessage::from_text(m.value, self.cfg.max_message_length), + )) + } else { + None + } }) .collect(); - let turn_count = self.analyze_turn_count(messages_to_process); - let follow_up = self.analyze_follow_up(&normalized_messages); - let frustration = self.analyze_frustration(&normalized_messages); - let repetition = self.analyze_repetition(&normalized_messages); - let positive_feedback = self.analyze_positive_feedback(&normalized_messages); - let escalation = self.analyze_escalation(&normalized_messages); - - let overall_quality = self.assess_overall_quality( - &turn_count, - &follow_up, - &frustration, - &repetition, - &positive_feedback, - &escalation, + let misalignment = analyze_misalignment( + &normalized_owned, + self.cfg.char_ngram_threshold, + self.cfg.token_cosine_threshold, ); - let summary = self.generate_summary( - &turn_count, - &follow_up, - &frustration, - &repetition, - &positive_feedback, - &escalation, - &overall_quality, + let stagnation_input: Vec> = + slice.iter().map(|m| ShareGptMsg { from: m.from }).collect(); + let (mut stagnation, turn_metrics) = analyze_stagnation( + &stagnation_input, + &normalized_owned, + self.cfg.baseline_turns, + ); + + let disengagement = analyze_disengagement( + &normalized_owned, + self.cfg.char_ngram_threshold, + self.cfg.token_cosine_threshold, + ); + + let satisfaction = analyze_satisfaction( + &normalized_owned, + self.cfg.char_ngram_threshold, + self.cfg.token_cosine_threshold, + ); + + let failure = analyze_failure(slice); + let loops = analyze_loops(slice); + let exhaustion = 
analyze_exhaustion(slice); + + // Bias the dragging signal's message_index back into absolute coords. + for s in &mut stagnation.signals { + s.message_index = offset + s.message_index.min(slice.len().saturating_sub(1)); + } + + let interaction = InteractionSignals { + misalignment, + stagnation, + disengagement, + satisfaction, + }; + let execution = ExecutionSignals { failure, loops }; + let environment = EnvironmentSignals { exhaustion }; + + let (overall_quality, score) = assess_quality( + &interaction, + &execution, + &environment, + turn_metrics.user_turns, + ); + let summary = generate_summary( + &turn_metrics, + &interaction, + &execution, + &environment, + overall_quality, ); SignalReport { - turn_count, - follow_up, - frustration, - repetition, - positive_feedback, - escalation, + interaction, + execution, + environment, overall_quality, + quality_score: score, + turn_metrics, summary, } } -} -impl Default for TextBasedSignalAnalyzer { - fn default() -> Self { - Self::new() + /// Convenience entry point: convert OpenAI-shaped chat `Message`s into the + /// ShareGPT format the detectors operate on, then run analysis. + pub fn analyze_openai(&self, messages: &[Message]) -> SignalReport { + let owned = messages_to_sharegpt(messages); + let view: Vec> = owned + .iter() + .map(|(role, value)| ShareGptMessage { + from: role.as_str(), + value: value.as_str(), + }) + .collect(); + self.analyze_sharegpt(&view) } } -// ============================================================================ -// Tests -// ============================================================================ +/// Convert OpenAI-shaped messages to a sequence of ShareGPT +/// `(role, value)` pairs. 
+/// +/// Mapping (preserves original message order; tool calls are emitted as a +/// separate `function_call` row immediately after the assistant text): +/// +/// - `User` -> `("human", text)` +/// - `Assistant` -> `("gpt", text)`, then one `("function_call", json)` per tool call +/// - `Tool` -> `("observation", text)` +/// - `System` / `Developer` -> dropped (not analyzed) +pub fn messages_to_sharegpt(messages: &[Message]) -> Vec<(String, String)> { + let mut out: Vec<(String, String)> = Vec::with_capacity(messages.len()); + for m in messages { + match m.role { + Role::User => { + let text = m.content.extract_text(); + out.push(("human".to_string(), text)); + } + Role::Assistant => { + let text = m.content.extract_text(); + if !text.is_empty() { + out.push(("gpt".to_string(), text)); + } + if let Some(calls) = &m.tool_calls { + for call in calls { + let payload = serde_json::json!({ + "name": call.function.name, + "arguments": call.function.arguments, + }); + out.push(("function_call".to_string(), payload.to_string())); + } + } + } + Role::Tool => { + let text = m.content.extract_text(); + out.push(("observation".to_string(), text)); + } + Role::System | Role::Developer => {} + } + } + out +} + +// --------------------------------------------------------------------------- +// Quality scoring (mirrors `_assess_quality` in the reference) +// --------------------------------------------------------------------------- + +fn assess_quality( + interaction: &InteractionSignals, + execution: &ExecutionSignals, + environment: &EnvironmentSignals, + user_turns: usize, +) -> (InteractionQuality, f32) { + // Critical: explicit escalation/quit OR severe disengagement OR severe stagnation. 
+ let has_escalation_or_quit = interaction.disengagement.signals.iter().any(|s| { + matches!( + s.signal_type, + SignalType::DisengagementEscalation | SignalType::DisengagementQuit + ) + }); + if (interaction.disengagement.count > 0 && has_escalation_or_quit) + || interaction.disengagement.severity >= 3 + || interaction.stagnation.severity >= 3 + { + return (InteractionQuality::Severe, 0.0); + } + + let mut score: f32 = 50.0; + + if interaction.satisfaction.count > 0 { + let confidence = match interaction.satisfaction.count { + 1 => 0.6, + 2 => 0.8, + _ => 0.95, + }; + score += 20.0 * confidence; + } + + if interaction.disengagement.count > 0 { + score -= interaction.disengagement.severity as f32 * 10.0; + } + if interaction.misalignment.severity > 0 { + let denom = user_turns.max(1) as f32; + if interaction.misalignment.count as f32 / denom > 0.3 { + score -= 15.0; + } + } + if interaction.stagnation.count > 2 { + score -= interaction.stagnation.severity as f32 * 8.0; + } + + if execution.failure.count > 0 { + score -= execution.failure.count as f32 * 8.0; + } + if execution.loops.count > 0 { + score -= execution.loops.count as f32 * 5.0; + } + if environment.exhaustion.count > 0 { + score -= environment.exhaustion.count as f32 * 3.0; + } + + score = score.clamp(0.0, 100.0); + + let quality = if score >= 75.0 { + InteractionQuality::Excellent + } else if score >= 60.0 { + InteractionQuality::Good + } else if score >= 40.0 { + InteractionQuality::Neutral + } else if score >= 25.0 { + InteractionQuality::Poor + } else { + InteractionQuality::Severe + }; + (quality, score) +} + +fn generate_summary( + turn_metrics: &TurnMetrics, + interaction: &InteractionSignals, + execution: &ExecutionSignals, + environment: &EnvironmentSignals, + quality: InteractionQuality, +) -> String { + let mut parts: Vec = Vec::new(); + parts.push(format!("Overall Quality: {}", quality.as_str())); + parts.push(format!( + "Turn Count: {} turns (efficiency: {:.1}%)", + 
turn_metrics.total_turns, + turn_metrics.efficiency_score * 100.0 + )); + + if interaction.misalignment.count > 0 { + let denom = turn_metrics.user_turns.max(1) as f32; + let repair_ratio = interaction.misalignment.count as f32 / denom; + if repair_ratio > 0.3 { + parts.push(format!( + "High misalignment rate: {:.1}% of user turns", + repair_ratio * 100.0 + )); + } + } + + if interaction.disengagement.count > 0 { + parts.push(format!( + "Disengagement detected: {} indicators (severity: {})", + interaction.disengagement.count, interaction.disengagement.severity + )); + } + + if interaction.stagnation.count > 2 { + parts.push(format!( + "Looping detected: {} repetitions", + interaction.stagnation.count + )); + } + + if interaction.satisfaction.count > 0 { + parts.push(format!( + "Positive feedback: {} indicators", + interaction.satisfaction.count + )); + } + + if execution.failure.count > 0 { + parts.push(format!( + "Execution failures: {} (agent-caused)", + execution.failure.count + )); + } + + if environment.exhaustion.count > 0 { + parts.push(format!( + "Environment issues: {} (external)", + environment.exhaustion.count + )); + } + + let escalation_count = interaction + .disengagement + .signals + .iter() + .filter(|s| matches!(s.signal_type, SignalType::DisengagementEscalation)) + .count(); + if escalation_count > 0 { + parts.push(format!( + "Escalation requested: {} requests", + escalation_count + )); + } + + parts.join(" | ") +} #[cfg(test)] mod tests { use super::*; - use hermesllm::apis::openai::MessageContent; - use hermesllm::transforms::lib::ExtractText; - use std::time::Instant; + use hermesllm::apis::openai::{Message, MessageContent, Role}; + #[allow(unused_imports)] + use hermesllm::transforms::ExtractText; - fn create_message(role: Role, content: &str) -> Message { + fn user(t: &str) -> Message { Message { - role, - content: Some(MessageContent::Text(content.to_string())), + role: Role::User, + content: Some(MessageContent::Text(t.to_string())), + 
name: None, + tool_calls: None, + tool_call_id: None, + } + } + fn assistant(t: &str) -> Message { + Message { + role: Role::Assistant, + content: Some(MessageContent::Text(t.to_string())), name: None, tool_calls: None, tool_call_id: None, } } - // ======================================================================== - // Tests for New Similarity Methods - // ======================================================================== - #[test] - fn test_char_ngram_similarity_exact_match() { - let msg = NormalizedMessage::from_text("thank you very much"); - let similarity = msg.char_ngram_similarity("thank you very much"); - assert!( - similarity > 0.95, - "Exact match should have very high similarity" - ); + fn report_quality_neutral_for_short_clean_chat() { + let msgs = vec![ + user("Hello, can you help me with a question?"), + assistant("Of course, what's your question?"), + user("How does X work?"), + assistant("X works by ..."), + ]; + let r = SignalAnalyzer::default().analyze_openai(&msgs); + assert!(matches!( + r.overall_quality, + InteractionQuality::Neutral | InteractionQuality::Good | InteractionQuality::Excellent + )); + assert!(r.summary.starts_with("Overall Quality:")); } #[test] - fn test_char_ngram_similarity_typo() { - let msg = NormalizedMessage::from_text("thank you very much"); - // Common typo: "thnks" instead of "thanks" - let similarity = msg.char_ngram_similarity("thnks you very much"); - assert!( - similarity > 0.50, - "Should handle single-character typo with decent similarity: {}", - similarity - ); - } - - #[test] - fn test_char_ngram_similarity_small_edit() { - let msg = NormalizedMessage::from_text("this doesn't work"); - let similarity = msg.char_ngram_similarity("this doesnt work"); - assert!( - similarity > 0.70, - "Should handle punctuation removal gracefully: {}", - similarity - ); - } - - #[test] - fn test_char_ngram_similarity_word_insertion() { - let msg = NormalizedMessage::from_text("i don't understand"); - let similarity = 
msg.char_ngram_similarity("i really don't understand"); - assert!( - similarity > 0.40, - "Should be robust to word insertions: {}", - similarity - ); - } - - #[test] - fn test_token_cosine_similarity_exact_match() { - let msg = NormalizedMessage::from_text("this is not helpful"); - let similarity = msg.token_cosine_similarity("this is not helpful"); - assert!( - (similarity - 1.0).abs() < 0.01, - "Exact match should have cosine similarity of 1.0" - ); - } - - #[test] - fn test_token_cosine_similarity_word_order() { - let msg = NormalizedMessage::from_text("not helpful at all"); - let similarity = msg.token_cosine_similarity("helpful not at all"); - assert!( - similarity > 0.95, - "Should be robust to word order changes: {}", - similarity - ); - } - - #[test] - fn test_token_cosine_similarity_frequency() { - let msg = NormalizedMessage::from_text("help help help please"); - let similarity = msg.token_cosine_similarity("help please"); - assert!( - similarity > 0.7 && similarity < 1.0, - "Should account for frequency differences: {}", - similarity - ); - } - - #[test] - fn test_token_cosine_similarity_long_message_with_context() { - let msg = NormalizedMessage::from_text( - "I've been trying to set up my account for the past hour \ - and the verification email never arrived. I checked my spam folder \ - and still nothing. 
This is really frustrating and not helpful at all.", - ); - let similarity = msg.token_cosine_similarity("not helpful"); - assert!( - similarity > 0.15 && similarity < 0.7, - "Should detect pattern in long message with lower but non-zero similarity: {}", - similarity - ); - } - - #[test] - fn test_layered_matching_exact_hit() { - let msg = NormalizedMessage::from_text("thank you so much"); - assert!( - msg.layered_contains_phrase("thank you", 0.50, 0.60), - "Should match exact phrase in Layer 0" - ); - } - - #[test] - fn test_layered_matching_typo_hit() { - // Test that shows layered matching is more robust than exact matching alone - let msg = NormalizedMessage::from_text("it doesnt work for me"); - - // "doesnt work" should match "doesn't work" via character ngrams (high overlap) - assert!( - msg.layered_contains_phrase("doesn't work", 0.50, 0.60), - "Should match 'doesnt work' to 'doesn't work' via character ngrams" - ); - } - - #[test] - fn test_layered_matching_word_order_hit() { - let msg = NormalizedMessage::from_text("helpful not very"); - assert!( - msg.layered_contains_phrase("not helpful", 0.50, 0.60), - "Should match reordered words via token cosine in Layer 2" - ); - } - - #[test] - fn test_layered_matching_long_message_with_pattern() { - let msg = NormalizedMessage::from_text( - "I've tried everything and followed all the instructions \ - but this is not helpful at all and I'm getting frustrated", - ); - assert!( - msg.layered_contains_phrase("not helpful", 0.50, 0.60), - "Should detect pattern buried in long message" - ); - } - - #[test] - fn test_layered_matching_no_match() { - let msg = NormalizedMessage::from_text("everything is working perfectly"); - assert!( - !msg.layered_contains_phrase("not helpful", 0.50, 0.60), - "Should not match completely different content" - ); - } - - #[test] - fn test_char_ngram_vs_token_cosine_tradeoffs() { - // Character ngrams handle character-level changes well - let msg1 = NormalizedMessage::from_text("this 
doesnt work"); - let char_sim1 = msg1.char_ngram_similarity("this doesn't work"); - assert!( - char_sim1 > 0.70, - "Character ngrams should handle punctuation: {}", - char_sim1 - ); - - // Token cosine is better for word order and long messages with semantic overlap - let msg2 = - NormalizedMessage::from_text("I really appreciate all your help with this issue today"); - let token_sim2 = msg2.token_cosine_similarity("thank you for help"); - assert!( - token_sim2 > 0.15, - "Token cosine should detect semantic overlap: {}", - token_sim2 - ); - } - - // ======================================================================== - // Existing Tests - // ======================================================================== - - fn preprocess_messages(messages: &[Message]) -> Vec<(usize, Role, NormalizedMessage)> { - messages + fn report_severe_when_user_escalates() { + let msgs = vec![ + user("This isn't helpful at all"), + assistant("I'm sorry, can you tell me more?"), + user("Get me a human, this is useless"), + ]; + let r = SignalAnalyzer::default().analyze_openai(&msgs); + assert_eq!(r.overall_quality, InteractionQuality::Severe); + assert!(r + .interaction + .disengagement + .signals .iter() - .enumerate() - .map(|(i, msg)| { - let text = msg.content.extract_text(); - (i, msg.role.clone(), NormalizedMessage::from_text(&text)) - }) - .collect() + .any(|s| matches!(s.signal_type, SignalType::DisengagementEscalation))); } #[test] - fn test_turn_count_efficient() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Hello"), - create_message(Role::Assistant, "Hi! 
How can I help?"), - create_message(Role::User, "Thanks!"), + fn report_excellent_when_user_satisfied() { + let msgs = vec![ + user("Can you summarize this report?"), + assistant("Here's a summary: ..."), + user("That's perfect, exactly what I needed, you're awesome!"), ]; - - let signal = analyzer.analyze_turn_count(&messages); - assert_eq!(signal.total_turns, 3); - assert_eq!(signal.user_turns, 2); - assert_eq!(signal.assistant_turns, 1); - assert!(!signal.is_concerning); - assert!(!signal.is_excessive); - assert!(signal.efficiency_score > 0.9); - println!("test_turn_count_efficient took: {:?}", start.elapsed()); + let r = SignalAnalyzer::default().analyze_openai(&msgs); + assert!(r.interaction.satisfaction.count > 0); + assert!(matches!( + r.overall_quality, + InteractionQuality::Good | InteractionQuality::Excellent + )); } #[test] - fn test_turn_count_excessive() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let mut messages = Vec::new(); - for i in 0..15 { - messages.push(create_message( - if i % 2 == 0 { - Role::User - } else { - Role::Assistant - }, - &format!("Message {}", i), - )); - } - - let signal = analyzer.analyze_turn_count(&messages); - assert_eq!(signal.total_turns, 15); - assert!(signal.is_concerning); - assert!(signal.is_excessive); - assert!(signal.efficiency_score < 0.5); - println!("test_turn_count_excessive took: {:?}", start.elapsed()); - } - - #[test] - fn test_follow_up_detection() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Show me restaurants"), - create_message(Role::Assistant, "Here are some options"), - create_message(Role::User, "No, I meant Italian restaurants"), - create_message(Role::Assistant, "Here are Italian restaurants"), + fn repro_gratitude_does_not_trigger_misalignment() { + let msgs = vec![ + user("What is the weather in Istanbul?"), + assistant("Istanbul is 14C and partly cloudy."), + user("That 
worked, exactly what I needed. Thanks, that is perfect!"), ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_follow_up(&normalized_messages); - assert_eq!(signal.repair_count, 1); - assert!(signal.repair_ratio > 0.0); - println!("test_follow_up_detection took: {:?}", start.elapsed()); - } - - #[test] - fn test_follow_up_does_not_panic_with_filtered_messages() { - // Regression test: the preprocessing pipeline filters out messages - // without extractable text (tool calls, tool results, empty content). - // The stored tuple index `i` is the ORIGINAL-conversation index, so - // once anything is filtered out, `i` no longer matches the position - // inside `normalized_messages`. The old code used `*i` to index into - // `normalized_messages`, which panicked with "index out of bounds" - // when a user message appeared after filtered entries. - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - Message { - role: Role::User, - content: Some(hermesllm::apis::openai::MessageContent::Text( - "first question".to_string(), - )), - name: None, - tool_calls: None, - tool_call_id: None, - }, - // Assistant message with no text content (e.g. tool call) — filtered out. - Message { - role: Role::Assistant, - content: None, - name: None, - tool_calls: None, - tool_call_id: None, - }, - // Tool-role message with no extractable text — filtered out. - Message { - role: Role::Tool, - content: None, - name: None, - tool_calls: None, - tool_call_id: None, - }, - Message { - role: Role::Assistant, - content: Some(hermesllm::apis::openai::MessageContent::Text( - "some answer".to_string(), - )), - name: None, - tool_calls: None, - tool_call_id: None, - }, - // Rephrased user turn — original index 4, but after filtering - // only 3 messages remain in `normalized_messages` before it. 
- Message { - role: Role::User, - content: Some(hermesllm::apis::openai::MessageContent::Text( - "first question please".to_string(), - )), - name: None, - tool_calls: None, - tool_call_id: None, - }, - ]; - - // Must not panic — exercises the full analyze pipeline. - let _report = analyzer.analyze(&messages); - } - - #[test] - fn test_frustration_detection() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "THIS IS RIDICULOUS!!!"), - create_message(Role::Assistant, "I apologize for the frustration"), - create_message(Role::User, "This doesn't work at all"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - assert!(signal.has_frustration); - assert!(signal.frustration_count >= 2); - assert!(signal.severity > 0); - println!("test_frustration_detection took: {:?}", start.elapsed()); - } - - #[test] - fn test_positive_feedback_detection() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Can you help me?"), - create_message(Role::Assistant, "Sure!"), - create_message(Role::User, "Thank you! 
That's exactly what I needed."), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_positive_feedback(&normalized_messages); - assert!(signal.has_positive_feedback); - assert!(signal.positive_count >= 1); - assert!(signal.confidence > 0.5); - println!( - "test_positive_feedback_detection took: {:?}", - start.elapsed() - ); - } - - #[test] - fn test_escalation_detection() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "This isn't working"), - create_message(Role::Assistant, "Let me help"), - create_message(Role::User, "I need to speak to a human agent"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_escalation(&normalized_messages); - assert!(signal.escalation_requested); - assert_eq!(signal.escalation_count, 1); - println!("test_escalation_detection took: {:?}", start.elapsed()); - } - - #[test] - fn test_repetition_detection() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "What's the weather?"), - create_message( - Role::Assistant, - "I can help you with the weather information", - ), - create_message(Role::User, "Show me the forecast"), - create_message(Role::Assistant, "Sure, I can help you with the forecast"), - create_message(Role::User, "Stop repeating yourself"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_repetition(&normalized_messages); - - for rep in &signal.repetitions { - println!( - " - Messages {:?}, similarity: {:.3}, type: {:?}", - rep.message_indices, rep.similarity, rep.repetition_type + let r = SignalAnalyzer::default().analyze_openai(&msgs); + for s in &r.interaction.misalignment.signals { + eprintln!( + "misalignment fired: type={:?} idx={} snippet={:?} meta={:?}", + s.signal_type, s.message_index, s.snippet, s.metadata 
); } - - assert!(signal.repetition_count > 0, - "Should detect the subtle repetition between 'I can help you with the weather information' \ - and 'Sure, I can help you with the forecast'"); - println!("test_repetition_detection took: {:?}", start.elapsed()); - } - - #[test] - fn test_full_analysis_excellent() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "I need to book a flight"), - create_message(Role::Assistant, "Sure! Where would you like to go?"), - create_message(Role::User, "New York"), - create_message(Role::Assistant, "Great! I found several options."), - create_message(Role::User, "Perfect!"), - ]; - - let report = analyzer.analyze(&messages); - assert!(matches!( - report.overall_quality, - InteractionQuality::Excellent | InteractionQuality::Good - )); - assert!(report.positive_feedback.has_positive_feedback); - assert!(!report.frustration.has_frustration); - println!("test_full_analysis_excellent took: {:?}", start.elapsed()); - } - - #[test] - fn test_full_analysis_poor() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Help me"), - create_message(Role::Assistant, "How can I assist?"), - create_message(Role::User, "No, I meant something else"), - create_message(Role::Assistant, "What do you need?"), - create_message(Role::User, "THIS DOESN'T WORK!!!"), - create_message(Role::Assistant, "I apologize"), - create_message(Role::User, "Let me speak to a human"), - ]; - - let report = analyzer.analyze(&messages); - assert!(matches!( - report.overall_quality, - InteractionQuality::Poor | InteractionQuality::Severe - )); - assert!(report.frustration.has_frustration); - assert!(report.escalation.escalation_requested); - println!("test_full_analysis_poor took: {:?}", start.elapsed()); - } - - #[test] - fn test_fuzzy_matching_gratitude() { - let start = Instant::now(); - let analyzer = 
TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Can you help me?"), - create_message(Role::Assistant, "Sure!"), - create_message(Role::User, "thnaks! that's exactly what i needed."), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_positive_feedback(&normalized_messages); - assert!(signal.has_positive_feedback); - assert!(signal.positive_count >= 1); - println!("test_fuzzy_matching_gratitude took: {:?}", start.elapsed()); - } - - #[test] - fn test_fuzzy_matching_escalation() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "This isn't working"), - create_message(Role::Assistant, "Let me help"), - create_message(Role::User, "i need to speek to a human agnet"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_escalation(&normalized_messages); - assert!(signal.escalation_requested); - assert_eq!(signal.escalation_count, 1); - println!("test_fuzzy_matching_escalation took: {:?}", start.elapsed()); - } - - #[test] - fn test_fuzzy_matching_repair() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Show me restaurants"), - create_message(Role::Assistant, "Here are some options"), - create_message(Role::User, "no i ment Italian restaurants"), - create_message(Role::Assistant, "Here are Italian restaurants"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_follow_up(&normalized_messages); - assert!(signal.repair_count >= 1); - println!("test_fuzzy_matching_repair took: {:?}", start.elapsed()); - } - - #[test] - fn test_fuzzy_matching_complaint() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - // Use a complaint that should match - "doesnt work" is close enough to "doesn't work" - let 
messages = vec![ - create_message(Role::User, "this doesnt work at all"), // Common typo: missing apostrophe - create_message(Role::Assistant, "I apologize"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - - // The layered matching should catch this via character ngrams or token cosine - // "doesnt work" has high character-level similarity to "doesn't work" - assert!( - signal.has_frustration, - "Should detect frustration from complaint pattern" - ); - assert!(signal.frustration_count >= 1); - println!("test_fuzzy_matching_complaint took: {:?}", start.elapsed()); - } - - #[test] - fn test_exact_match_priority() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message(Role::User, "thank you so much")]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_positive_feedback(&normalized_messages); - assert!(signal.has_positive_feedback); - // Should detect exact match, not fuzzy - assert!(signal.indicators[0].snippet.contains("thank you")); - assert!(!signal.indicators[0].snippet.contains("fuzzy")); - println!("test_exact_match_priority took: {:?}", start.elapsed()); - } - - // ======================================================================== - // Anti-Tests: Verify fixes stay fixed - // ======================================================================== - - #[test] - fn test_hello_not_profanity() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message(Role::User, "hello there")]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - assert!( - !signal.has_frustration, - "\"hello\" should not trigger profanity detection" - ); - } - - #[test] - fn test_prepare_not_escalation() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( 
- Role::User, - "Can you help me prepare for the meeting?", - )]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_escalation(&normalized_messages); - assert!( - !signal.escalation_requested, - "\"prepare\" should not trigger escalation (rep pattern removed)" - ); - } - - #[test] - fn test_unicode_apostrophe_confusion() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "I'm confused"), // Unicode apostrophe - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - assert!( - signal.has_frustration, - "Unicode apostrophe 'I'm confused' should trigger confusion" - ); - } - - #[test] - fn test_unicode_quotes_work() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( - Role::User, - "\u{201C}doesn\u{2019}t work\u{201D} with unicode quotes", - )]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - assert!( - signal.has_frustration, - "Unicode quotes should be normalized and match patterns" - ); - } - - #[test] - fn test_absolute_not_profanity() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message(Role::User, "That's absolute nonsense")]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized_messages); - // Should match on "nonsense" logic, not on "bs" substring - let has_bs_match = signal - .indicators - .iter() - .any(|ind| ind.snippet.contains("bs")); - assert!( - !has_bs_match, - "\"absolute\" should not trigger 'bs' profanity match" - ); - } - - #[test] - fn test_stopwords_not_rephrase() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Help me with X"), - create_message(Role::Assistant, "Sure"), - create_message(Role::User, 
"Help me with Y"), - ]; - - let normalized_messages = preprocess_messages(&messages); - let signal = analyzer.analyze_follow_up(&normalized_messages); - // Should not detect as rephrase since only stopwords overlap assert_eq!( - signal.repair_count, 0, - "Messages with only stopword overlap should not be rephrases" + r.interaction.misalignment.count, 0, + "a pure gratitude message should not trigger repair/misalignment" ); + assert!(r.interaction.satisfaction.count > 0); } #[test] - fn test_frustrated_user_with_legitimate_repair() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - - use hermesllm::apis::openai::{FunctionCall, ToolCall}; - - // Helper to create a message with tool calls - let create_assistant_with_tools = - |content: &str, tool_id: &str, tool_name: &str, args: &str| -> Message { - Message { - role: Role::Assistant, - content: Some(MessageContent::Text(content.to_string())), - name: None, - tool_calls: Some(vec![ToolCall { - id: tool_id.to_string(), - call_type: "function".to_string(), - function: FunctionCall { - name: tool_name.to_string(), - arguments: args.to_string(), - }, - }]), - tool_call_id: None, - } - }; - - // Helper to create a tool response message - let create_tool_message = |tool_call_id: &str, content: &str| -> Message { - Message { - role: Role::Tool, - content: Some(MessageContent::Text(content.to_string())), - name: None, - tool_calls: None, - tool_call_id: Some(tool_call_id.to_string()), - } - }; - - // Scenario: User DOES mention New York in first message, making "I already told you" legitimate - let messages = vec![ - create_message( - Role::User, - "I need to book a flight from New York to Paris for December 20th", - ), - create_assistant_with_tools( - "I'll help you search for flights to Paris.", - "call_123", - "search_flights", - r#"{"origin": "NYC", "destination": "Paris", "date": "2025-12-20"}"#, - ), - create_tool_message("call_123", r#"{"flights": []}"#), - create_message( - 
Role::Assistant, - "I couldn't find any flights. Could you provide your departure city?", - ), - create_message(Role::User, "I already told you, from New York!"), - create_assistant_with_tools( - "Let me try again.", - "call_456", - "search_flights", - r#"{"origin": "New York", "destination": "Paris", "date": "2025-12-20"}"#, - ), - create_tool_message("call_456", r#"{"flights": []}"#), - create_message( - Role::Assistant, - "I'm still not finding results. Let me check the system.", - ), - create_message( - Role::User, - "THIS IS RIDICULOUS!!! The tool doesn't work at all. Why do you keep calling it?", - ), - create_message( - Role::Assistant, - "I sincerely apologize for the frustration with the search tool.", - ), - create_message( - Role::User, - "Forget it. I need to speak to a human agent. This is a waste of time.", - ), + fn execution_failures_lower_quality() { + let msgs = vec![ShareGptMessage { + from: "human", + value: "do the thing", + }]; + let _ = msgs; + // Build a synthetic ShareGPT input with multiple tool failures. 
+ let convo = vec![ + ShareGptMessage { + from: "human", + value: "create a user", + }, + ShareGptMessage { + from: "function_call", + value: r#"{"name":"create_user","arguments":{"age":"twelve"}}"#, + }, + ShareGptMessage { + from: "observation", + value: "Error: validation failed - expected integer got string", + }, + ShareGptMessage { + from: "function_call", + value: r#"{"name":"create_user","arguments":{}}"#, + }, + ShareGptMessage { + from: "observation", + value: "missing required field: name", + }, ]; - - let report = analyzer.analyze(&messages); - - // Tool messages should be filtered out, so we should only analyze text messages - // That's 4 user messages + 5 assistant text messages = 9 turns - assert_eq!( - report.turn_count.total_turns, 9, - "Should count 9 text messages (tool messages filtered out)" - ); - assert!( - report.turn_count.is_concerning, - "Should flag concerning turn count" - ); - - // Should detect frustration (all caps, complaints) - assert!( - report.frustration.has_frustration, - "Should detect frustration" - ); - assert!( - report.frustration.frustration_count >= 2, - "Should detect multiple frustration indicators" - ); - assert!( - report.frustration.severity >= 2, - "Should have moderate or higher frustration severity" - ); - - // Should detect escalation request - assert!( - report.escalation.escalation_requested, - "Should detect escalation to human agent" - ); - assert!( - report.escalation.escalation_count >= 1, - "Should detect at least one escalation" - ); - - // Overall quality should be Poor or Severe - assert!( - matches!( - report.overall_quality, - InteractionQuality::Poor | InteractionQuality::Severe - ), - "Quality should be Poor or Severe, got {:?}", - report.overall_quality - ); - - println!( - "test_frustrated_user_with_legitimate_repair took: {:?}", - start.elapsed() - ); - } - - #[test] - fn test_frustrated_user_false_claim() { - let start = Instant::now(); - let analyzer = TextBasedSignalAnalyzer::new(); - - use 
hermesllm::apis::openai::{FunctionCall, ToolCall}; - - // Helper to create a message with tool calls - let create_assistant_with_tools = - |content: &str, tool_id: &str, tool_name: &str, args: &str| -> Message { - Message { - role: Role::Assistant, - content: Some(MessageContent::Text(content.to_string())), - name: None, - tool_calls: Some(vec![ToolCall { - id: tool_id.to_string(), - call_type: "function".to_string(), - function: FunctionCall { - name: tool_name.to_string(), - arguments: args.to_string(), - }, - }]), - tool_call_id: None, - } - }; - - // Helper to create a tool response message - let create_tool_message = |tool_call_id: &str, content: &str| -> Message { - Message { - role: Role::Tool, - content: Some(MessageContent::Text(content.to_string())), - name: None, - tool_calls: None, - tool_call_id: Some(tool_call_id.to_string()), - } - }; - - // Scenario: User NEVER mentions New York in first message but claims "I already told you" - // This represents realistic frustrated user behavior - exaggeration/misremembering - let messages = vec![ - create_message( - Role::User, - "I need to book a flight to Paris for December 20th", - ), - create_assistant_with_tools( - "I'll help you search for flights to Paris.", - "call_123", - "search_flights", - r#"{"destination": "Paris", "date": "2025-12-20"}"#, - ), - create_tool_message("call_123", r#"{"error": "origin required"}"#), - create_message( - Role::Assistant, - "I couldn't find any flights. Could you provide your departure city?", - ), - create_message(Role::User, "I already told you, from New York!"), // False claim - never mentioned it - create_assistant_with_tools( - "Let me try again.", - "call_456", - "search_flights", - r#"{"origin": "New York", "destination": "Paris", "date": "2025-12-20"}"#, - ), - create_tool_message("call_456", r#"{"flights": []}"#), - create_message( - Role::Assistant, - "I'm still not finding results. 
Let me check the system.", - ), - create_message( - Role::User, - "THIS IS RIDICULOUS!!! The tool doesn't work at all. Why do you keep calling it?", - ), - create_message( - Role::Assistant, - "I sincerely apologize for the frustration with the search tool.", - ), - create_message( - Role::User, - "Forget it. I need to speak to a human agent. This is a waste of time.", - ), - ]; - - let report = analyzer.analyze(&messages); - - // Tool messages should be filtered out, so we should only analyze text messages - // That's 4 user messages + 5 assistant text messages = 9 turns - assert_eq!( - report.turn_count.total_turns, 9, - "Should count 9 text messages (tool messages filtered out)" - ); - assert!( - report.turn_count.is_concerning, - "Should flag concerning turn count" - ); - - // Should detect frustration (all caps, complaints, false claims) - assert!( - report.frustration.has_frustration, - "Should detect frustration" - ); - assert!( - report.frustration.frustration_count >= 2, - "Should detect multiple frustration indicators" - ); - assert!( - report.frustration.severity >= 2, - "Should have moderate or higher frustration severity" - ); - - // Should detect escalation request - assert!( - report.escalation.escalation_requested, - "Should detect escalation to human agent" - ); - assert!( - report.escalation.escalation_count >= 1, - "Should detect at least one escalation" - ); - - // Note: May detect false positive "positive feedback" due to fuzzy matching - // e.g., "I already told YOU" matches "you rock", "THIS is RIDICULOUS" matches "this helps" - // However, the overall quality should still be Poor/Severe due to frustration+escalation - - // Overall quality should be Poor or Severe (frustration + escalation indicates poor interaction) - assert!( - matches!( - report.overall_quality, - InteractionQuality::Poor | InteractionQuality::Severe - ), - "Quality should be Poor or Severe for frustrated user with false claims, got {:?}", - report.overall_quality - ); - - 
println!( - "test_frustrated_user_false_claim took: {:?}", - start.elapsed() - ); - } - - // false negative tests - #[test] - fn test_dissatisfaction_polite_not_working_for_me() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Thanks, but this still isn't working for me."), // Polite dissatisfaction, e.g., I appreciate it, but this isn't what I was looking for. - create_message(Role::Assistant, "Sorry—what error do you see?"), - ]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized); - assert!( - signal.has_frustration, - "Polite dissatisfaction should be detected" - ); - } - - #[test] - fn test_dissatisfaction_giving_up_without_escalation() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( - Role::User, - "Never mind, I'll figure it out myself.", - )]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_escalation(&normalized); - assert!( - signal.escalation_requested, - "Giving up should count as escalation/quit intent" - ); - } - - #[test] - fn test_dissatisfaction_same_problem_again() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( - Role::User, - "I'm running into the same issue again.", - )]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized); - assert!( - signal.has_frustration, - "'same issue again' should be detected" - ); - } - - #[test] - fn test_unsatisfied_incomplete() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message(Role::User, "This feels incomplete.")]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized); - assert!( - signal.has_frustration, - "Should detect 'incomplete' dissatisfaction" - ); - } - - #[test] - fn test_low_mood_overwhelming() { - let analyzer = 
TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( - Role::User, - "This is overwhelming and I'm not sure what to do.", - )]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized); - assert!(signal.has_frustration, "Should detect overwhelmed language"); - } - - #[test] - fn test_low_mood_exhausted_trying() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![create_message( - Role::User, - "I'm exhausted trying to get this working.", - )]; - let normalized = preprocess_messages(&messages); - let signal = analyzer.analyze_frustration(&normalized); - assert!( - signal.has_frustration, - "Should detect exhaustion/struggle language" - ); - } - - #[test] - fn test_common_polite_unresolved_dissatisfaction() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "I'm trying to set up SSH keys for GitHub."), - create_message( - Role::Assistant, - "Sure. First generate a key using ssh-keygen.", - ), - create_message(Role::User, "I did that already."), - create_message( - Role::Assistant, - "Then add the key to your GitHub account settings.", - ), - create_message(Role::User, "I've done that too."), - create_message( - Role::Assistant, - "After that, make sure your SSH agent is running.", - ), - create_message( - Role::User, - "Okay, but this still doesn't seem to fix the issue.", - ), - create_message(Role::Assistant, "What error message are you seeing?"), - create_message(Role::User, "It's just not connecting the way I expected."), - ]; - - let report = analyzer.analyze(&messages); - - // This is a common false negative if you only look for caps/profanity. - // Desired: detect dissatisfaction/frustration (or at least not rate as Excellent). 
- assert!( - report.frustration.has_frustration || report.follow_up.repair_count >= 1, - "Should detect polite unresolved dissatisfaction via frustration or follow-up indicators" - ); - - assert!( - !matches!(report.overall_quality, InteractionQuality::Excellent), - "Should not classify unresolved dissatisfaction as Excellent" - ); - } - - #[test] - fn test_common_resigned_giving_up_quietly() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message( - Role::User, - "Can you explain how to deploy this with Docker?", - ), - create_message( - Role::Assistant, - "You need to write a Dockerfile and build an image.", - ), - create_message(Role::User, "I tried that."), - create_message(Role::Assistant, "Then you can run docker-compose up."), - create_message(Role::User, "I did, but it didn’t really help."), - create_message(Role::Assistant, "What error are you getting?"), - create_message( - Role::User, - "Honestly, never mind. I’ll just try something else.", - ), - ]; - - let report = analyzer.analyze(&messages); - - // Many systems miss "never mind / I'll try something else" if they only look for "human agent". 
- assert!( - report.escalation.escalation_requested || report.frustration.has_frustration, - "Resigned quitting language should trigger escalation or frustration" - ); - - assert!( - matches!( - report.overall_quality, - InteractionQuality::Poor | InteractionQuality::Severe - ) || report.escalation.escalation_requested - || report.frustration.has_frustration, - "Giving up should not be classified as a high-quality interaction" - ); - } - - #[test] - fn test_common_discouraged_overwhelmed_low_mood() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "I'm trying to understand backpropagation."), - create_message( - Role::Assistant, - "It's a way to compute gradients efficiently.", - ), - create_message(Role::User, "I’ve read that explanation already."), - create_message(Role::Assistant, "Would you like a mathematical derivation?"), - create_message(Role::User, "Maybe, but I’m still having trouble following."), - create_message(Role::Assistant, "I can walk through a simple example."), - create_message( - Role::User, - "That might help, but honestly this is pretty overwhelming.", - ), - create_message(Role::Assistant, "Let’s slow it down step by step."), - create_message( - Role::User, - "Yeah… I’m just feeling kind of discouraged right now.", - ), - ]; - - let report = analyzer.analyze(&messages); - - // This is negative affect without caps/profanity. Should still count as frustration/negative signal. 
- assert!( - report.frustration.has_frustration, - "Overwhelmed/discouraged language should be detected as negative sentiment/frustration" - ); - - assert!( - !matches!(report.overall_quality, InteractionQuality::Excellent), - "Low-mood discouragement should not be classified as Excellent" - ); - } - - #[test] - fn test_common_misalignment_not_what_i_asked() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "How do I optimize this SQL query?"), - create_message( - Role::Assistant, - "You can add indexes to improve performance.", - ), - create_message(Role::User, "I already have indexes."), - create_message(Role::Assistant, "Then you could consider query caching."), - create_message(Role::User, "That’s not really what I was asking about."), - create_message( - Role::Assistant, - "What specifically are you trying to optimize?", - ), - create_message( - Role::User, - "The execution plan — this answer doesn’t address that.", - ), - ]; - - let report = analyzer.analyze(&messages); - - // Misalignment often shows as follow-up repair or frustration. 
- assert!( - report.follow_up.repair_count >= 1 || report.frustration.has_frustration, - "Misalignment ('not what I asked') should trigger repair or frustration signals" - ); - - assert!( - !matches!(report.overall_quality, InteractionQuality::Excellent), - "Misalignment should not be rated as Excellent" - ); - } - - #[test] - fn test_common_false_negative_polite_disappointment_complexity() { - let analyzer = TextBasedSignalAnalyzer::new(); - let messages = vec![ - create_message(Role::User, "Can you help me write a regex for this?"), - create_message(Role::Assistant, "Sure, try this pattern: ^[a-z]+$"), - create_message(Role::User, "I tested it."), - create_message(Role::Assistant, "Did it work?"), - create_message(Role::User, "Not quite — it matches more than it should."), - create_message(Role::Assistant, "You can refine it with a lookahead."), - create_message( - Role::User, - "I see… this is more complicated than I expected.", - ), - ]; - - let report = analyzer.analyze(&messages); - - // Polite disappointment often becomes a false negative. - assert!( - report.frustration.has_frustration || report.follow_up.repair_count >= 1, - "Polite dissatisfaction ('not quite', 'more complicated than expected') should trigger a negative signal" - ); - - assert!( - !matches!(report.overall_quality, InteractionQuality::Excellent), - "Polite disappointment should not be classified as Excellent" - ); - } - - #[test] - fn test_catastrophic_failure_looping_assistant() { - let analyzer = TextBasedSignalAnalyzer::new(); - - // Catastrophic failure: assistant stuck in loop, user increasingly frustrated - let messages = vec![ - // Turn 1: Initial request - create_message(Role::User, "I need to cancel my subscription"), - create_message(Role::Assistant, "I can help you with account management. 
What would you like to do today?"), - - // Turn 3: First repair - repetition clarification - create_message(Role::User, "I just said I want to cancel my subscription"), - // Turn 4: Exact repetition from assistant - create_message(Role::Assistant, "I can help you with account management. What would you like to do today?"), - - // Turn 5: Repair (negation) + frustration (ALL CAPS) - create_message(Role::User, "No, I meant CANCEL. My. Subscription."), - // Turn 6: Near-duplicate repetition - create_message(Role::Assistant, "I understand you need help with your account. Let me assist you with that."), - - // Turn 7: Severe frustration (complaint + excessive punctuation + ALL CAPS) - create_message(Role::User, "This is not helpful!! I want to CANCEL my subscription!!!"), - // Turn 8: Exact repetition again (looping) - create_message(Role::Assistant, "I can help you with account management. What would you like to do today?"), - - // Turn 9: Repair (explicit rephrase) + frustration (complaint) - create_message(Role::User, "This doesnt work. Let me rephrase: I need to terminate my account and stop being charged"), - // Turn 10: Near-duplicate repetition (looping continues) - create_message(Role::Assistant, "I understand you need help with your account. Let me assist you with that."), - - // Turn 11: Escalation (multiple types) + frustration - create_message(Role::User, "I give up. Can I speak to a real person? 
This is a waste of time"), - ]; - - let report = analyzer.analyze(&messages); - - // Validate turn count - assert_eq!( - report.turn_count.total_turns, 11, - "Should have 11 total turns" - ); - assert_eq!(report.turn_count.user_turns, 6, "Should have 6 user turns"); - assert_eq!( - report.turn_count.assistant_turns, 5, - "Should have 5 assistant turns" - ); - assert!( - report.turn_count.is_concerning, - "11 turns should be concerning (>7)" - ); - assert!( - !report.turn_count.is_excessive, - "11 turns should not be excessive (<=12)" - ); - assert!( - report.turn_count.efficiency_score < 0.5, - "Efficiency should be low" - ); - - // Validate repair detection (USER signals - query reformulation) - // Detected repairs: - // 1. "I just said I want to cancel..." - pattern: "I just said" - // 2. "No, I meant CANCEL..." - pattern: "No, I meant" - // 3. "Let me rephrase: I need to terminate..." - pattern: "let me rephrase" - // Note: "This is not helpful!!" is frustration (not repair) - // Note: "I give up..." is escalation (not repair) - assert_eq!( - report.follow_up.repair_count, 3, - "Should detect exactly 3 repair attempts from user messages" - ); - assert_eq!( - report.follow_up.repair_ratio, 0.5, - "Repair ratio should be 0.5 (3 repairs / 6 user messages)" - ); - assert!( - report.follow_up.is_concerning, - "50% repair ratio should be highly concerning (threshold is 30%)" - ); - - // Validate frustration detection - assert!( - report.frustration.has_frustration, - "Should detect frustration" - ); - assert!( - report.frustration.frustration_count >= 4, - "Should detect multiple frustration indicators: found {}", - report.frustration.frustration_count - ); - assert!( - report.frustration.severity >= 2, - "Should be at least moderate frustration" - ); - - // Validate repetition/looping detection (ASSISTANT signals - not following instructions) - // The assistant repeats the same unhelpful responses multiple times: - // 1. "I can help you with account management..." 
appears 3 times (exact repetition) - // 2. "I understand you need help with your account..." appears 2 times (near-duplicate) - assert!( - report.repetition.repetition_count >= 4, - "Should detect at least 4 assistant repetitions (exact + near-duplicates)" - ); - assert!( - report.repetition.has_looping, - "Should detect looping (>2 repetitions indicates stuck agent)" - ); - assert!( - report.repetition.severity >= 2, - "Should be moderate to severe looping (assistant not adapting)" - ); - - // Validate escalation detection - assert!( - report.escalation.escalation_requested, - "Should detect escalation request" - ); - assert!( - report.escalation.escalation_count >= 2, - "Should detect multiple escalation indicators: 'give up' + 'speak to a real person'" - ); - - // Validate overall quality - assert_eq!(report.overall_quality, InteractionQuality::Severe, "Should be classified as Severe due to escalation + excessive frustration + looping + high repair ratio"); + let r = SignalAnalyzer::default().analyze_sharegpt(&convo); + assert!(r.execution.failure.count >= 1); + assert!(r.quality_score < 50.0); } } diff --git a/crates/brightstaff/src/signals/environment/exhaustion.rs b/crates/brightstaff/src/signals/environment/exhaustion.rs new file mode 100644 index 00000000..142e7d6e --- /dev/null +++ b/crates/brightstaff/src/signals/environment/exhaustion.rs @@ -0,0 +1,347 @@ +//! Environment exhaustion detector. Direct port of +//! `signals/environment/exhaustion.py`. 
+ +use std::sync::OnceLock; + +use regex::Regex; +use serde_json::json; + +use crate::signals::analyzer::ShareGptMessage; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; + +pub const API_ERROR_PATTERNS: &[&str] = &[ + r"500\s*(internal\s+)?server\s+error", + r"502\s*bad\s+gateway", + r"503\s*service\s+unavailable", + r"504\s*gateway\s+timeout", + r"internal\s+server\s+error", + r"service\s+unavailable", + r"server\s+error", + r"backend\s+error", + r"upstream\s+error", + r"service\s+temporarily\s+unavailable", + r"maintenance\s+mode", + r"under\s+maintenance", + r"try\s+again\s+later", + r"temporarily\s+unavailable", + r"system\s+error", + r"unexpected\s+error", + r"unhandled\s+exception", +]; + +pub const TIMEOUT_PATTERNS: &[&str] = &[ + r"timeout", + r"timed?\s*out", + r"etimedout", + r"connection\s+timed?\s*out", + r"read\s+timed?\s*out", + r"request\s+timed?\s*out", + r"gateway\s+timeout", + r"deadline\s+exceeded", + r"took\s+too\s+long", + r"operation\s+timed?\s*out", + r"socket\s+timeout", +]; + +pub const RATE_LIMIT_PATTERNS: &[&str] = &[ + r"rate\s+limit", + r"rate.limited", + r"(status|error|http)\s*:?\s*429", + r"429\s+(too\s+many|rate|limit)", + r"too\s+many\s+requests?", + r"quota\s+exceeded", + r"quota\s+limit", + r"throttl(ed|ing)", + r"request\s+limit", + r"api\s+limit", + r"calls?\s+per\s+(second|minute|hour|day)", + r"exceeded\s+.*\s+limit", + r"slow\s+down", + r"retry\s+after", + r"requests?\s+exceeded", +]; + +pub const NETWORK_PATTERNS: &[&str] = &[ + r"connection\s+refused", + r"econnrefused", + r"econnreset", + r"connection\s+reset", + r"enotfound", + r"dns\s+(error|failure|lookup)", + r"host\s+not\s+found", + r"network\s+(error|failure|unreachable)", + r"no\s+route\s+to\s+host", + r"socket\s+error", + r"connection\s+failed", + r"unable\s+to\s+connect", + r"cannot\s+connect", + r"could\s+not\s+connect", + r"connect\s+error", + r"ssl\s+(error|handshake|certificate)", + r"certificate\s+(error|invalid|expired)", +]; + +pub 
const MALFORMED_PATTERNS: &[&str] = &[ + r"json\s+parse\s+error", + r"invalid\s+json", + r"unexpected\s+token", + r"syntax\s+error.*json", + r"malformed\s+(response|json|data)", + r"unexpected\s+end\s+of", + r"parse\s+error", + r"parsing\s+failed", + r"invalid\s+response", + r"unexpected\s+response", + r"response\s+format", + r"missing\s+field.*response", + r"unexpected\s+schema", + r"schema\s+validation", + r"deserialization\s+error", + r"failed\s+to\s+decode", +]; + +pub const CONTEXT_OVERFLOW_PATTERNS: &[&str] = &[ + r"context\s+(length|limit|overflow|exceeded)", + r"token\s+(limit|overflow|exceeded)", + r"max(imum)?\s+tokens?", + r"input\s+too\s+(long|large)", + r"exceeds?\s+(context|token|character|input)\s+limit", + r"message\s+too\s+(long|large)", + r"content\s+too\s+(long|large)", + r"truncat(ed|ion)\s+(due\s+to|because|for)\s+(length|size|limit)", + r"maximum\s+context", + r"prompt\s+too\s+(long|large)", +]; + +fn compile(patterns: &[&str]) -> Regex { + let combined = patterns + .iter() + .map(|p| format!("({})", p)) + .collect::>() + .join("|"); + Regex::new(&format!("(?i){}", combined)).expect("exhaustion pattern regex must compile") +} + +fn api_error_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(API_ERROR_PATTERNS)) +} +fn timeout_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(TIMEOUT_PATTERNS)) +} +fn rate_limit_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(RATE_LIMIT_PATTERNS)) +} +fn network_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(NETWORK_PATTERNS)) +} +fn malformed_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(MALFORMED_PATTERNS)) +} +fn context_overflow_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(CONTEXT_OVERFLOW_PATTERNS)) +} + +fn snippet_around(text: &str, m: 
regex::Match<'_>, context: usize) -> String { + let start = m.start().saturating_sub(context); + let end = (m.end() + context).min(text.len()); + let start = align_char_boundary(text, start, false); + let end = align_char_boundary(text, end, true); + let mut snippet = String::new(); + if start > 0 { + snippet.push_str("..."); + } + snippet.push_str(&text[start..end]); + if end < text.len() { + snippet.push_str("..."); + } + snippet +} + +fn align_char_boundary(s: &str, mut idx: usize, forward: bool) -> usize { + if idx >= s.len() { + return s.len(); + } + while !s.is_char_boundary(idx) { + if forward { + idx += 1; + } else if idx == 0 { + break; + } else { + idx -= 1; + } + } + idx +} + +pub fn analyze_exhaustion(messages: &[ShareGptMessage<'_>]) -> SignalGroup { + let mut group = SignalGroup::new("exhaustion"); + + for (i, msg) in messages.iter().enumerate() { + if msg.from != "observation" { + continue; + } + let value = msg.value; + let lower = value.to_lowercase(); + + if let Some(m) = rate_limit_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionRateLimit, + i, + snippet_around(value, m, 50), + 0.95, + "rate_limit", + m.as_str(), + )); + continue; + } + + if let Some(m) = api_error_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionApiError, + i, + snippet_around(value, m, 50), + 0.9, + "api_error", + m.as_str(), + )); + continue; + } + + if let Some(m) = timeout_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionTimeout, + i, + snippet_around(value, m, 50), + 0.9, + "timeout", + m.as_str(), + )); + continue; + } + + if let Some(m) = network_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionNetwork, + i, + snippet_around(value, m, 50), + 0.9, + "network", + m.as_str(), + )); + continue; + } + + if let Some(m) = malformed_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionMalformed, + i, + snippet_around(value, m, 
50), + 0.85, + "malformed_response", + m.as_str(), + )); + continue; + } + + if let Some(m) = context_overflow_re().find(&lower) { + group.add_signal(emit( + SignalType::EnvironmentExhaustionContextOverflow, + i, + snippet_around(value, m, 50), + 0.9, + "context_overflow", + m.as_str(), + )); + } + } + + group +} + +fn emit( + t: SignalType, + idx: usize, + snippet: String, + confidence: f32, + kind: &str, + matched: &str, +) -> SignalInstance { + SignalInstance::new(t, idx, snippet) + .with_confidence(confidence) + .with_metadata(json!({ + "exhaustion_type": kind, + "matched": matched, + })) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn obs(value: &str) -> ShareGptMessage<'_> { + ShareGptMessage { + from: "observation", + value, + } + } + + #[test] + fn detects_rate_limit() { + let g = analyze_exhaustion(&[obs("HTTP 429: too many requests, retry after 30s")]); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionRateLimit))); + } + + #[test] + fn detects_api_error() { + let g = analyze_exhaustion(&[obs("503 service unavailable - try again later")]); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionApiError))); + } + + #[test] + fn detects_timeout() { + let g = analyze_exhaustion(&[obs("Connection timed out after 30 seconds")]); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionTimeout))); + } + + #[test] + fn detects_network_failure() { + let g = analyze_exhaustion(&[obs("ECONNREFUSED: connection refused by remote host")]); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionNetwork))); + } + + #[test] + fn detects_malformed_response() { + let g = analyze_exhaustion(&[obs("Invalid JSON: unexpected token at position 42")]); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::EnvironmentExhaustionMalformed))); + } + + #[test] + fn 
detects_context_overflow() { + let g = analyze_exhaustion(&[obs("Maximum context length exceeded for this model")]); + assert!(g.signals.iter().any(|s| matches!( + s.signal_type, + SignalType::EnvironmentExhaustionContextOverflow + ))); + } +} diff --git a/crates/brightstaff/src/signals/environment/mod.rs b/crates/brightstaff/src/signals/environment/mod.rs new file mode 100644 index 00000000..97d9b300 --- /dev/null +++ b/crates/brightstaff/src/signals/environment/mod.rs @@ -0,0 +1,3 @@ +//! Environment signals: exhaustion (external system failures and constraints). + +pub mod exhaustion; diff --git a/crates/brightstaff/src/signals/execution/failure.rs b/crates/brightstaff/src/signals/execution/failure.rs new file mode 100644 index 00000000..3e171446 --- /dev/null +++ b/crates/brightstaff/src/signals/execution/failure.rs @@ -0,0 +1,388 @@ +//! Execution failure detector. Direct port of `signals/execution/failure.py`. + +use std::sync::OnceLock; + +use regex::Regex; +use serde_json::json; + +use crate::signals::analyzer::ShareGptMessage; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; + +pub const INVALID_ARGS_PATTERNS: &[&str] = &[ + r"invalid\s+argument", + r"invalid\s+parameter", + r"invalid\s+type", + r"type\s*error", + r"expected\s+\w+\s*,?\s*got\s+\w+", + r"required\s+field", + r"required\s+parameter", + r"missing\s+required", + r"missing\s+argument", + r"validation\s+failed", + r"validation\s+error", + r"invalid\s+value", + r"invalid\s+format", + r"must\s+be\s+(a|an)\s+\w+", + r"cannot\s+be\s+(null|empty|none)", + r"is\s+not\s+valid", + r"does\s+not\s+match", + r"out\s+of\s+range", + r"invalid\s+date", + r"invalid\s+json", + r"malformed\s+request", +]; + +pub const BAD_QUERY_PATTERNS: &[&str] = &[ + r"invalid\s+query", + r"query\s+syntax\s+error", + r"malformed\s+query", + r"unknown\s+field", + r"invalid\s+field", + r"invalid\s+filter", + r"invalid\s+search", + r"unknown\s+id", + r"invalid\s+id", + r"id\s+format\s+error", + 
r"invalid\s+identifier", + r"query\s+failed", + r"search\s+error", + r"invalid\s+operator", + r"unsupported\s+query", +]; + +pub const TOOL_NOT_FOUND_PATTERNS: &[&str] = &[ + r"unknown\s+function", + r"unknown\s+tool", + r"function\s+not\s+found", + r"tool\s+not\s+found", + r"no\s+such\s+function", + r"no\s+such\s+tool", + r"undefined\s+function", + r"action\s+not\s+supported", + r"invalid\s+tool", + r"invalid\s+function", + r"unrecognized\s+function", +]; + +pub const AUTH_MISUSE_PATTERNS: &[&str] = &[ + r"\bunauthorized\b", + r"(status|error|http|code)\s*:?\s*401", + r"401\s+unauthorized", + r"403\s+forbidden", + r"permission\s+denied", + r"access\s+denied", + r"authentication\s+required", + r"invalid\s+credentials", + r"invalid\s+token", + r"token\s+expired", + r"missing\s+authorization", + r"\bforbidden\b", + r"not\s+authorized", + r"insufficient\s+permissions?", +]; + +pub const STATE_ERROR_PATTERNS: &[&str] = &[ + r"invalid\s+state", + r"illegal\s+state", + r"must\s+call\s+\w+\s+first", + r"must\s+\w+\s+before", + r"cannot\s+\w+\s+before", + r"already\s+(exists?|created|started|finished)", + r"not\s+initialized", + r"not\s+started", + r"already\s+in\s+progress", + r"operation\s+in\s+progress", + r"sequence\s+error", + r"precondition\s+failed", + r"(status|error|http)\s*:?\s*409", + r"409\s+conflict", + r"\bconflict\b", +]; + +fn compile(patterns: &[&str]) -> Regex { + // Use `(?i)` flag for case-insensitive matching, matching Python's `re.IGNORECASE`. 
+ let combined = patterns + .iter() + .map(|p| format!("({})", p)) + .collect::>() + .join("|"); + Regex::new(&format!("(?i){}", combined)).expect("failure pattern regex must compile") +} + +fn invalid_args_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(INVALID_ARGS_PATTERNS)) +} +fn bad_query_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(BAD_QUERY_PATTERNS)) +} +fn tool_not_found_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(TOOL_NOT_FOUND_PATTERNS)) +} +fn auth_misuse_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(AUTH_MISUSE_PATTERNS)) +} +fn state_error_re() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| compile(STATE_ERROR_PATTERNS)) +} + +/// Pull tool name + args from a `function_call` message. Mirrors +/// `_extract_tool_info` in the reference. +pub(crate) fn extract_tool_info(value: &str) -> (String, String) { + if let Ok(parsed) = serde_json::from_str::(value) { + if let Some(obj) = parsed.as_object() { + let name = obj + .get("name") + .or_else(|| obj.get("function")) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .unwrap_or_else(|| "unknown".to_string()); + let args = match obj.get("arguments").or_else(|| obj.get("args")) { + Some(serde_json::Value::Object(o)) => { + serde_json::to_string(&serde_json::Value::Object(o.clone())).unwrap_or_default() + } + Some(other) => other + .as_str() + .map(|s| s.to_string()) + .unwrap_or_else(|| serde_json::to_string(other).unwrap_or_default()), + None => String::new(), + }; + return (name, args); + } + } + let mut snippet: String = value.chars().take(200).collect(); + snippet.shrink_to_fit(); + ("unknown".to_string(), snippet) +} + +/// Build a context-window snippet around a regex match, with leading/trailing +/// ellipses when truncated. Mirrors `_get_snippet`. 
+fn snippet_around(text: &str, m: regex::Match<'_>, context: usize) -> String { + let start = m.start().saturating_sub(context); + let end = (m.end() + context).min(text.len()); + // Ensure we cut on UTF-8 boundaries. + let start = align_char_boundary(text, start, false); + let end = align_char_boundary(text, end, true); + let mut snippet = String::new(); + if start > 0 { + snippet.push_str("..."); + } + snippet.push_str(&text[start..end]); + if end < text.len() { + snippet.push_str("..."); + } + snippet +} + +fn align_char_boundary(s: &str, mut idx: usize, forward: bool) -> usize { + if idx >= s.len() { + return s.len(); + } + while !s.is_char_boundary(idx) { + if forward { + idx += 1; + } else if idx == 0 { + break; + } else { + idx -= 1; + } + } + idx +} + +pub fn analyze_failure(messages: &[ShareGptMessage<'_>]) -> SignalGroup { + let mut group = SignalGroup::new("failure"); + let mut last_call: Option<(usize, String, String)> = None; + + for (i, msg) in messages.iter().enumerate() { + match msg.from { + "function_call" => { + let (name, args) = extract_tool_info(msg.value); + last_call = Some((i, name, args)); + continue; + } + "observation" => {} + _ => continue, + } + + let value = msg.value; + let lower = value.to_lowercase(); + let (call_index, tool_name) = match &last_call { + Some((idx, name, _)) => (*idx, name.clone()), + None => (i.saturating_sub(1), "unknown".to_string()), + }; + + if let Some(m) = invalid_args_re().find(&lower) { + group.add_signal( + SignalInstance::new( + SignalType::ExecutionFailureInvalidArgs, + i, + snippet_around(value, m, 50), + ) + .with_confidence(0.9) + .with_metadata(json!({ + "tool_name": tool_name, + "call_index": call_index, + "error_type": "invalid_args", + "matched": m.as_str(), + })), + ); + continue; + } + + if let Some(m) = tool_not_found_re().find(&lower) { + group.add_signal( + SignalInstance::new( + SignalType::ExecutionFailureToolNotFound, + i, + snippet_around(value, m, 50), + ) + .with_confidence(0.95) + 
.with_metadata(json!({ + "tool_name": tool_name, + "call_index": call_index, + "error_type": "tool_not_found", + "matched": m.as_str(), + })), + ); + continue; + } + + if let Some(m) = auth_misuse_re().find(&lower) { + group.add_signal( + SignalInstance::new( + SignalType::ExecutionFailureAuthMisuse, + i, + snippet_around(value, m, 50), + ) + .with_confidence(0.8) + .with_metadata(json!({ + "tool_name": tool_name, + "call_index": call_index, + "error_type": "auth_misuse", + "matched": m.as_str(), + })), + ); + continue; + } + + if let Some(m) = state_error_re().find(&lower) { + group.add_signal( + SignalInstance::new( + SignalType::ExecutionFailureStateError, + i, + snippet_around(value, m, 50), + ) + .with_confidence(0.85) + .with_metadata(json!({ + "tool_name": tool_name, + "call_index": call_index, + "error_type": "state_error", + "matched": m.as_str(), + })), + ); + continue; + } + + if let Some(m) = bad_query_re().find(&lower) { + let confidence = if ["error", "invalid", "failed"] + .iter() + .any(|w| lower.contains(w)) + { + 0.8 + } else { + 0.6 + }; + group.add_signal( + SignalInstance::new( + SignalType::ExecutionFailureBadQuery, + i, + snippet_around(value, m, 50), + ) + .with_confidence(confidence) + .with_metadata(json!({ + "tool_name": tool_name, + "call_index": call_index, + "error_type": "bad_query", + "matched": m.as_str(), + })), + ); + } + } + + group +} + +#[cfg(test)] +mod tests { + use super::*; + + fn fc(value: &str) -> ShareGptMessage<'_> { + ShareGptMessage { + from: "function_call", + value, + } + } + fn obs(value: &str) -> ShareGptMessage<'_> { + ShareGptMessage { + from: "observation", + value, + } + } + + #[test] + fn detects_invalid_args() { + let msgs = vec![ + fc(r#"{"name":"create_user","arguments":{"age":"twelve"}}"#), + obs("Error: validation failed - expected integer got string for field age"), + ]; + let g = analyze_failure(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, 
SignalType::ExecutionFailureInvalidArgs))); + } + + #[test] + fn detects_tool_not_found() { + let msgs = vec![ + fc(r#"{"name":"send_thought","arguments":{}}"#), + obs("Error: unknown function 'send_thought'"), + ]; + let g = analyze_failure(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureToolNotFound))); + } + + #[test] + fn detects_auth_misuse() { + let msgs = vec![ + fc(r#"{"name":"get_secret","arguments":{}}"#), + obs("HTTP 401 Unauthorized"), + ]; + let g = analyze_failure(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureAuthMisuse))); + } + + #[test] + fn detects_state_error() { + let msgs = vec![ + fc(r#"{"name":"commit_tx","arguments":{}}"#), + obs("must call begin_tx first"), + ]; + let g = analyze_failure(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::ExecutionFailureStateError))); + } +} diff --git a/crates/brightstaff/src/signals/execution/loops.rs b/crates/brightstaff/src/signals/execution/loops.rs new file mode 100644 index 00000000..70b90e83 --- /dev/null +++ b/crates/brightstaff/src/signals/execution/loops.rs @@ -0,0 +1,433 @@ +//! Execution loops detector. Direct port of `signals/execution/loops.py`. + +use serde_json::json; + +use crate::signals::analyzer::ShareGptMessage; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; + +pub const RETRY_THRESHOLD: usize = 3; +pub const PARAMETER_DRIFT_THRESHOLD: usize = 3; +pub const OSCILLATION_CYCLES_THRESHOLD: usize = 3; + +#[derive(Debug, Clone)] +pub struct ToolCall { + pub index: usize, + pub name: String, + /// Canonical JSON string of arguments (sorted keys when parseable). 
+ pub args: String, + pub args_dict: Option>, +} + +impl ToolCall { + pub fn args_equal(&self, other: &ToolCall) -> bool { + match (&self.args_dict, &other.args_dict) { + (Some(a), Some(b)) => a == b, + _ => self.args == other.args, + } + } +} + +fn parse_tool_call(index: usize, msg: &ShareGptMessage<'_>) -> Option { + if msg.from != "function_call" { + return None; + } + let value = msg.value; + + if let Ok(parsed) = serde_json::from_str::(value) { + if let Some(obj) = parsed.as_object() { + let name = obj + .get("name") + .or_else(|| obj.get("function")) + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .unwrap_or_else(|| "unknown".to_string()); + let raw_args = obj.get("arguments").or_else(|| obj.get("args")); + let (args_str, args_dict) = match raw_args { + Some(serde_json::Value::Object(o)) => { + let mut keys: Vec<&String> = o.keys().collect(); + keys.sort(); + let mut canon = serde_json::Map::new(); + for k in keys { + canon.insert(k.clone(), o[k].clone()); + } + ( + serde_json::to_string(&serde_json::Value::Object(canon.clone())) + .unwrap_or_default(), + Some(canon), + ) + } + Some(other) => ( + other + .as_str() + .map(|s| s.to_string()) + .unwrap_or_else(|| serde_json::to_string(other).unwrap_or_default()), + None, + ), + None => (String::new(), None), + }; + return Some(ToolCall { + index, + name, + args: args_str, + args_dict, + }); + } + } + + if let Some(paren) = value.find('(') { + if paren > 0 { + let name = value[..paren].trim().to_string(); + let args_part = &value[paren..]; + if args_part.starts_with('(') && args_part.ends_with(')') { + let inner = args_part[1..args_part.len() - 1].trim(); + if let Ok(serde_json::Value::Object(o)) = + serde_json::from_str::(inner) + { + let mut keys: Vec<&String> = o.keys().collect(); + keys.sort(); + let mut canon = serde_json::Map::new(); + for k in keys { + canon.insert(k.clone(), o[k].clone()); + } + return Some(ToolCall { + index, + name, + args: 
serde_json::to_string(&serde_json::Value::Object(canon.clone())) + .unwrap_or_default(), + args_dict: Some(canon), + }); + } + return Some(ToolCall { + index, + name, + args: inner.to_string(), + args_dict: None, + }); + } + return Some(ToolCall { + index, + name, + args: args_part.to_string(), + args_dict: None, + }); + } + } + + Some(ToolCall { + index, + name: value.trim().to_string(), + args: String::new(), + args_dict: None, + }) +} + +fn extract_tool_calls(messages: &[ShareGptMessage<'_>]) -> Vec { + let mut out = Vec::new(); + for (i, msg) in messages.iter().enumerate() { + if let Some(c) = parse_tool_call(i, msg) { + out.push(c); + } + } + out +} + +fn detect_retry(calls: &[ToolCall]) -> Vec<(usize, usize, String)> { + if calls.len() < RETRY_THRESHOLD { + return Vec::new(); + } + let mut patterns = Vec::new(); + let mut i = 0; + while i < calls.len() { + let current = &calls[i]; + let mut j = i + 1; + let mut run_length = 1; + while j < calls.len() { + if calls[j].name == current.name && calls[j].args_equal(current) { + run_length += 1; + j += 1; + } else { + break; + } + } + if run_length >= RETRY_THRESHOLD { + patterns.push((calls[i].index, calls[j - 1].index, current.name.clone())); + i = j; + } else { + i += 1; + } + } + patterns +} + +fn detect_parameter_drift(calls: &[ToolCall]) -> Vec<(usize, usize, String, usize)> { + if calls.len() < PARAMETER_DRIFT_THRESHOLD { + return Vec::new(); + } + let mut patterns = Vec::new(); + let mut i = 0; + while i < calls.len() { + let current_name = calls[i].name.clone(); + let mut seen_args: Vec = vec![calls[i].args.clone()]; + let mut unique_args = 1; + let mut j = i + 1; + while j < calls.len() { + if calls[j].name != current_name { + break; + } + if !seen_args.iter().any(|a| a == &calls[j].args) { + seen_args.push(calls[j].args.clone()); + unique_args += 1; + } + j += 1; + } + let run_length = j - i; + if run_length >= PARAMETER_DRIFT_THRESHOLD && unique_args >= 2 { + patterns.push(( + calls[i].index, + calls[j - 
1].index, + current_name, + unique_args, + )); + i = j; + } else { + i += 1; + } + } + patterns +} + +fn detect_oscillation(calls: &[ToolCall]) -> Vec<(usize, usize, Vec, usize)> { + let min_calls = 2 * OSCILLATION_CYCLES_THRESHOLD; + if calls.len() < min_calls { + return Vec::new(); + } + let mut patterns = Vec::new(); + let mut i: usize = 0; + while i + min_calls <= calls.len() { + let max_pat_len = (5usize).min(calls.len() - i); + let mut found_for_i = false; + for pat_len in 2..=max_pat_len { + let pattern_names: Vec = + (0..pat_len).map(|k| calls[i + k].name.clone()).collect(); + let unique: std::collections::HashSet<&String> = pattern_names.iter().collect(); + if unique.len() < 2 { + continue; + } + let mut cycles = 1; + let mut pos = i + pat_len; + while pos + pat_len <= calls.len() { + let mut all_match = true; + for k in 0..pat_len { + if calls[pos + k].name != pattern_names[k] { + all_match = false; + break; + } + } + if all_match { + cycles += 1; + pos += pat_len; + } else { + break; + } + } + if cycles >= OSCILLATION_CYCLES_THRESHOLD { + let end_idx_in_calls = i + (cycles * pat_len) - 1; + patterns.push(( + calls[i].index, + calls[end_idx_in_calls].index, + pattern_names, + cycles, + )); + // Mirror Python: `i = end_idx + 1 - pattern_len`. We set `i` so that + // the next outer iteration begins after we account for overlap. + i = end_idx_in_calls + 1 - pat_len; + found_for_i = true; + break; + } + } + if !found_for_i { + i += 1; + } else { + // Match Python's `i = end_idx + 1 - pattern_len; break` then loop. + // We'll continue; the outer while re-checks i. 
+ } + } + if patterns.len() > 1 { + patterns = deduplicate_patterns(patterns); + } + patterns +} + +fn deduplicate_patterns( + mut patterns: Vec<(usize, usize, Vec, usize)>, +) -> Vec<(usize, usize, Vec, usize)> { + if patterns.is_empty() { + return patterns; + } + patterns.sort_by(|a, b| { + let ord = a.0.cmp(&b.0); + if ord != std::cmp::Ordering::Equal { + ord + } else { + (b.1 - b.0).cmp(&(a.1 - a.0)) + } + }); + let mut result = Vec::new(); + let mut last_end: i64 = -1; + for p in patterns { + if (p.0 as i64) > last_end { + last_end = p.1 as i64; + result.push(p); + } + } + result +} + +pub fn analyze_loops(messages: &[ShareGptMessage<'_>]) -> SignalGroup { + let mut group = SignalGroup::new("loops"); + let calls = extract_tool_calls(messages); + if calls.len() < RETRY_THRESHOLD { + return group; + } + + let retries = detect_retry(&calls); + for (start_idx, end_idx, tool_name) in &retries { + let call_count = calls + .iter() + .filter(|c| *start_idx <= c.index && c.index <= *end_idx) + .count(); + group.add_signal( + SignalInstance::new( + SignalType::ExecutionLoopsRetry, + *start_idx, + format!( + "Tool '{}' called {} times with identical arguments", + tool_name, call_count + ), + ) + .with_confidence(0.95) + .with_metadata(json!({ + "tool_name": tool_name, + "start_index": start_idx, + "end_index": end_idx, + "call_count": call_count, + "loop_type": "retry", + })), + ); + } + + let drifts = detect_parameter_drift(&calls); + for (start_idx, end_idx, tool_name, variation_count) in &drifts { + let overlaps_retry = retries + .iter() + .any(|r| !(*end_idx < r.0 || *start_idx > r.1)); + if overlaps_retry { + continue; + } + let call_count = calls + .iter() + .filter(|c| *start_idx <= c.index && c.index <= *end_idx) + .count(); + group.add_signal( + SignalInstance::new( + SignalType::ExecutionLoopsParameterDrift, + *start_idx, + format!( + "Tool '{}' called {} times with {} different argument variations", + tool_name, call_count, variation_count + ), + ) + 
.with_confidence(0.85) + .with_metadata(json!({ + "tool_name": tool_name, + "start_index": start_idx, + "end_index": end_idx, + "call_count": call_count, + "variation_count": variation_count, + "loop_type": "parameter_drift", + })), + ); + } + + let oscillations = detect_oscillation(&calls); + for (start_idx, end_idx, tool_names, cycle_count) in &oscillations { + let pattern_str = tool_names.join(" \u{2192} "); + group.add_signal( + SignalInstance::new( + SignalType::ExecutionLoopsOscillation, + *start_idx, + format!( + "Oscillation pattern [{}] repeated {} times", + pattern_str, cycle_count + ), + ) + .with_confidence(0.9) + .with_metadata(json!({ + "pattern": tool_names, + "start_index": start_idx, + "end_index": end_idx, + "cycle_count": cycle_count, + "loop_type": "oscillation", + })), + ); + } + + group +} + +#[cfg(test)] +mod tests { + use super::*; + + fn fc(value: &str) -> ShareGptMessage<'_> { + ShareGptMessage { + from: "function_call", + value, + } + } + + #[test] + fn detects_retry_loop() { + let arg = r#"{"name":"check_status","arguments":{"id":"abc"}}"#; + let msgs = vec![fc(arg), fc(arg), fc(arg), fc(arg)]; + let g = analyze_loops(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::ExecutionLoopsRetry))); + } + + #[test] + fn detects_parameter_drift() { + let msgs = vec![ + fc(r#"{"name":"search","arguments":{"q":"a"}}"#), + fc(r#"{"name":"search","arguments":{"q":"ab"}}"#), + fc(r#"{"name":"search","arguments":{"q":"abc"}}"#), + fc(r#"{"name":"search","arguments":{"q":"abcd"}}"#), + ]; + let g = analyze_loops(&msgs); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::ExecutionLoopsParameterDrift))); + } + + #[test] + fn detects_oscillation() { + let a = r#"{"name":"toolA","arguments":{}}"#; + let b = r#"{"name":"toolB","arguments":{}}"#; + let msgs = vec![fc(a), fc(b), fc(a), fc(b), fc(a), fc(b)]; + let g = analyze_loops(&msgs); + assert!(g + .signals + .iter() + .any(|s| 
matches!(s.signal_type, SignalType::ExecutionLoopsOscillation))); + } + + #[test] + fn no_signals_when_few_calls() { + let msgs = vec![fc(r#"{"name":"only_once","arguments":{}}"#)]; + let g = analyze_loops(&msgs); + assert!(g.signals.is_empty()); + } +} diff --git a/crates/brightstaff/src/signals/execution/mod.rs b/crates/brightstaff/src/signals/execution/mod.rs new file mode 100644 index 00000000..87dc28c4 --- /dev/null +++ b/crates/brightstaff/src/signals/execution/mod.rs @@ -0,0 +1,5 @@ +//! Execution signals: failure (agent-caused tool errors) and loops +//! (repetitive tool-call behavior). + +pub mod failure; +pub mod loops; diff --git a/crates/brightstaff/src/signals/interaction/constants.rs b/crates/brightstaff/src/signals/interaction/constants.rs new file mode 100644 index 00000000..2301395c --- /dev/null +++ b/crates/brightstaff/src/signals/interaction/constants.rs @@ -0,0 +1,193 @@ +//! Shared constants for the interaction layer detectors. +//! +//! Direct port of `signals/interaction/constants.py`. 
+ +use std::collections::HashSet; +use std::sync::OnceLock; + +pub const POSITIVE_PREFIXES: &[&str] = &[ + "yes", + "yeah", + "yep", + "yup", + "sure", + "ok", + "okay", + "great", + "awesome", + "perfect", + "thanks", + "thank", + "wonderful", + "excellent", + "amazing", + "nice", + "good", + "cool", + "absolutely", + "definitely", + "please", +]; + +pub const CONFIRMATION_PREFIXES: &[&str] = &[ + "yes", + "yeah", + "yep", + "yup", + "correct", + "right", + "that's correct", + "thats correct", + "that's right", + "thats right", + "that is correct", + "that is right", +]; + +const STOPWORD_LIST: &[&str] = &[ + "a", + "about", + "above", + "after", + "again", + "against", + "all", + "am", + "an", + "and", + "any", + "are", + "as", + "at", + "be", + "because", + "been", + "before", + "being", + "below", + "between", + "both", + "but", + "by", + "can", + "could", + "did", + "do", + "does", + "doing", + "down", + "during", + "each", + "few", + "for", + "from", + "further", + "had", + "has", + "have", + "having", + "he", + "her", + "here", + "hers", + "herself", + "him", + "himself", + "his", + "how", + "i", + "if", + "in", + "into", + "is", + "it", + "its", + "itself", + "just", + "me", + "more", + "most", + "my", + "myself", + "no", + "nor", + "not", + "now", + "of", + "off", + "on", + "once", + "only", + "or", + "other", + "our", + "ours", + "ourselves", + "out", + "over", + "own", + "same", + "she", + "should", + "so", + "some", + "such", + "than", + "that", + "the", + "their", + "theirs", + "them", + "themselves", + "then", + "there", + "these", + "they", + "this", + "those", + "through", + "to", + "too", + "under", + "until", + "up", + "very", + "was", + "we", + "were", + "what", + "when", + "where", + "which", + "while", + "who", + "whom", + "why", + "with", + "would", + "you", + "your", + "yours", + "yourself", + "yourselves", +]; + +pub fn stopwords() -> &'static HashSet<&'static str> { + static SET: OnceLock> = OnceLock::new(); + SET.get_or_init(|| 
STOPWORD_LIST.iter().copied().collect()) +} + +/// Returns true if `text` (case-insensitive, trimmed) starts with any of the +/// given prefixes treated as **whole tokens or token sequences**. This matches +/// the Python's `text_lower.startswith(prefix)` plus the natural intent that +/// `"please"` shouldn't fire on `"pleased"`. +pub fn starts_with_prefix(text: &str, prefixes: &[&str]) -> bool { + let lowered = text.to_lowercase(); + let trimmed = lowered.trim_start(); + for prefix in prefixes { + if trimmed.starts_with(prefix) { + return true; + } + } + false +} diff --git a/crates/brightstaff/src/signals/interaction/disengagement.rs b/crates/brightstaff/src/signals/interaction/disengagement.rs new file mode 100644 index 00000000..28711d18 --- /dev/null +++ b/crates/brightstaff/src/signals/interaction/disengagement.rs @@ -0,0 +1,445 @@ +//! Disengagement signals: escalation, quit, negative stance. +//! +//! Direct port of `signals/interaction/disengagement.py`. + +use std::sync::OnceLock; + +use regex::Regex; +use serde_json::json; + +use super::constants::{starts_with_prefix, POSITIVE_PREFIXES}; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; +use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern}; + +const ESCALATION_PATTERN_TEXTS: &[&str] = &[ + // Human requests + "speak to a human", + "talk to a human", + "connect me to a human", + "connect me with a human", + "transfer me to a human", + "get me a human", + "chat with a human", + // Person requests + "speak to a person", + "talk to a person", + "connect me to a person", + "connect me with a person", + "transfer me to a person", + "get me a person", + "chat with a person", + // Real person requests + "speak to a real person", + "talk to a real person", + "connect me to a real person", + "connect me with a real person", + "transfer me to a real person", + "get me a real person", + "chat with a real person", + // Actual person requests + "speak 
to an actual person", + "talk to an actual person", + "connect me to an actual person", + "connect me with an actual person", + "transfer me to an actual person", + "get me an actual person", + "chat with an actual person", + // Supervisor requests + "speak to a supervisor", + "talk to a supervisor", + "connect me to a supervisor", + "connect me with a supervisor", + "transfer me to a supervisor", + "get me a supervisor", + "chat with a supervisor", + // Manager requests + "speak to a manager", + "talk to a manager", + "connect me to a manager", + "connect me with a manager", + "transfer me to a manager", + "get me a manager", + "chat with a manager", + // Customer service requests + "speak to customer service", + "talk to customer service", + "connect me to customer service", + "connect me with customer service", + "transfer me to customer service", + "get me customer service", + "chat with customer service", + // Customer support requests + "speak to customer support", + "talk to customer support", + "connect me to customer support", + "connect me with customer support", + "transfer me to customer support", + "get me customer support", + "chat with customer support", + // Support requests + "speak to support", + "talk to support", + "connect me to support", + "connect me with support", + "transfer me to support", + "get me support", + "chat with support", + // Tech support requests + "speak to tech support", + "talk to tech support", + "connect me to tech support", + "connect me with tech support", + "transfer me to tech support", + "get me tech support", + "chat with tech support", + // Help desk requests + "speak to help desk", + "talk to help desk", + "connect me to help desk", + "connect me with help desk", + "transfer me to help desk", + "get me help desk", + "chat with help desk", + // Explicit escalation + "escalate this", +]; + +const QUIT_PATTERN_TEXTS: &[&str] = &[ + "i give up", + "i'm giving up", + "im giving up", + "i'm going to quit", + "i quit", + 
"forget it", + "forget this", + "screw it", + "screw this", + "don't bother trying", + "don't bother with this", + "don't bother with it", + "don't even bother", + "why bother", + "not worth it", + "this is hopeless", + "going elsewhere", + "try somewhere else", + "look elsewhere", +]; + +const NEGATIVE_STANCE_PATTERN_TEXTS: &[&str] = &[ + "this is useless", + "not helpful", + "doesn't help", + "not helping", + "you're not helping", + "youre not helping", + "this doesn't work", + "this doesnt work", + "this isn't working", + "this isnt working", + "still doesn't work", + "still doesnt work", + "still not working", + "still isn't working", + "still isnt working", + "waste of time", + "wasting my time", + "this is ridiculous", + "this is absurd", + "this is insane", + "this is stupid", + "this is dumb", + "this sucks", + "this is frustrating", + "not good enough", + "why can't you", + "why cant you", + "same issue", + "did that already", + "done that already", + "tried that already", + "already tried that", + "i've done that", + "ive done that", + "i've tried that", + "ive tried that", + "i'm disappointed", + "im disappointed", + "disappointed with you", + "disappointed in you", + "useless bot", + "dumb bot", + "stupid bot", +]; + +const AGENT_DIRECTED_PROFANITY_PATTERN_TEXTS: &[&str] = &[ + "this is bullshit", + "what bullshit", + "such bullshit", + "total bullshit", + "complete bullshit", + "this is crap", + "what crap", + "this is shit", + "what the hell is wrong with you", + "what the fuck is wrong with you", + "you're fucking useless", + "youre fucking useless", + "you are fucking useless", + "fucking useless", + "this bot is shit", + "this bot is crap", + "damn bot", + "fucking bot", + "stupid fucking", + "are you fucking kidding", + "wtf is wrong with you", + "wtf is this", + "ffs just", + "for fucks sake", + "for fuck's sake", + "what the f**k", + "what the f*ck", + "what the f***", + "that's bullsh*t", + "thats bullsh*t", + "that's bull***t", + "thats 
bull***t", + "that's bs", + "thats bs", + "this is bullsh*t", + "this is bull***t", + "this is bs", +]; + +fn escalation_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(ESCALATION_PATTERN_TEXTS)) +} + +fn quit_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(QUIT_PATTERN_TEXTS)) +} + +fn negative_stance_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(NEGATIVE_STANCE_PATTERN_TEXTS)) +} + +fn profanity_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(AGENT_DIRECTED_PROFANITY_PATTERN_TEXTS)) +} + +fn re_consecutive_q() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| Regex::new(r"\?{2,}").unwrap()) +} +fn re_consecutive_e() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| Regex::new(r"!{2,}").unwrap()) +} +fn re_mixed_punct() -> &'static Regex { + static R: OnceLock = OnceLock::new(); + R.get_or_init(|| Regex::new(r"[?!]{3,}").unwrap()) +} + +pub fn analyze_disengagement( + normalized_messages: &[(usize, &str, NormalizedMessage)], + char_ngram_threshold: f32, + token_cosine_threshold: f32, +) -> SignalGroup { + let mut group = SignalGroup::new("disengagement"); + + for (idx, role, norm_msg) in normalized_messages { + if *role != "human" { + continue; + } + + let text = &norm_msg.raw; + + // All-caps shouting check. 
+ let alpha_chars: String = text.chars().filter(|c| c.is_alphabetic()).collect(); + if alpha_chars.chars().count() >= 10 { + let upper_count = alpha_chars.chars().filter(|c| c.is_uppercase()).count(); + let upper_ratio = upper_count as f32 / alpha_chars.chars().count() as f32; + if upper_ratio >= 0.8 { + let snippet: String = text.chars().take(50).collect(); + group.add_signal( + SignalInstance::new(SignalType::DisengagementNegativeStance, *idx, snippet) + .with_metadata(json!({ + "indicator_type": "all_caps", + "upper_ratio": upper_ratio, + })), + ); + } + } + + // Excessive consecutive punctuation. + let starts_with_positive = starts_with_prefix(text, POSITIVE_PREFIXES); + let cq = re_consecutive_q().find_iter(text).count(); + let ce = re_consecutive_e().find_iter(text).count(); + let mixed = re_mixed_punct().find_iter(text).count(); + if !starts_with_positive && (cq >= 1 || ce >= 1 || mixed >= 1) { + let snippet: String = text.chars().take(50).collect(); + group.add_signal( + SignalInstance::new(SignalType::DisengagementNegativeStance, *idx, snippet) + .with_metadata(json!({ + "indicator_type": "excessive_punctuation", + "consecutive_questions": cq, + "consecutive_exclamations": ce, + "mixed_punctuation": mixed, + })), + ); + } + + // Escalation patterns. + let mut found_escalation = false; + for pattern in escalation_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::DisengagementEscalation, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({"pattern_type": "escalation"})), + ); + found_escalation = true; + break; + } + } + + // Quit patterns (independent of escalation). 
+ for pattern in quit_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new(SignalType::DisengagementQuit, *idx, pattern.raw.clone()) + .with_metadata(json!({"pattern_type": "quit"})), + ); + break; + } + } + + // Profanity (more specific) before generic negative stance. + let mut found_profanity = false; + for pattern in profanity_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::DisengagementNegativeStance, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({ + "indicator_type": "profanity", + "pattern": pattern.raw, + })), + ); + found_profanity = true; + break; + } + } + + if !found_escalation && !found_profanity { + for pattern in negative_stance_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::DisengagementNegativeStance, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({ + "indicator_type": "complaint", + "pattern": pattern.raw, + })), + ); + break; + } + } + } + } + + group +} + +#[cfg(test)] +mod tests { + use super::*; + + fn nm(s: &str) -> NormalizedMessage { + NormalizedMessage::from_text(s, 2000) + } + + #[test] + fn detects_human_escalation_request() { + let msgs = vec![( + 0usize, + "human", + nm("This is taking forever, get me a human"), + )]; + let g = analyze_disengagement(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::DisengagementEscalation))); + } + + #[test] + fn detects_quit_intent() { + let msgs = vec![(0usize, "human", nm("Forget it, I give up"))]; + let g = analyze_disengagement(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::DisengagementQuit))); + } + + 
#[test] + fn detects_negative_stance_complaint() { + let msgs = vec![(0usize, "human", nm("This is useless"))]; + let g = analyze_disengagement(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::DisengagementNegativeStance))); + } + + #[test] + fn detects_excessive_punctuation_as_negative_stance() { + let msgs = vec![(0usize, "human", nm("WHY isn't this working???"))]; + let g = analyze_disengagement(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::DisengagementNegativeStance))); + } + + #[test] + fn positive_excitement_is_not_disengagement() { + let msgs = vec![(0usize, "human", nm("Yes!! That's perfect!!!"))]; + let g = analyze_disengagement(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .all(|s| !matches!(s.signal_type, SignalType::DisengagementNegativeStance))); + } +} diff --git a/crates/brightstaff/src/signals/interaction/misalignment.rs b/crates/brightstaff/src/signals/interaction/misalignment.rs new file mode 100644 index 00000000..3dcf3ddd --- /dev/null +++ b/crates/brightstaff/src/signals/interaction/misalignment.rs @@ -0,0 +1,338 @@ +//! Misalignment signals: corrections, rephrases, clarifications. +//! +//! Direct port of `signals/interaction/misalignment.py`. 
+ +use std::sync::OnceLock; + +use serde_json::json; + +use super::constants::{stopwords, CONFIRMATION_PREFIXES}; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; +use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern}; + +const CORRECTION_PATTERN_TEXTS: &[&str] = &[ + "no, i meant", + "no i meant", + "no, i said", + "no i said", + "no, i asked", + "no i asked", + "nah, i meant", + "nope, i meant", + "not what i said", + "not what i asked", + "that's not what i said", + "that's not what i asked", + "that's not what i meant", + "thats not what i said", + "thats not what i asked", + "thats not what i meant", + "that's not what you", + "no that's not what i", + "no, that's not what i", + "you're not quite right", + "youre not quite right", + "you're not exactly right", + "youre not exactly right", + "you're wrong about", + "youre wrong about", + "i just said", + "i already said", + "i already told you", +]; + +const REPHRASE_PATTERN_TEXTS: &[&str] = &[ + "let me rephrase", + "let me explain again", + "what i'm trying to say", + "what i'm saying is", + "in other words", +]; + +const CLARIFICATION_PATTERN_TEXTS: &[&str] = &[ + "i don't understand", + "don't understand", + "not understanding", + "can't understand", + "don't get it", + "don't follow", + "i'm confused", + "so confused", + "makes no sense", + "doesn't make sense", + "not making sense", + "what do you mean", + "what does that mean", + "what are you saying", + "i'm lost", + "totally lost", + "lost me", + "no clue what you", + "no idea what you", + "no clue what that", + "no idea what that", + "come again", + "say that again", + "repeat that", + "trouble following", + "hard to follow", + "can't follow", +]; + +fn correction_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(CORRECTION_PATTERN_TEXTS)) +} + +fn rephrase_patterns() -> &'static Vec { + static PATS: OnceLock> = 
OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(REPHRASE_PATTERN_TEXTS)) +} + +fn clarification_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(CLARIFICATION_PATTERN_TEXTS)) +} + +fn is_confirmation_message(text: &str) -> bool { + let lowered = text.to_lowercase(); + let trimmed = lowered.trim(); + CONFIRMATION_PREFIXES.iter().any(|p| trimmed.starts_with(p)) +} + +/// Detect whether two user messages appear to be rephrases of each other. +pub fn is_similar_rephrase( + norm_msg1: &NormalizedMessage, + norm_msg2: &NormalizedMessage, + overlap_threshold: f32, + min_meaningful_tokens: usize, + max_new_content_ratio: f32, +) -> bool { + if norm_msg1.tokens.len() < 3 || norm_msg2.tokens.len() < 3 { + return false; + } + if is_confirmation_message(&norm_msg1.raw) { + return false; + } + + let stops = stopwords(); + let tokens1: std::collections::HashSet<&str> = norm_msg1 + .tokens + .iter() + .filter(|t| !stops.contains(t.as_str())) + .map(|s| s.as_str()) + .collect(); + let tokens2: std::collections::HashSet<&str> = norm_msg2 + .tokens + .iter() + .filter(|t| !stops.contains(t.as_str())) + .map(|s| s.as_str()) + .collect(); + + if tokens1.len() < min_meaningful_tokens || tokens2.len() < min_meaningful_tokens { + return false; + } + + let new_tokens: std::collections::HashSet<&&str> = tokens1.difference(&tokens2).collect(); + let new_content_ratio = if tokens1.is_empty() { + 0.0 + } else { + new_tokens.len() as f32 / tokens1.len() as f32 + }; + if new_content_ratio > max_new_content_ratio { + return false; + } + + let intersection = tokens1.intersection(&tokens2).count(); + let min_size = tokens1.len().min(tokens2.len()); + if min_size == 0 { + return false; + } + let overlap_ratio = intersection as f32 / min_size as f32; + overlap_ratio >= overlap_threshold +} + +/// Analyze user messages for misalignment signals. 
+pub fn analyze_misalignment( + normalized_messages: &[(usize, &str, NormalizedMessage)], + char_ngram_threshold: f32, + token_cosine_threshold: f32, +) -> SignalGroup { + let mut group = SignalGroup::new("misalignment"); + + let mut prev_user_idx: Option = None; + let mut prev_user_msg: Option<&NormalizedMessage> = None; + + for (idx, role, norm_msg) in normalized_messages { + if *role != "human" { + continue; + } + + let mut found_in_turn = false; + + for pattern in correction_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::MisalignmentCorrection, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({"pattern_type": "correction"})), + ); + found_in_turn = true; + break; + } + } + + if found_in_turn { + prev_user_idx = Some(*idx); + prev_user_msg = Some(norm_msg); + continue; + } + + for pattern in rephrase_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::MisalignmentRephrase, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({"pattern_type": "rephrase"})), + ); + found_in_turn = true; + break; + } + } + + if found_in_turn { + prev_user_idx = Some(*idx); + prev_user_msg = Some(norm_msg); + continue; + } + + for pattern in clarification_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::MisalignmentClarification, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({"pattern_type": "clarification"})), + ); + found_in_turn = true; + break; + } + } + + if found_in_turn { + prev_user_idx = Some(*idx); + prev_user_msg = Some(norm_msg); + continue; + } + + // Semantic rephrase vs the previous user message (recent only). 
+ if let (Some(prev_idx), Some(prev_msg)) = (prev_user_idx, prev_user_msg) { + let turns_between = idx.saturating_sub(prev_idx); + if turns_between <= 3 && is_similar_rephrase(norm_msg, prev_msg, 0.75, 4, 0.5) { + group.add_signal( + SignalInstance::new( + SignalType::MisalignmentRephrase, + *idx, + "[similar rephrase detected]", + ) + .with_confidence(0.8) + .with_metadata(json!({ + "pattern_type": "semantic_rephrase", + "compared_to": prev_idx, + })), + ); + } + } + + prev_user_idx = Some(*idx); + prev_user_msg = Some(norm_msg); + } + + group +} + +#[cfg(test)] +mod tests { + use super::*; + + fn nm(s: &str) -> NormalizedMessage { + NormalizedMessage::from_text(s, 2000) + } + + fn make(items: &[(&'static str, &str)]) -> Vec<(usize, &'static str, NormalizedMessage)> { + items + .iter() + .enumerate() + .map(|(i, (role, text))| (i, *role, nm(text))) + .collect() + } + + #[test] + fn detects_explicit_correction() { + let msgs = make(&[ + ("human", "Show me my orders"), + ("gpt", "Sure, here are your invoices"), + ("human", "No, I meant my recent orders"), + ]); + let g = analyze_misalignment(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::MisalignmentCorrection))); + } + + #[test] + fn detects_rephrase_marker() { + let msgs = make(&[ + ("human", "Show me X"), + ("gpt", "Sure"), + ("human", "Let me rephrase: I want X grouped by date"), + ]); + let g = analyze_misalignment(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::MisalignmentRephrase))); + } + + #[test] + fn detects_clarification_request() { + let msgs = make(&[ + ("human", "Run the report"), + ("gpt", "Foobar quux baz."), + ("human", "I don't understand what you mean"), + ]); + let g = analyze_misalignment(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::MisalignmentClarification))); + } + + #[test] + fn confirmation_is_not_a_rephrase() { + let m1 = nm("Yes, 
that's correct, please proceed with the order"); + let m2 = nm("please proceed with the order for the same product"); + assert!(!is_similar_rephrase(&m1, &m2, 0.75, 4, 0.5)); + } +} diff --git a/crates/brightstaff/src/signals/interaction/mod.rs b/crates/brightstaff/src/signals/interaction/mod.rs new file mode 100644 index 00000000..b60a6748 --- /dev/null +++ b/crates/brightstaff/src/signals/interaction/mod.rs @@ -0,0 +1,10 @@ +//! Interaction signals: misalignment, stagnation, disengagement, satisfaction. +//! +//! These signals capture how the dialogue itself unfolds (semantic alignment, +//! progress, engagement, closure) independent of tool execution outcomes. + +pub mod constants; +pub mod disengagement; +pub mod misalignment; +pub mod satisfaction; +pub mod stagnation; diff --git a/crates/brightstaff/src/signals/interaction/satisfaction.rs b/crates/brightstaff/src/signals/interaction/satisfaction.rs new file mode 100644 index 00000000..ad719960 --- /dev/null +++ b/crates/brightstaff/src/signals/interaction/satisfaction.rs @@ -0,0 +1,177 @@ +//! Satisfaction signals: gratitude, confirmation, success. +//! +//! Direct port of `signals/interaction/satisfaction.py`. 
+ +use std::sync::OnceLock; + +use serde_json::json; + +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType}; +use crate::signals::text_processing::{normalize_patterns, NormalizedMessage, NormalizedPattern}; + +const GRATITUDE_PATTERN_TEXTS: &[&str] = &[ + "that's helpful", + "that helps", + "this helps", + "appreciate it", + "appreciate that", + "that's perfect", + "exactly what i needed", + "just what i needed", + "you're the best", + "you rock", + "you're awesome", + "you're amazing", + "you're great", +]; + +const CONFIRMATION_PATTERN_TEXTS: &[&str] = &[ + "that works", + "this works", + "that's great", + "that's amazing", + "this is great", + "that's awesome", + "love it", + "love this", + "love that", +]; + +const SUCCESS_PATTERN_TEXTS: &[&str] = &[ + "it worked", + "that worked", + "this worked", + "it's working", + "that's working", + "this is working", +]; + +fn gratitude_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(GRATITUDE_PATTERN_TEXTS)) +} + +fn confirmation_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(CONFIRMATION_PATTERN_TEXTS)) +} + +fn success_patterns() -> &'static Vec { + static PATS: OnceLock> = OnceLock::new(); + PATS.get_or_init(|| normalize_patterns(SUCCESS_PATTERN_TEXTS)) +} + +pub fn analyze_satisfaction( + normalized_messages: &[(usize, &str, NormalizedMessage)], + char_ngram_threshold: f32, + token_cosine_threshold: f32, +) -> SignalGroup { + let mut group = SignalGroup::new("satisfaction"); + + for (idx, role, norm_msg) in normalized_messages { + if *role != "human" { + continue; + } + + let mut found = false; + + for pattern in gratitude_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::SatisfactionGratitude, + *idx, + pattern.raw.clone(), + ) + 
.with_metadata(json!({"pattern_type": "gratitude"})), + ); + found = true; + break; + } + } + if found { + continue; + } + + for pattern in confirmation_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new( + SignalType::SatisfactionConfirmation, + *idx, + pattern.raw.clone(), + ) + .with_metadata(json!({"pattern_type": "confirmation"})), + ); + found = true; + break; + } + } + if found { + continue; + } + + for pattern in success_patterns() { + if norm_msg.matches_normalized_pattern( + pattern, + char_ngram_threshold, + token_cosine_threshold, + ) { + group.add_signal( + SignalInstance::new(SignalType::SatisfactionSuccess, *idx, pattern.raw.clone()) + .with_metadata(json!({"pattern_type": "success"})), + ); + break; + } + } + } + + group +} + +#[cfg(test)] +mod tests { + use super::*; + + fn nm(s: &str) -> NormalizedMessage { + NormalizedMessage::from_text(s, 2000) + } + + #[test] + fn detects_gratitude() { + let msgs = vec![(0usize, "human", nm("That's perfect, appreciate it!"))]; + let g = analyze_satisfaction(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::SatisfactionGratitude))); + } + + #[test] + fn detects_confirmation() { + let msgs = vec![(0usize, "human", nm("That works for me, thanks"))]; + let g = analyze_satisfaction(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::SatisfactionConfirmation))); + } + + #[test] + fn detects_success() { + let msgs = vec![(0usize, "human", nm("Great, it worked!"))]; + let g = analyze_satisfaction(&msgs, 0.65, 0.6); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::SatisfactionSuccess))); + } +} diff --git a/crates/brightstaff/src/signals/interaction/stagnation.rs b/crates/brightstaff/src/signals/interaction/stagnation.rs new file mode 100644 index 00000000..d7d03c80 --- /dev/null +++ 
b/crates/brightstaff/src/signals/interaction/stagnation.rs @@ -0,0 +1,241 @@ +//! Stagnation signals: dragging (turn-count efficiency) and repetition. +//! +//! Direct port of `signals/interaction/stagnation.py`. + +use serde_json::json; + +use super::constants::{starts_with_prefix, POSITIVE_PREFIXES}; +use crate::signals::schemas::{SignalGroup, SignalInstance, SignalType, TurnMetrics}; +use crate::signals::text_processing::NormalizedMessage; + +/// Adapter row used by stagnation::dragging detector. Mirrors the ShareGPT +/// `{"from": role, "value": text}` shape used in the Python reference. +pub struct ShareGptMsg<'a> { + pub from: &'a str, +} + +pub fn analyze_dragging( + messages: &[ShareGptMsg<'_>], + baseline_turns: usize, + efficiency_threshold: f32, +) -> (SignalGroup, TurnMetrics) { + let mut group = SignalGroup::new("stagnation"); + + let mut user_turns: usize = 0; + let mut assistant_turns: usize = 0; + for m in messages { + match m.from { + "human" => user_turns += 1, + "gpt" => assistant_turns += 1, + _ => {} + } + } + + let total_turns = user_turns; + let efficiency_score: f32 = if total_turns == 0 || total_turns <= baseline_turns { + 1.0 + } else { + let excess = (total_turns - baseline_turns) as f32; + 1.0 / (1.0 + excess * 0.25) + }; + + let is_dragging = efficiency_score < efficiency_threshold; + let metrics = TurnMetrics { + total_turns, + user_turns, + assistant_turns, + is_dragging, + efficiency_score, + }; + + if is_dragging { + let last_idx = messages.len().saturating_sub(1); + group.add_signal( + SignalInstance::new( + SignalType::StagnationDragging, + last_idx, + format!( + "Conversation dragging: {} turns (efficiency: {:.2})", + total_turns, efficiency_score + ), + ) + .with_confidence(1.0 - efficiency_score) + .with_metadata(json!({ + "total_turns": total_turns, + "efficiency_score": efficiency_score, + "baseline_turns": baseline_turns, + })), + ); + } + + (group, metrics) +} + +pub fn analyze_repetition( + normalized_messages: &[(usize, 
&str, NormalizedMessage)], + lookback: usize, + exact_threshold: f32, + near_duplicate_threshold: f32, +) -> SignalGroup { + let mut group = SignalGroup::new("stagnation"); + + // We keep references into `normalized_messages`. Since `normalized_messages` + // is borrowed for the whole function, this avoids cloning. + let mut prev_human: Vec<(usize, &NormalizedMessage)> = Vec::new(); + let mut prev_gpt: Vec<(usize, &NormalizedMessage)> = Vec::new(); + + for (idx, role, norm_msg) in normalized_messages { + if *role != "human" && *role != "gpt" { + continue; + } + + // Skip human positive-prefix messages; they're naturally repetitive. + if *role == "human" && starts_with_prefix(&norm_msg.raw, POSITIVE_PREFIXES) { + prev_human.push((*idx, norm_msg)); + continue; + } + + if norm_msg.tokens.len() < 5 { + if *role == "human" { + prev_human.push((*idx, norm_msg)); + } else { + prev_gpt.push((*idx, norm_msg)); + } + continue; + } + + let prev = if *role == "human" { + &prev_human + } else { + &prev_gpt + }; + let start = prev.len().saturating_sub(lookback); + let mut matched = false; + for (prev_idx, prev_msg) in &prev[start..] 
{ + if prev_msg.tokens.len() < 5 { + continue; + } + let similarity = norm_msg.ngram_similarity_with_message(prev_msg); + if similarity >= exact_threshold { + group.add_signal( + SignalInstance::new( + SignalType::StagnationRepetition, + *idx, + format!("Exact repetition with message {}", prev_idx), + ) + .with_confidence(similarity) + .with_metadata(json!({ + "repetition_type": "exact", + "compared_to": prev_idx, + "similarity": similarity, + "role": role, + })), + ); + matched = true; + break; + } else if similarity >= near_duplicate_threshold { + group.add_signal( + SignalInstance::new( + SignalType::StagnationRepetition, + *idx, + format!("Near-duplicate with message {}", prev_idx), + ) + .with_confidence(similarity) + .with_metadata(json!({ + "repetition_type": "near_duplicate", + "compared_to": prev_idx, + "similarity": similarity, + "role": role, + })), + ); + matched = true; + break; + } + } + let _ = matched; + + if *role == "human" { + prev_human.push((*idx, norm_msg)); + } else { + prev_gpt.push((*idx, norm_msg)); + } + } + + group +} + +/// Combined stagnation analyzer: dragging + repetition. 
+pub fn analyze_stagnation( + messages: &[ShareGptMsg<'_>], + normalized_messages: &[(usize, &str, NormalizedMessage)], + baseline_turns: usize, +) -> (SignalGroup, TurnMetrics) { + let (dragging_group, metrics) = analyze_dragging(messages, baseline_turns, 0.5); + let repetition_group = analyze_repetition(normalized_messages, 2, 0.95, 0.85); + + let mut combined = SignalGroup::new("stagnation"); + for s in dragging_group.signals.iter().cloned() { + combined.add_signal(s); + } + for s in repetition_group.signals.iter().cloned() { + combined.add_signal(s); + } + (combined, metrics) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn nm(s: &str) -> NormalizedMessage { + NormalizedMessage::from_text(s, 2000) + } + + #[test] + fn dragging_after_many_user_turns() { + let msgs: Vec<_> = (0..15) + .flat_map(|_| [ShareGptMsg { from: "human" }, ShareGptMsg { from: "gpt" }]) + .collect(); + let (g, m) = analyze_dragging(&msgs, 5, 0.5); + assert!(m.is_dragging); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::StagnationDragging))); + } + + #[test] + fn no_dragging_below_baseline() { + let msgs = vec![ + ShareGptMsg { from: "human" }, + ShareGptMsg { from: "gpt" }, + ShareGptMsg { from: "human" }, + ShareGptMsg { from: "gpt" }, + ]; + let (g, m) = analyze_dragging(&msgs, 5, 0.5); + assert!(!m.is_dragging); + assert!(g.signals.is_empty()); + } + + #[test] + fn detects_exact_repetition_in_user_messages() { + let n = vec![ + ( + 0usize, + "human", + nm("This widget is broken and needs repair right now"), + ), + (1, "gpt", nm("Sorry to hear that. 
Let me look into it.")), + ( + 2, + "human", + nm("This widget is broken and needs repair right now"), + ), + ]; + let g = analyze_repetition(&n, 2, 0.95, 0.85); + assert!(g + .signals + .iter() + .any(|s| matches!(s.signal_type, SignalType::StagnationRepetition))); + } +} diff --git a/crates/brightstaff/src/signals/mod.rs b/crates/brightstaff/src/signals/mod.rs index 83db943e..d96d3bf0 100644 --- a/crates/brightstaff/src/signals/mod.rs +++ b/crates/brightstaff/src/signals/mod.rs @@ -1,3 +1,26 @@ -mod analyzer; +//! Plano signals: behavioral quality indicators for agent interactions. +//! +//! This is a Rust port of the paper-aligned Python reference implementation at +//! `https://github.com/katanemo/signals` (or `/Users/shashmi/repos/signals`). +//! +//! Three layers of signals are detected from a conversation transcript: +//! +//! - **Interaction**: misalignment, stagnation, disengagement, satisfaction +//! - **Execution**: failure, loops +//! - **Environment**: exhaustion +//! +//! See `SignalType` for the full hierarchy. -pub use analyzer::*; +pub mod analyzer; +pub mod environment; +pub mod execution; +pub mod interaction; +pub mod otel; +pub mod schemas; +pub mod text_processing; + +pub use analyzer::{SignalAnalyzer, FLAG_MARKER}; +pub use schemas::{ + EnvironmentSignals, ExecutionSignals, InteractionQuality, InteractionSignals, SignalGroup, + SignalInstance, SignalLayer, SignalReport, SignalType, TurnMetrics, +}; diff --git a/crates/brightstaff/src/signals/otel.rs b/crates/brightstaff/src/signals/otel.rs new file mode 100644 index 00000000..deb3c1b5 --- /dev/null +++ b/crates/brightstaff/src/signals/otel.rs @@ -0,0 +1,241 @@ +//! Helpers for emitting `SignalReport` data to OpenTelemetry spans. +//! +//! Two sets of attributes are emitted: +//! +//! - **Legacy** keys under `signals.*` (e.g. `signals.frustration.count`), +//! computed from the new layered counts. Preserved for one release for +//! backward compatibility with existing dashboards. +//! 
- **New** layered keys (e.g. `signals.interaction.misalignment.count`),
+//!   one set of `count`/`severity` attributes per category, plus per-instance
+//!   span events named `signal.<type>`.
+
+use opentelemetry::trace::SpanRef;
+use opentelemetry::KeyValue;
+
+use crate::signals::schemas::{SignalGroup, SignalReport, SignalType};
+
+/// Emit both legacy and layered OTel attributes/events for a `SignalReport`.
+///
+/// Returns `true` if any "concerning" signal was found, mirroring the previous
+/// behavior used to flag the span operation name.
+pub fn emit_signals_to_span(span: &SpanRef<'_>, report: &SignalReport) -> bool {
+    emit_overall(span, report);
+    emit_layered_attributes(span, report);
+    emit_legacy_attributes(span, report);
+    emit_signal_events(span, report);
+
+    is_concerning(report)
+}
+
+fn emit_overall(span: &SpanRef<'_>, report: &SignalReport) {
+    span.set_attribute(KeyValue::new(
+        "signals.quality",
+        report.overall_quality.as_str().to_string(),
+    ));
+    span.set_attribute(KeyValue::new(
+        "signals.quality_score",
+        report.quality_score as f64,
+    ));
+    span.set_attribute(KeyValue::new(
+        "signals.turn_count",
+        report.turn_metrics.total_turns as i64,
+    ));
+    span.set_attribute(KeyValue::new(
+        "signals.efficiency_score",
+        report.turn_metrics.efficiency_score as f64,
+    ));
+}
+
+fn emit_group(span: &SpanRef<'_>, prefix: &str, group: &SignalGroup) {
+    if group.count == 0 {
+        return;
+    }
+    span.set_attribute(KeyValue::new(
+        format!("{}.count", prefix),
+        group.count as i64,
+    ));
+    span.set_attribute(KeyValue::new(
+        format!("{}.severity", prefix),
+        group.severity as i64,
+    ));
+}
+
+fn emit_layered_attributes(span: &SpanRef<'_>, report: &SignalReport) {
+    emit_group(
+        span,
+        "signals.interaction.misalignment",
+        &report.interaction.misalignment,
+    );
+    emit_group(
+        span,
+        "signals.interaction.stagnation",
+        &report.interaction.stagnation,
+    );
+    emit_group(
+        span,
+        "signals.interaction.disengagement",
+        &report.interaction.disengagement,
+    );
+
emit_group( + span, + "signals.interaction.satisfaction", + &report.interaction.satisfaction, + ); + emit_group(span, "signals.execution.failure", &report.execution.failure); + emit_group(span, "signals.execution.loops", &report.execution.loops); + emit_group( + span, + "signals.environment.exhaustion", + &report.environment.exhaustion, + ); +} + +fn count_of(report: &SignalReport, t: SignalType) -> usize { + report.iter_signals().filter(|s| s.signal_type == t).count() +} + +/// Emit the legacy attribute keys consumed by existing dashboards. These are +/// derived from the new `SignalReport` so no detector contract is broken. +fn emit_legacy_attributes(span: &SpanRef<'_>, report: &SignalReport) { + use crate::tracing::signals as legacy; + + // signals.follow_up.repair.{count,ratio} - misalignment proxies repairs. + let repair_count = report.interaction.misalignment.count; + let user_turns = report.turn_metrics.user_turns.max(1) as f32; + if repair_count > 0 { + span.set_attribute(KeyValue::new(legacy::REPAIR_COUNT, repair_count as i64)); + let ratio = repair_count as f32 / user_turns; + span.set_attribute(KeyValue::new(legacy::REPAIR_RATIO, format!("{:.3}", ratio))); + } + + // signals.frustration.{count,severity} - disengagement.negative_stance is + // the closest legacy analog of "frustration". + let frustration_count = count_of(report, SignalType::DisengagementNegativeStance); + if frustration_count > 0 { + span.set_attribute(KeyValue::new( + legacy::FRUSTRATION_COUNT, + frustration_count as i64, + )); + let severity = match frustration_count { + 0 => 0, + 1..=2 => 1, + 3..=4 => 2, + _ => 3, + }; + span.set_attribute(KeyValue::new(legacy::FRUSTRATION_SEVERITY, severity as i64)); + } + + // signals.repetition.count - stagnation (repetition + dragging). 
+    if report.interaction.stagnation.count > 0 {
+        span.set_attribute(KeyValue::new(
+            legacy::REPETITION_COUNT,
+            report.interaction.stagnation.count as i64,
+        ));
+    }
+
+    // signals.escalation.requested - any escalation/quit signal.
+    let escalated = report.interaction.disengagement.signals.iter().any(|s| {
+        matches!(
+            s.signal_type,
+            SignalType::DisengagementEscalation | SignalType::DisengagementQuit
+        )
+    });
+    if escalated {
+        span.set_attribute(KeyValue::new(legacy::ESCALATION_REQUESTED, true));
+    }
+
+    // signals.positive_feedback.count - satisfaction signals.
+    if report.interaction.satisfaction.count > 0 {
+        span.set_attribute(KeyValue::new(
+            legacy::POSITIVE_FEEDBACK_COUNT,
+            report.interaction.satisfaction.count as i64,
+        ));
+    }
+}
+
+fn emit_signal_events(span: &SpanRef<'_>, report: &SignalReport) {
+    for sig in report.iter_signals() {
+        let event_name = format!("signal.{}", sig.signal_type.as_str());
+        let mut attrs: Vec<KeyValue> = vec![
+            KeyValue::new("signal.type", sig.signal_type.as_str().to_string()),
+            KeyValue::new("signal.message_index", sig.message_index as i64),
+            KeyValue::new("signal.confidence", sig.confidence as f64),
+        ];
+        if !sig.snippet.is_empty() {
+            attrs.push(KeyValue::new("signal.snippet", sig.snippet.clone()));
+        }
+        if !sig.metadata.is_null() {
+            attrs.push(KeyValue::new("signal.metadata", sig.metadata.to_string()));
+        }
+        span.add_event(event_name, attrs);
+    }
+}
+
+fn is_concerning(report: &SignalReport) -> bool {
+    use crate::signals::schemas::InteractionQuality;
+    if matches!(
+        report.overall_quality,
+        InteractionQuality::Poor | InteractionQuality::Severe
+    ) {
+        return true;
+    }
+    if report.interaction.disengagement.count > 0 {
+        return true;
+    }
+    if report.interaction.stagnation.count > 2 {
+        return true;
+    }
+    if report.execution.failure.count > 0 || report.execution.loops.count > 0 {
+        return true;
+    }
+    false
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::signals::schemas::{
+        EnvironmentSignals,
ExecutionSignals, InteractionQuality, InteractionSignals, SignalGroup, + SignalInstance, SignalReport, SignalType, TurnMetrics, + }; + + fn report_with_escalation() -> SignalReport { + let mut diseng = SignalGroup::new("disengagement"); + diseng.add_signal(SignalInstance::new( + SignalType::DisengagementEscalation, + 3, + "get me a human", + )); + SignalReport { + interaction: InteractionSignals { + disengagement: diseng, + ..InteractionSignals::default() + }, + execution: ExecutionSignals::default(), + environment: EnvironmentSignals::default(), + overall_quality: InteractionQuality::Severe, + quality_score: 0.0, + turn_metrics: TurnMetrics { + total_turns: 3, + user_turns: 2, + assistant_turns: 1, + is_dragging: false, + efficiency_score: 1.0, + }, + summary: String::new(), + } + } + + #[test] + fn is_concerning_flags_disengagement() { + let r = report_with_escalation(); + assert!(is_concerning(&r)); + } + + #[test] + fn count_of_returns_per_type_count() { + let r = report_with_escalation(); + assert_eq!(count_of(&r, SignalType::DisengagementEscalation), 1); + assert_eq!(count_of(&r, SignalType::DisengagementNegativeStance), 0); + } +} diff --git a/crates/brightstaff/src/signals/schemas.rs b/crates/brightstaff/src/signals/schemas.rs new file mode 100644 index 00000000..5ff8b5df --- /dev/null +++ b/crates/brightstaff/src/signals/schemas.rs @@ -0,0 +1,420 @@ +//! Data shapes for the signal analyzer. +//! +//! Mirrors `signals/schemas.py` from the reference implementation. Where the +//! Python library exposes a `Dict[str, SignalGroup]` partitioned by category, +//! the Rust port uses strongly-typed sub-structs (`InteractionSignals`, +//! `ExecutionSignals`, `EnvironmentSignals`) for the same partitioning. + +use serde::{Deserialize, Serialize}; + +/// Hierarchical signal type. The 20 leaf variants mirror the paper taxonomy +/// and the Python reference's `SignalType` string enum. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum SignalType { + // Interaction > Misalignment + MisalignmentCorrection, + MisalignmentRephrase, + MisalignmentClarification, + + // Interaction > Stagnation + StagnationDragging, + StagnationRepetition, + + // Interaction > Disengagement + DisengagementEscalation, + DisengagementQuit, + DisengagementNegativeStance, + + // Interaction > Satisfaction + SatisfactionGratitude, + SatisfactionConfirmation, + SatisfactionSuccess, + + // Execution > Failure + ExecutionFailureInvalidArgs, + ExecutionFailureBadQuery, + ExecutionFailureToolNotFound, + ExecutionFailureAuthMisuse, + ExecutionFailureStateError, + + // Execution > Loops + ExecutionLoopsRetry, + ExecutionLoopsParameterDrift, + ExecutionLoopsOscillation, + + // Environment > Exhaustion + EnvironmentExhaustionApiError, + EnvironmentExhaustionTimeout, + EnvironmentExhaustionRateLimit, + EnvironmentExhaustionNetwork, + EnvironmentExhaustionMalformed, + EnvironmentExhaustionContextOverflow, +} + +impl SignalType { + /// Dotted hierarchical string identifier, e.g. + /// `"interaction.misalignment.correction"`. Matches the Python reference's + /// `SignalType` enum *value* strings byte-for-byte. 
+ pub fn as_str(&self) -> &'static str { + match self { + SignalType::MisalignmentCorrection => "interaction.misalignment.correction", + SignalType::MisalignmentRephrase => "interaction.misalignment.rephrase", + SignalType::MisalignmentClarification => "interaction.misalignment.clarification", + SignalType::StagnationDragging => "interaction.stagnation.dragging", + SignalType::StagnationRepetition => "interaction.stagnation.repetition", + SignalType::DisengagementEscalation => "interaction.disengagement.escalation", + SignalType::DisengagementQuit => "interaction.disengagement.quit", + SignalType::DisengagementNegativeStance => "interaction.disengagement.negative_stance", + SignalType::SatisfactionGratitude => "interaction.satisfaction.gratitude", + SignalType::SatisfactionConfirmation => "interaction.satisfaction.confirmation", + SignalType::SatisfactionSuccess => "interaction.satisfaction.success", + SignalType::ExecutionFailureInvalidArgs => "execution.failure.invalid_args", + SignalType::ExecutionFailureBadQuery => "execution.failure.bad_query", + SignalType::ExecutionFailureToolNotFound => "execution.failure.tool_not_found", + SignalType::ExecutionFailureAuthMisuse => "execution.failure.auth_misuse", + SignalType::ExecutionFailureStateError => "execution.failure.state_error", + SignalType::ExecutionLoopsRetry => "execution.loops.retry", + SignalType::ExecutionLoopsParameterDrift => "execution.loops.parameter_drift", + SignalType::ExecutionLoopsOscillation => "execution.loops.oscillation", + SignalType::EnvironmentExhaustionApiError => "environment.exhaustion.api_error", + SignalType::EnvironmentExhaustionTimeout => "environment.exhaustion.timeout", + SignalType::EnvironmentExhaustionRateLimit => "environment.exhaustion.rate_limit", + SignalType::EnvironmentExhaustionNetwork => "environment.exhaustion.network", + SignalType::EnvironmentExhaustionMalformed => { + "environment.exhaustion.malformed_response" + } + SignalType::EnvironmentExhaustionContextOverflow 
=> { + "environment.exhaustion.context_overflow" + } + } + } + + pub fn layer(&self) -> SignalLayer { + match self { + SignalType::MisalignmentCorrection + | SignalType::MisalignmentRephrase + | SignalType::MisalignmentClarification + | SignalType::StagnationDragging + | SignalType::StagnationRepetition + | SignalType::DisengagementEscalation + | SignalType::DisengagementQuit + | SignalType::DisengagementNegativeStance + | SignalType::SatisfactionGratitude + | SignalType::SatisfactionConfirmation + | SignalType::SatisfactionSuccess => SignalLayer::Interaction, + SignalType::ExecutionFailureInvalidArgs + | SignalType::ExecutionFailureBadQuery + | SignalType::ExecutionFailureToolNotFound + | SignalType::ExecutionFailureAuthMisuse + | SignalType::ExecutionFailureStateError + | SignalType::ExecutionLoopsRetry + | SignalType::ExecutionLoopsParameterDrift + | SignalType::ExecutionLoopsOscillation => SignalLayer::Execution, + SignalType::EnvironmentExhaustionApiError + | SignalType::EnvironmentExhaustionTimeout + | SignalType::EnvironmentExhaustionRateLimit + | SignalType::EnvironmentExhaustionNetwork + | SignalType::EnvironmentExhaustionMalformed + | SignalType::EnvironmentExhaustionContextOverflow => SignalLayer::Environment, + } + } + + /// Category name within the layer (e.g. `"misalignment"`, `"failure"`). + pub fn category(&self) -> &'static str { + // Strip the layer prefix and take everything before the next dot. 
+ let s = self.as_str(); + let after_layer = s.split_once('.').map(|(_, rest)| rest).unwrap_or(s); + after_layer + .split_once('.') + .map(|(c, _)| c) + .unwrap_or(after_layer) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum SignalLayer { + Interaction, + Execution, + Environment, +} + +impl SignalLayer { + pub fn as_str(&self) -> &'static str { + match self { + SignalLayer::Interaction => "interaction", + SignalLayer::Execution => "execution", + SignalLayer::Environment => "environment", + } + } +} + +/// Overall quality assessment for an agent interaction session. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum InteractionQuality { + Excellent, + Good, + Neutral, + Poor, + Severe, +} + +impl InteractionQuality { + pub fn as_str(&self) -> &'static str { + match self { + InteractionQuality::Excellent => "excellent", + InteractionQuality::Good => "good", + InteractionQuality::Neutral => "neutral", + InteractionQuality::Poor => "poor", + InteractionQuality::Severe => "severe", + } + } +} + +/// A single detected signal instance. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SignalInstance { + pub signal_type: SignalType, + /// Absolute index into the original conversation `Vec`. + pub message_index: usize, + pub snippet: String, + pub confidence: f32, + /// Free-form metadata payload mirroring the Python `Dict[str, Any]`. + /// Stored as a JSON object so we can faithfully reproduce the reference's + /// flexible per-detector metadata. 
+    #[serde(default)]
+    pub metadata: serde_json::Value,
+}
+
+impl SignalInstance {
+    pub fn new(signal_type: SignalType, message_index: usize, snippet: impl Into<String>) -> Self {
+        Self {
+            signal_type,
+            message_index,
+            snippet: snippet.into(),
+            confidence: 1.0,
+            metadata: serde_json::Value::Object(serde_json::Map::new()),
+        }
+    }
+
+    pub fn with_confidence(mut self, c: f32) -> Self {
+        self.confidence = c;
+        self
+    }
+
+    pub fn with_metadata(mut self, m: serde_json::Value) -> Self {
+        self.metadata = m;
+        self
+    }
+}
+
+/// Aggregated signals for a specific category.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SignalGroup {
+    pub category: String,
+    pub count: usize,
+    pub signals: Vec<SignalInstance>,
+    /// Severity level (0-3: none, mild, moderate, severe).
+    pub severity: u8,
+}
+
+impl SignalGroup {
+    pub fn new(category: impl Into<String>) -> Self {
+        Self {
+            category: category.into(),
+            count: 0,
+            signals: Vec::new(),
+            severity: 0,
+        }
+    }
+
+    pub fn add_signal(&mut self, signal: SignalInstance) {
+        self.signals.push(signal);
+        self.count = self.signals.len();
+        self.update_severity();
+    }
+
+    fn update_severity(&mut self) {
+        self.severity = match self.count {
+            0 => 0,
+            1..=2 => 1,
+            3..=4 => 2,
+            _ => 3,
+        };
+    }
+}
+
+/// Turn count and efficiency metrics, used by stagnation.dragging.
+#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct TurnMetrics { + pub total_turns: usize, + pub user_turns: usize, + pub assistant_turns: usize, + pub is_dragging: bool, + pub efficiency_score: f32, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct InteractionSignals { + pub misalignment: SignalGroup, + pub stagnation: SignalGroup, + pub disengagement: SignalGroup, + pub satisfaction: SignalGroup, +} + +impl Default for InteractionSignals { + fn default() -> Self { + Self { + misalignment: SignalGroup::new("misalignment"), + stagnation: SignalGroup::new("stagnation"), + disengagement: SignalGroup::new("disengagement"), + satisfaction: SignalGroup::new("satisfaction"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExecutionSignals { + pub failure: SignalGroup, + pub loops: SignalGroup, +} + +impl Default for ExecutionSignals { + fn default() -> Self { + Self { + failure: SignalGroup::new("failure"), + loops: SignalGroup::new("loops"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnvironmentSignals { + pub exhaustion: SignalGroup, +} + +impl Default for EnvironmentSignals { + fn default() -> Self { + Self { + exhaustion: SignalGroup::new("exhaustion"), + } + } +} + +/// Complete signal analysis report for a conversation. 
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct SignalReport {
+    pub interaction: InteractionSignals,
+    pub execution: ExecutionSignals,
+    pub environment: EnvironmentSignals,
+    pub overall_quality: InteractionQuality,
+    pub quality_score: f32,
+    pub turn_metrics: TurnMetrics,
+    pub summary: String,
+}
+
+impl Default for SignalReport {
+    fn default() -> Self {
+        Self {
+            interaction: InteractionSignals::default(),
+            execution: ExecutionSignals::default(),
+            environment: EnvironmentSignals::default(),
+            overall_quality: InteractionQuality::Neutral,
+            quality_score: 50.0,
+            turn_metrics: TurnMetrics::default(),
+            summary: String::new(),
+        }
+    }
+}
+
+impl SignalReport {
+    /// Iterate over every `SignalInstance` across all layers and groups.
+    pub fn iter_signals(&self) -> impl Iterator<Item = &SignalInstance> {
+        self.interaction
+            .misalignment
+            .signals
+            .iter()
+            .chain(self.interaction.stagnation.signals.iter())
+            .chain(self.interaction.disengagement.signals.iter())
+            .chain(self.interaction.satisfaction.signals.iter())
+            .chain(self.execution.failure.signals.iter())
+            .chain(self.execution.loops.signals.iter())
+            .chain(self.environment.exhaustion.signals.iter())
+    }
+
+    pub fn has_signal_type(&self, t: SignalType) -> bool {
+        self.iter_signals().any(|s| s.signal_type == t)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn signal_type_strings_match_paper_taxonomy() {
+        assert_eq!(
+            SignalType::MisalignmentCorrection.as_str(),
+            "interaction.misalignment.correction"
+        );
+        assert_eq!(
+            SignalType::ExecutionFailureInvalidArgs.as_str(),
+            "execution.failure.invalid_args"
+        );
+        assert_eq!(
+            SignalType::EnvironmentExhaustionMalformed.as_str(),
+            "environment.exhaustion.malformed_response"
+        );
+    }
+
+    #[test]
+    fn signal_type_layer_and_category() {
+        assert_eq!(
+            SignalType::MisalignmentRephrase.layer(),
+            SignalLayer::Interaction
+        );
+        assert_eq!(SignalType::MisalignmentRephrase.category(), "misalignment");
+        assert_eq!(
+
SignalType::ExecutionLoopsRetry.layer(), + SignalLayer::Execution + ); + assert_eq!(SignalType::ExecutionLoopsRetry.category(), "loops"); + assert_eq!( + SignalType::EnvironmentExhaustionTimeout.layer(), + SignalLayer::Environment + ); + assert_eq!( + SignalType::EnvironmentExhaustionTimeout.category(), + "exhaustion" + ); + } + + #[test] + fn signal_group_severity_buckets_match_python() { + let mut g = SignalGroup::new("misalignment"); + assert_eq!(g.severity, 0); + for n in 1..=2 { + g.add_signal(SignalInstance::new( + SignalType::MisalignmentCorrection, + n, + "x", + )); + } + assert_eq!(g.severity, 1); + for n in 3..=4 { + g.add_signal(SignalInstance::new( + SignalType::MisalignmentCorrection, + n, + "x", + )); + } + assert_eq!(g.severity, 2); + for n in 5..=6 { + g.add_signal(SignalInstance::new( + SignalType::MisalignmentCorrection, + n, + "x", + )); + } + assert_eq!(g.severity, 3); + } +} diff --git a/crates/brightstaff/src/signals/text_processing.rs b/crates/brightstaff/src/signals/text_processing.rs new file mode 100644 index 00000000..a1d463cc --- /dev/null +++ b/crates/brightstaff/src/signals/text_processing.rs @@ -0,0 +1,401 @@ +//! Text normalization and similarity primitives. +//! +//! Direct Rust port of `signals/text_processing.py` from the reference. The +//! shapes (`NormalizedMessage`, `NormalizedPattern`) and similarity formulas +//! match the Python implementation exactly so that pattern matching produces +//! the same results on the same inputs. + +use std::collections::{HashMap, HashSet}; + +/// Size of character n-grams used for fuzzy similarity (3 = trigrams). +pub const NGRAM_SIZE: usize = 3; + +const PUNCT_TRIM: &[char] = &[ + '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', + '>', '?', '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~', +]; + +/// Pre-processed message with normalized text and tokens for efficient matching. 
+#[derive(Debug, Clone, Default)]
+pub struct NormalizedMessage {
+    pub raw: String,
+    pub tokens: Vec<String>,
+    pub token_set: HashSet<String>,
+    pub bigram_set: HashSet<String>,
+    pub char_ngram_set: HashSet<String>,
+    pub token_frequency: HashMap<String, usize>,
+}
+
+impl NormalizedMessage {
+    /// Create a normalized message from raw text. Mirrors
+    /// `NormalizedMessage.from_text` in the reference, including the
+    /// head-20%/tail-80% truncation strategy when text exceeds `max_length`.
+    pub fn from_text(text: &str, max_length: usize) -> Self {
+        let char_count = text.chars().count();
+
+        let raw: String = if char_count <= max_length {
+            text.to_string()
+        } else {
+            let head_len = max_length / 5;
+            // Reserve one char for the joining space.
+            let tail_len = max_length.saturating_sub(head_len + 1);
+            let head: String = text.chars().take(head_len).collect();
+            let tail: String = text
+                .chars()
+                .skip(char_count.saturating_sub(tail_len))
+                .collect();
+            format!("{} {}", head, tail)
+        };
+
+        // Normalize unicode punctuation to ASCII equivalents.
+        let normalized_unicode = raw
+            .replace(['\u{2019}', '\u{2018}'], "'")
+            .replace(['\u{201c}', '\u{201d}'], "\"")
+            .replace(['\u{2013}', '\u{2014}'], "-");
+
+        // Lowercase + collapse whitespace (matches Python's `" ".join(s.split())`).
+        let normalized: String = normalized_unicode
+            .to_lowercase()
+            .split_whitespace()
+            .collect::<Vec<_>>()
+            .join(" ");
+
+        let mut tokens: Vec<String> = Vec::new();
+        for word in normalized.split_whitespace() {
+            let stripped: String = word.trim_matches(PUNCT_TRIM).to_string();
+            if !stripped.is_empty() {
+                tokens.push(stripped);
+            }
+        }
+
+        let token_set: HashSet<String> = tokens.iter().cloned().collect();
+
+        let mut bigram_set: HashSet<String> = HashSet::new();
+        for i in 0..tokens.len().saturating_sub(1) {
+            bigram_set.insert(format!("{} {}", tokens[i], tokens[i + 1]));
+        }
+
+        let tokens_text = tokens.join(" ");
+        let char_ngram_set = char_ngrams(&tokens_text, NGRAM_SIZE);
+
+        let mut token_frequency: HashMap<String, usize> = HashMap::new();
+        for t in &tokens {
+            *token_frequency.entry(t.clone()).or_insert(0) += 1;
+        }
+
+        Self {
+            raw,
+            tokens,
+            token_set,
+            bigram_set,
+            char_ngram_set,
+            token_frequency,
+        }
+    }
+
+    pub fn contains_token(&self, token: &str) -> bool {
+        self.token_set.contains(token)
+    }
+
+    pub fn contains_phrase(&self, phrase: &str) -> bool {
+        let phrase_tokens: Vec<&str> = phrase.split_whitespace().collect();
+        if phrase_tokens.is_empty() {
+            return false;
+        }
+        if phrase_tokens.len() == 1 {
+            return self.contains_token(phrase_tokens[0]);
+        }
+        if phrase_tokens.len() > self.tokens.len() {
+            return false;
+        }
+        let n = phrase_tokens.len();
+        for i in 0..=self.tokens.len() - n {
+            if self.tokens[i..i + n]
+                .iter()
+                .zip(phrase_tokens.iter())
+                .all(|(a, b)| a == b)
+            {
+                return true;
+            }
+        }
+        false
+    }
+
+    /// Character n-gram (Jaccard) similarity vs another normalized message.
+    pub fn ngram_similarity_with_message(&self, other: &NormalizedMessage) -> f32 {
+        jaccard(&self.char_ngram_set, &other.char_ngram_set)
+    }
+
+    /// Character n-gram (Jaccard) similarity vs a raw pattern string.
+    pub fn ngram_similarity_with_pattern(&self, pattern: &str) -> f32 {
+        let normalized = strip_non_word_chars(&pattern.to_lowercase());
+        let pattern_ngrams = char_ngrams(&normalized, NGRAM_SIZE);
+        jaccard(&self.char_ngram_set, &pattern_ngrams)
+    }
+
+    /// Fraction of pattern's ngrams contained in this message's ngram set.
+    pub fn char_ngram_containment(&self, pattern: &str) -> f32 {
+        let normalized = strip_non_word_chars(&pattern.to_lowercase());
+        let pattern_ngrams = char_ngrams(&normalized, NGRAM_SIZE);
+        if pattern_ngrams.is_empty() {
+            return 0.0;
+        }
+        let contained = pattern_ngrams
+            .iter()
+            .filter(|ng| self.char_ngram_set.contains(*ng))
+            .count();
+        contained as f32 / pattern_ngrams.len() as f32
+    }
+
+    /// Token-frequency cosine similarity vs a raw pattern string.
+    pub fn token_cosine_similarity(&self, pattern: &str) -> f32 {
+        let mut pattern_freq: HashMap<String, usize> = HashMap::new();
+        for word in pattern.to_lowercase().split_whitespace() {
+            let stripped = word.trim_matches(PUNCT_TRIM);
+            if !stripped.is_empty() {
+                *pattern_freq.entry(stripped.to_string()).or_insert(0) += 1;
+            }
+        }
+        cosine_freq(&self.token_frequency, &pattern_freq)
+    }
+
+    /// Layered match against a pre-normalized pattern. Mirrors
+    /// `matches_normalized_pattern` from the reference: exact phrase ->
+    /// char-ngram Jaccard -> token cosine.
+    pub fn matches_normalized_pattern(
+        &self,
+        pattern: &NormalizedPattern,
+        char_ngram_threshold: f32,
+        token_cosine_threshold: f32,
+    ) -> bool {
+        // Layer 0: exact phrase match using pre-tokenized message.
+        let plen = pattern.tokens.len();
+        let slen = self.tokens.len();
+        if plen > 0 && plen <= slen {
+            for i in 0..=slen - plen {
+                if self.tokens[i..i + plen] == pattern.tokens[..] {
+                    return true;
+                }
+            }
+        }
+
+        // Layer 1: character n-gram Jaccard similarity.
+ if !self.char_ngram_set.is_empty() && !pattern.char_ngram_set.is_empty() { + let inter = self + .char_ngram_set + .intersection(&pattern.char_ngram_set) + .count(); + let union = self.char_ngram_set.union(&pattern.char_ngram_set).count(); + if union > 0 { + let sim = inter as f32 / union as f32; + if sim >= char_ngram_threshold { + return true; + } + } + } + + // Layer 2: token frequency cosine similarity. + if !self.token_frequency.is_empty() && !pattern.token_frequency.is_empty() { + let sim = cosine_freq(&self.token_frequency, &pattern.token_frequency); + if sim >= token_cosine_threshold { + return true; + } + } + + false + } +} + +/// Pre-processed pattern with normalized text and pre-computed n-grams/tokens. +#[derive(Debug, Clone, Default)] +pub struct NormalizedPattern { + pub raw: String, + pub tokens: Vec, + pub char_ngram_set: HashSet, + pub token_frequency: HashMap, +} + +impl NormalizedPattern { + pub fn from_text(pattern: &str) -> Self { + let normalized = pattern + .to_lowercase() + .replace(['\u{2019}', '\u{2018}'], "'") + .replace(['\u{201c}', '\u{201d}'], "\"") + .replace(['\u{2013}', '\u{2014}'], "-"); + let normalized: String = normalized.split_whitespace().collect::>().join(" "); + + // Tokenize the same way as NormalizedMessage (trim boundary punctuation, + // keep internal punctuation). + let mut tokens: Vec = Vec::new(); + for word in normalized.split_whitespace() { + let stripped = word.trim_matches(PUNCT_TRIM); + if !stripped.is_empty() { + tokens.push(stripped.to_string()); + } + } + + // For ngrams + cosine, strip ALL punctuation (matches Python's + // `re.sub(r"[^\w\s]", "", normalized)`). 
+ let normalized_for_ngrams = strip_non_word_chars(&normalized); + let char_ngram_set = char_ngrams(&normalized_for_ngrams, NGRAM_SIZE); + + let tokens_no_punct: Vec<&str> = normalized_for_ngrams.split_whitespace().collect(); + let mut token_frequency: HashMap = HashMap::new(); + for t in &tokens_no_punct { + *token_frequency.entry((*t).to_string()).or_insert(0) += 1; + } + + Self { + raw: pattern.to_string(), + tokens, + char_ngram_set, + token_frequency, + } + } +} + +/// Convenience: normalize a list of raw pattern strings into `NormalizedPattern`s. +pub fn normalize_patterns(patterns: &[&str]) -> Vec { + patterns + .iter() + .map(|p| NormalizedPattern::from_text(p)) + .collect() +} + +// --------------------------------------------------------------------------- +// Similarity primitives +// --------------------------------------------------------------------------- + +fn char_ngrams(s: &str, n: usize) -> HashSet { + // Python iterates by character index, not byte; mirror that with .chars(). 
+ let chars: Vec = s.chars().collect(); + let mut out: HashSet = HashSet::new(); + if chars.len() < n { + return out; + } + for i in 0..=chars.len() - n { + out.insert(chars[i..i + n].iter().collect()); + } + out +} + +fn jaccard(a: &HashSet, b: &HashSet) -> f32 { + if a.is_empty() && b.is_empty() { + return 1.0; + } + if a.is_empty() || b.is_empty() { + return 0.0; + } + let inter = a.intersection(b).count(); + let union = a.union(b).count(); + if union == 0 { + 0.0 + } else { + inter as f32 / union as f32 + } +} + +fn cosine_freq(a: &HashMap, b: &HashMap) -> f32 { + if a.is_empty() && b.is_empty() { + return 1.0; + } + if a.is_empty() || b.is_empty() { + return 0.0; + } + let mut dot: f64 = 0.0; + let mut n1_sq: f64 = 0.0; + let mut n2_sq: f64 = 0.0; + for (token, &freq2) in b { + let freq1 = *a.get(token).unwrap_or(&0); + dot += (freq1 * freq2) as f64; + n2_sq += (freq2 * freq2) as f64; + } + for &freq1 in a.values() { + n1_sq += (freq1 * freq1) as f64; + } + let n1 = n1_sq.sqrt(); + let n2 = n2_sq.sqrt(); + if n1 == 0.0 || n2 == 0.0 { + 0.0 + } else { + (dot / (n1 * n2)) as f32 + } +} + +/// Python equivalent: `re.sub(r"[^\w\s]", "", text)` followed by whitespace +/// collapse. Python's `\w` is `[A-Za-z0-9_]` plus unicode word characters; we +/// use Rust's `char::is_alphanumeric()` plus `_` for an equivalent definition. 
+fn strip_non_word_chars(text: &str) -> String { + let mut out = String::with_capacity(text.len()); + for c in text.chars() { + if c.is_alphanumeric() || c == '_' || c.is_whitespace() { + out.push(c); + } + } + out.split_whitespace().collect::>().join(" ") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn normalize_lowercases_and_strips_punctuation() { + let m = NormalizedMessage::from_text("Hello, World!", 2000); + assert_eq!(m.tokens, vec!["hello".to_string(), "world".to_string()]); + } + + #[test] + fn normalizes_smart_quotes() { + let m = NormalizedMessage::from_text("don\u{2019}t", 2000); + assert!(m.tokens.contains(&"don't".to_string())); + } + + #[test] + fn truncates_long_text_with_head_tail() { + let long = "a".repeat(3000); + let m = NormalizedMessage::from_text(&long, 2000); + // raw should be ~ 2000 chars (head + space + tail) + assert!(m.raw.chars().count() <= 2001); + assert!(m.raw.starts_with("aa")); + assert!(m.raw.ends_with("aa")); + } + + #[test] + fn contains_phrase_matches_consecutive_tokens() { + let m = NormalizedMessage::from_text("I think this is great work", 2000); + assert!(m.contains_phrase("this is great")); + assert!(!m.contains_phrase("great this")); + } + + #[test] + fn matches_pattern_via_exact_phrase() { + let m = NormalizedMessage::from_text("No, I meant the second one", 2000); + let p = NormalizedPattern::from_text("no i meant"); + assert!(m.matches_normalized_pattern(&p, 0.65, 0.6)); + } + + #[test] + fn matches_pattern_via_char_ngram_fuzziness() { + // Typo in "meant" -> "ment" so layer 0 (exact phrase) cannot match, + // forcing the matcher to fall back to layer 1 (char n-gram Jaccard). 
+        let m = NormalizedMessage::from_text("No I ment", 2000);
+        let p = NormalizedPattern::from_text("no i meant");
+        assert!(m.matches_normalized_pattern(&p, 0.4, 0.6));
+    }
+
+    #[test]
+    fn jaccard_identical_sets_is_one() {
+        let a: HashSet<String> = ["abc", "bcd"].iter().map(|s| s.to_string()).collect();
+        assert!((jaccard(&a, &a) - 1.0).abs() < 1e-6);
+    }
+
+    #[test]
+    fn cosine_freq_orthogonal_is_zero() {
+        let mut a: HashMap<String, usize> = HashMap::new();
+        a.insert("hello".to_string(), 1);
+        let mut b: HashMap<String, usize> = HashMap::new();
+        b.insert("world".to_string(), 1);
+        assert_eq!(cosine_freq(&a, &b), 0.0);
+    }
+}
diff --git a/crates/brightstaff/src/streaming.rs b/crates/brightstaff/src/streaming.rs
index 8a0f414b..26af8672 100644
--- a/crates/brightstaff/src/streaming.rs
+++ b/crates/brightstaff/src/streaming.rs
@@ -22,8 +22,9 @@ const STREAM_BUFFER_SIZE: usize = 16;
 const USAGE_BUFFER_MAX: usize = 2 * 1024 * 1024;
 use crate::metrics as bs_metrics;
 use crate::metrics::labels as metric_labels;
-use crate::signals::{InteractionQuality, SignalAnalyzer, TextBasedSignalAnalyzer, FLAG_MARKER};
-use crate::tracing::{llm, set_service_name, signals as signal_constants};
+use crate::signals::otel::emit_signals_to_span;
+use crate::signals::{SignalAnalyzer, FLAG_MARKER};
+use crate::tracing::{llm, set_service_name};
 use hermesllm::apis::openai::Message;
 
 /// Parsed usage + resolved-model details from a provider response.
@@ -365,77 +366,19 @@ impl StreamProcessor for ObservableStreamProcessor {
         self.response_buffer.clear();
         self.response_buffer.shrink_to_fit();
 
-        // Analyze signals if messages are available and record as span attributes
+        // Analyze signals if messages are available and record as span
+        // attributes + per-signal events. We dual-emit legacy aggregate keys
+        // and the new layered taxonomy so existing dashboards keep working
+        // while new consumers can opt into the richer hierarchy.
if let Some(ref messages) = self.messages { - let analyzer: Box = Box::new(TextBasedSignalAnalyzer::new()); - let report = analyzer.analyze(messages); + let analyzer = SignalAnalyzer::default(); + let report = analyzer.analyze_openai(messages); - // Get the current OTel span to set signal attributes let span = tracing::Span::current(); let otel_context = span.context(); let otel_span = otel_context.span(); - // Add overall quality - otel_span.set_attribute(KeyValue::new( - signal_constants::QUALITY, - format!("{:?}", report.overall_quality), - )); - - // Add repair/follow-up metrics if concerning - if report.follow_up.is_concerning || report.follow_up.repair_count > 0 { - otel_span.set_attribute(KeyValue::new( - signal_constants::REPAIR_COUNT, - report.follow_up.repair_count as i64, - )); - otel_span.set_attribute(KeyValue::new( - signal_constants::REPAIR_RATIO, - format!("{:.3}", report.follow_up.repair_ratio), - )); - } - - // Add frustration metrics - if report.frustration.has_frustration { - otel_span.set_attribute(KeyValue::new( - signal_constants::FRUSTRATION_COUNT, - report.frustration.frustration_count as i64, - )); - otel_span.set_attribute(KeyValue::new( - signal_constants::FRUSTRATION_SEVERITY, - report.frustration.severity as i64, - )); - } - - // Add repetition metrics - if report.repetition.has_looping { - otel_span.set_attribute(KeyValue::new( - signal_constants::REPETITION_COUNT, - report.repetition.repetition_count as i64, - )); - } - - // Add escalation metrics - if report.escalation.escalation_requested { - otel_span - .set_attribute(KeyValue::new(signal_constants::ESCALATION_REQUESTED, true)); - } - - // Add positive feedback metrics - if report.positive_feedback.has_positive_feedback { - otel_span.set_attribute(KeyValue::new( - signal_constants::POSITIVE_FEEDBACK_COUNT, - report.positive_feedback.positive_count as i64, - )); - } - - // Flag the span name if any concerning signal is detected - let should_flag = report.frustration.has_frustration 
- || report.repetition.has_looping - || report.escalation.escalation_requested - || matches!( - report.overall_quality, - InteractionQuality::Poor | InteractionQuality::Severe - ); - + let should_flag = emit_signals_to_span(&otel_span, &report); if should_flag { otel_span.update_name(format!("{} {}", self.operation_name, FLAG_MARKER)); }