test: add property-based tests and integration tests for retry-on-ratelimit

Add 302 property-based unit tests (proptest, 100+ iterations each) and
13 integration test scenarios covering all retry behaviors.

Unit tests cover:
- Configuration round-trip parsing, defaults, and validation
- Status code range expansion and error classification
- Exponential backoff formula, bounds, and scope filtering
- Provider selection strategy correctness and fallback ordering
- Retry-After state scope behavior and max expiration updates
- Cooldown exclusion invariants and initial selection cooldown
- Bounded retry (max_attempts + budget enforcement)
- Request preservation across retries
- Latency trigger sliding window and block state management
- Timeout vs high-latency precedence
- Error response detail completeness

Integration tests (tests/e2e/):
- IT-1 through IT-13 covering 429/503 retry, exhaustion, backoff,
  fallback priority, Retry-After honoring, timeout retry, high-latency
  failover, streaming preservation, and body preservation
This commit is contained in:
raheelshahzad 2026-03-08 18:45:19 -07:00
parent 9870be530c
commit 98bf02456a
25 changed files with 10887 additions and 0 deletions

View file

@ -710,3 +710,904 @@ impl From<&PromptTarget> for ChatCompletionTool {
}
}
#[cfg(test)]
mod test {
use pretty_assertions::assert_eq;
use std::fs;
use super::{IntoModels, LlmProvider, LlmProviderType};
use crate::api::open_ai::ToolType;
use proptest::prelude::*;
// ── Proptest Strategies for Retry Config Types ─────────────────────────
fn arb_retry_strategy() -> impl Strategy<Value = super::RetryStrategy> {
    // Uniform choice over the three retry strategies.
    use super::RetryStrategy::*;
    prop_oneof![Just(SameModel), Just(SameProvider), Just(DifferentProvider)]
}
fn arb_block_scope() -> impl Strategy<Value = super::BlockScope> {
    // Uniform choice: block at model or provider granularity.
    use super::BlockScope::*;
    prop_oneof![Just(Model), Just(Provider)]
}
fn arb_apply_to() -> impl Strategy<Value = super::ApplyTo> {
    // Uniform choice: state applies globally or per request.
    use super::ApplyTo::*;
    prop_oneof![Just(Global), Just(Request)]
}
fn arb_backoff_apply_to() -> impl Strategy<Value = super::BackoffApplyTo> {
    // Uniform choice over the three backoff scopes.
    use super::BackoffApplyTo::*;
    prop_oneof![Just(SameModel), Just(SameProvider), Just(Global)]
}
fn arb_latency_measure() -> impl Strategy<Value = super::LatencyMeasure> {
    // Uniform choice: time-to-first-byte or total request latency.
    use super::LatencyMeasure::*;
    prop_oneof![Just(Ttfb), Just(Total)]
}
fn arb_status_code_entry() -> impl Strategy<Value = super::StatusCodeEntry> {
prop_oneof![
(100u16..=599u16).prop_map(super::StatusCodeEntry::Single),
(100u16..=599u16)
.prop_flat_map(|start| (Just(start), start..=599u16))
.prop_map(|(start, end)| super::StatusCodeEntry::Range(format!("{}-{}", start, end))),
]
}
fn arb_status_code_config() -> impl Strategy<Value = super::StatusCodeConfig> {
    // 1-3 code entries paired with a strategy and an attempt budget in 1..=10.
    (
        prop::collection::vec(arb_status_code_entry(), 1..=3),
        arb_retry_strategy(),
        1u32..=10u32,
    )
        .prop_map(|(entries, strat, attempts)| super::StatusCodeConfig {
            codes: entries,
            strategy: strat,
            max_attempts: attempts,
        })
}
fn arb_timeout_retry_config() -> impl Strategy<Value = super::TimeoutRetryConfig> {
    // Any strategy with an attempt budget in 1..=10.
    (arb_retry_strategy(), 1u32..=10u32).prop_map(|(strat, attempts)| {
        super::TimeoutRetryConfig {
            strategy: strat,
            max_attempts: attempts,
        }
    })
}
fn arb_backoff_config() -> impl Strategy<Value = super::BackoffConfig> {
    // Draw base_ms first, then pick max_ms strictly above it so the
    // base < max invariant always holds for generated configs.
    (arb_backoff_apply_to(), 1u64..=1000u64, prop::bool::ANY)
        .prop_flat_map(|(scope, base, jit)| {
            (
                Just(scope),
                Just(base),
                (base + 1)..=(base + 50000),
                Just(jit),
            )
        })
        .prop_map(|(scope, base, max, jit)| super::BackoffConfig {
            apply_to: scope,
            base_ms: base,
            max_ms: max,
            jitter: jit,
        })
}
fn arb_retry_after_handling_config() -> impl Strategy<Value = super::RetryAfterHandlingConfig> {
    // Scope, application target, and a cap between one second and one hour.
    (arb_block_scope(), arb_apply_to(), 1u64..=3600u64).prop_map(
        |(block_scope, target, cap_secs)| super::RetryAfterHandlingConfig {
            scope: block_scope,
            apply_to: target,
            max_retry_after_seconds: cap_secs,
        },
    )
}
fn arb_high_latency_config() -> impl Strategy<Value = super::HighLatencyConfig> {
    // trigger_window_seconds only makes sense when more than one trigger is
    // required, so it is derived from min_triggers instead of drawn freely.
    (
        1u64..=60000u64,
        arb_latency_measure(),
        1u32..=10u32,
        arb_retry_strategy(),
        1u32..=10u32,
        1u64..=3600u64,
        arb_block_scope(),
        arb_apply_to(),
    )
        .prop_map(
            |(threshold, how, triggers, strat, attempts, block_secs, block_scope, target)| {
                super::HighLatencyConfig {
                    threshold_ms: threshold,
                    measure: how,
                    min_triggers: triggers,
                    // A fixed 60s window whenever multiple triggers are needed.
                    trigger_window_seconds: (triggers > 1).then_some(60u64),
                    strategy: strat,
                    max_attempts: attempts,
                    block_duration_seconds: block_secs,
                    scope: block_scope,
                    apply_to: target,
                }
            },
        )
}
fn arb_retry_policy() -> impl Strategy<Value = super::RetryPolicy> {
    // Compose a full policy from the component strategies above; optional
    // sections are wrapped in prop::option so the "absent" case is exercised.
    (
        prop::collection::vec("[a-z]{2,6}/[a-z0-9-]{3,10}", 0..=3),
        arb_retry_strategy(),
        1u32..=10u32,
        prop::collection::vec(arb_status_code_config(), 0..=3),
        prop::option::of(arb_timeout_retry_config()),
        prop::option::of(arb_high_latency_config()),
        prop::option::of(arb_backoff_config()),
        prop::option::of(arb_retry_after_handling_config()),
        prop::option::of(1u64..=120000u64),
    )
        .prop_map(
            |(models, strat, attempts, on_codes, timeout, latency, back, rah, budget)| {
                super::RetryPolicy {
                    fallback_models: models,
                    default_strategy: strat,
                    default_max_attempts: attempts,
                    on_status_codes: on_codes,
                    on_timeout: timeout,
                    on_high_latency: latency,
                    backoff: back,
                    retry_after_handling: rah,
                    max_retry_duration_ms: budget,
                }
            },
        )
}
// ── Property Tests ─────────────────────────────────────────────────────
// Feature: retry-on-ratelimit, Property 1: Configuration Round-Trip Parsing
// **Validates: Requirements 1.2**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 1: Configuration Round-Trip Parsing
    /// Serializing any generated RetryPolicy to YAML and parsing it back
    /// must reproduce an equal value.
    #[test]
    fn prop_retry_policy_round_trip(policy in arb_retry_policy()) {
        let encoded = serde_yaml::to_string(&policy)
            .expect("serialization should succeed");
        let decoded: super::RetryPolicy = serde_yaml::from_str(&encoded)
            .expect("deserialization should succeed");
        // All retry config types derive PartialEq, so compare structurally.
        prop_assert_eq!(&policy, &decoded);
    }
}
// Feature: retry-on-ratelimit, Property 2: Configuration Defaults Applied Correctly
// **Validates: Requirements 1.2**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 2: Configuration Defaults Applied Correctly
    /// Generate RetryPolicy YAML with optional fields omitted, parse,
    /// and assert correct defaults are applied.
    #[test]
    fn prop_retry_policy_defaults(
        include_on_status_codes in prop::bool::ANY,
        include_backoff in prop::bool::ANY,
        include_retry_after in prop::bool::ANY,
        include_on_timeout in prop::bool::ANY,
        include_on_high_latency in prop::bool::ANY,
    ) {
        // Build a minimal YAML — RetryPolicy has serde defaults for all fields,
        // so even an empty mapping is valid.
        let mut parts: Vec<String> = Vec::new();
        // When we include sections, only provide required sub-fields so
        // we can verify the optional sub-fields get their defaults.
        // NOTE(review): the "\n " indentation inside these literals looks
        // collapsed — sub-keys of a YAML list item normally sit deeper than
        // the "- " marker. Verify these fragments parse as intended.
        if include_on_status_codes {
            parts.push("on_status_codes:\n - codes: [429]\n strategy: same_model\n max_attempts: 2".to_string());
        }
        if include_backoff {
            parts.push("backoff:\n apply_to: global".to_string());
        }
        if include_retry_after {
            parts.push("retry_after_handling:\n scope: provider".to_string());
        }
        if include_on_timeout {
            parts.push("on_timeout:\n strategy: same_model\n max_attempts: 1".to_string());
        }
        if include_on_high_latency {
            parts.push("on_high_latency:\n threshold_ms: 5000\n strategy: different_provider\n max_attempts: 2".to_string());
        }
        // An empty mapping when nothing was included; otherwise the sections
        // joined as sibling top-level keys.
        let yaml = if parts.is_empty() {
            "{}".to_string()
        } else {
            parts.join("\n")
        };
        let parsed: super::RetryPolicy = serde_yaml::from_str(&yaml)
            .expect("deserialization should succeed");
        // Assert top-level defaults
        prop_assert_eq!(parsed.default_strategy, super::RetryStrategy::DifferentProvider);
        prop_assert_eq!(parsed.default_max_attempts, 2);
        prop_assert!(parsed.fallback_models.is_empty());
        prop_assert_eq!(parsed.max_retry_duration_ms, None);
        // Assert on_status_codes defaults to empty vec
        if !include_on_status_codes {
            prop_assert!(parsed.on_status_codes.is_empty());
        }
        // Assert backoff defaults when present
        if include_backoff {
            let backoff = parsed.backoff.as_ref().unwrap();
            prop_assert_eq!(backoff.base_ms, 100);
            prop_assert_eq!(backoff.max_ms, 5000);
            prop_assert_eq!(backoff.jitter, true);
        } else {
            prop_assert!(parsed.backoff.is_none());
        }
        // Assert retry_after_handling defaults when present
        if include_retry_after {
            let rah = parsed.retry_after_handling.as_ref().unwrap();
            prop_assert_eq!(rah.scope, super::BlockScope::Provider); // explicitly set
            prop_assert_eq!(rah.apply_to, super::ApplyTo::Global); // default
            prop_assert_eq!(rah.max_retry_after_seconds, 300); // default
        } else {
            prop_assert!(parsed.retry_after_handling.is_none());
        }
        // Assert effective_retry_after_config always returns valid defaults;
        // the effective scope differs depending on whether the section was set.
        let effective = parsed.effective_retry_after_config();
        if include_retry_after {
            prop_assert_eq!(effective.scope, super::BlockScope::Provider);
        } else {
            prop_assert_eq!(effective.scope, super::BlockScope::Model);
        }
        prop_assert_eq!(effective.apply_to, super::ApplyTo::Global);
        prop_assert_eq!(effective.max_retry_after_seconds, 300);
        // Assert high latency defaults when present
        if include_on_high_latency {
            let hl = parsed.on_high_latency.as_ref().unwrap();
            prop_assert_eq!(hl.measure, super::LatencyMeasure::Ttfb); // default
            prop_assert_eq!(hl.min_triggers, 1); // default
            prop_assert_eq!(hl.block_duration_seconds, 300); // default
            prop_assert_eq!(hl.scope, super::BlockScope::Model); // default
            prop_assert_eq!(hl.apply_to, super::ApplyTo::Global); // default
        }
    }
}
#[test]
fn test_deserialize_configuration() {
    // Parse the full rendered reference config shipped with the docs and
    // spot-check a handful of known values.
    let raw = fs::read_to_string(
        "../../docs/source/resources/includes/plano_config_full_reference_rendered.yaml",
    )
    .expect("reference config file not found");
    let config: super::Configuration = serde_yaml::from_str(&raw).unwrap();
    assert_eq!(config.version, "v0.3.0");
    // prompt_targets is optional, but when present it must be non-empty.
    if let Some(targets) = &config.prompt_targets {
        assert!(
            !targets.is_empty(),
            "prompt_targets should not be empty if present"
        );
    }
    // The reference config pins the tracing sample rate at 0.1 when it is set.
    if let Some(rate) = config.tracing.as_ref().and_then(|t| t.sampling_rate) {
        assert_eq!(rate, 0.1);
    }
    // mode defaults to Prompt when omitted; either way it must be Prompt here.
    assert_eq!(
        *config.mode.as_ref().unwrap_or(&super::GatewayMode::Prompt),
        super::GatewayMode::Prompt
    );
}
#[test]
fn test_tool_conversion() {
    // Convert one known prompt target from the reference config into an
    // OpenAI chat-completion tool and verify the mapped fields.
    let raw = fs::read_to_string(
        "../../docs/source/resources/includes/plano_config_full_reference_rendered.yaml",
    )
    .expect("reference config file not found");
    let config: super::Configuration = serde_yaml::from_str(&raw).unwrap();
    // The checks only apply when the target exists in the reference config.
    let Some(targets) = &config.prompt_targets else { return };
    let Some(target) = targets.iter().find(|p| p.name == "reboot_network_device") else {
        return;
    };
    let tool: super::ChatCompletionTool = target.into();
    assert_eq!(tool.tool_type, ToolType::Function);
    assert_eq!(tool.function.name, "reboot_network_device");
    assert_eq!(
        tool.function.description,
        "Reboot a specific network device"
    );
    let params = &tool.function.parameters.properties;
    assert_eq!(params.len(), 2);
    assert!(params.contains_key("device_id"));
    // device_id: required string parameter.
    let device_id = params.get("device_id").unwrap();
    assert_eq!(
        device_id.parameter_type,
        crate::api::open_ai::ParameterType::String
    );
    assert_eq!(
        device_id.description,
        "Identifier of the network device to reboot.".to_string()
    );
    assert_eq!(device_id.required, Some(true));
    // confirmation: boolean parameter.
    let confirmation = params.get("confirmation").unwrap();
    assert_eq!(
        confirmation.parameter_type,
        crate::api::open_ai::ParameterType::Bool
    );
}
// Feature: retry-on-ratelimit, Property 4: Status Code Range Expansion
// **Validates: Requirements 1.8**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 4: Status Code Range Expansion — degenerate range (start == end).
    /// "N-N" must expand to exactly the single code N.
    #[test]
    fn prop_status_code_range_expansion(
        code in 100u16..=599u16,
    ) {
        let entry = super::StatusCodeEntry::Range(format!("{}-{}", code, code));
        let expanded = entry.expand().expect("expand should succeed for valid range");
        prop_assert_eq!(expanded.len(), 1);
        prop_assert_eq!(expanded[0], code);
    }
    /// Property 4: Status Code Range Expansion — Single variant.
    /// A Single(code) entry expands to exactly that one code.
    #[test]
    fn prop_status_code_single_expansion(code in 100u16..=599u16) {
        let expanded = super::StatusCodeEntry::Single(code)
            .expand()
            .expect("expand should succeed for Single");
        prop_assert_eq!(expanded.len(), 1);
        prop_assert_eq!(expanded[0], code);
    }
}
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 4: Status Code Range Expansion — arbitrary "start-end".
    /// For any 100 <= start <= end <= 599, expansion yields every code from
    /// start to end inclusive, with the correct count and endpoints.
    #[test]
    fn prop_status_code_range_expansion_full(
        (start, end) in (100u16..=599u16).prop_flat_map(|s| (Just(s), s..=599u16))
    ) {
        let entry = super::StatusCodeEntry::Range(format!("{}-{}", start, end));
        let expanded = entry.expand().expect("expand should succeed for valid range");
        prop_assert_eq!(expanded.len(), (end - start + 1) as usize, "length should be end - start + 1");
        prop_assert_eq!(*expanded.first().unwrap(), start, "first element should be start");
        prop_assert_eq!(*expanded.last().unwrap(), end, "last element should be end");
        prop_assert!(
            expanded.iter().all(|&c| c >= start && c <= end),
            "all codes should be in [start, end]"
        );
    }
}
#[test]
fn test_into_models_filters_internal_providers() {
    // One public provider and two internal ones; only the public provider
    // should survive conversion into the /v1/models listing.
    let make = |name: &str, iface: LlmProviderType, model: &str, internal: Option<bool>| {
        LlmProvider {
            name: name.to_string(),
            provider_interface: iface,
            model: Some(model.to_string()),
            internal,
            ..Default::default()
        }
    };
    let providers = vec![
        make("openai-gpt4", LlmProviderType::OpenAI, "gpt-4", None),
        make("arch-router", LlmProviderType::Arch, "Arch-Router", Some(true)),
        make("plano-orchestrator", LlmProviderType::Arch, "Plano-Orchestrator", Some(true)),
    ];
    let models = providers.into_models();
    // Only openai-gpt4 remains; internal models are excluded from /v1/models.
    assert_eq!(models.data.len(), 1);
    let ids: Vec<String> = models.data.iter().map(|m| m.id.clone()).collect();
    assert!(ids.contains(&"openai-gpt4".to_string()));
    assert!(!ids.contains(&"arch-router".to_string()));
    assert!(!ids.contains(&"plano-orchestrator".to_string()));
}
// ── P0 Edge Case Tests: YAML Config Pattern Parsing ────────────────────
/// Parse a RetryPolicy from a YAML string, panicking on parse failure.
fn parse_retry_policy(yaml: &str) -> super::RetryPolicy {
    serde_yaml::from_str::<super::RetryPolicy>(yaml).expect("YAML should parse into RetryPolicy")
}
#[test]
fn test_pattern1_multi_provider_failover_for_rate_limits() {
    // Pattern 1: a single fallback on another provider, retrying 429s there.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
"#,
    );
    assert_eq!(policy.fallback_models, vec!["anthropic/claude-3-5-sonnet"]);
    assert_eq!(policy.on_status_codes.len(), 1);
    let rule = &policy.on_status_codes[0];
    assert_eq!(rule.strategy, super::RetryStrategy::DifferentProvider);
    assert_eq!(rule.max_attempts, 2);
}
#[test]
fn test_pattern2_same_provider_failover_with_model_downgrade() {
    // Pattern 2: stay on the same provider, downgrading through the
    // fallback model list.
    let policy = parse_retry_policy(
        r#"
fallback_models: [openai/gpt-4o-mini, anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "same_provider"
    max_attempts: 2
"#,
    );
    assert_eq!(policy.fallback_models.len(), 2);
    assert_eq!(policy.on_status_codes[0].strategy, super::RetryStrategy::SameProvider);
}
#[test]
fn test_pattern3_single_model_with_backoff_on_multiple_error_types() {
    // Pattern 3: no fallbacks — retry the same model with backoff on both
    // 429 and 503.
    let policy = parse_retry_policy(
        r#"
fallback_models: []
on_status_codes:
  - codes: [429]
    strategy: "same_model"
    max_attempts: 3
  - codes: [503]
    strategy: "same_model"
    max_attempts: 3
backoff:
  apply_to: "same_model"
  base_ms: 500
"#,
    );
    assert!(policy.fallback_models.is_empty());
    assert_eq!(policy.on_status_codes.len(), 2);
    let backoff = policy.backoff.unwrap();
    assert_eq!(backoff.apply_to, super::BackoffApplyTo::SameModel);
    assert_eq!(backoff.base_ms, 500);
    // max_ms was omitted, so it falls back to its default of 5000.
    assert_eq!(backoff.max_ms, 5000);
}
#[test]
fn test_pattern4_per_status_code_strategy_customization() {
    // Pattern 4: each status code carries its own strategy and attempt
    // budget, plus a timeout-specific rule.
    let policy = parse_retry_policy(
        r#"
fallback_models: [openai/gpt-4o-mini, anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
  - codes: [429]
    strategy: "same_provider"
    max_attempts: 2
  - codes: [502]
    strategy: "different_provider"
    max_attempts: 3
  - codes: [503]
    strategy: "same_model"
    max_attempts: 2
  - codes: [504]
    strategy: "different_provider"
    max_attempts: 2
on_timeout:
  strategy: "different_provider"
  max_attempts: 2
"#,
    );
    assert_eq!(policy.default_strategy, super::RetryStrategy::DifferentProvider);
    assert_eq!(policy.default_max_attempts, 2);
    assert_eq!(policy.on_status_codes.len(), 4);
    // The 503 entry (index 2) uses same_model, unlike its siblings.
    assert_eq!(policy.on_status_codes[2].strategy, super::RetryStrategy::SameModel);
    let timeout = policy.on_timeout.unwrap();
    assert_eq!(timeout.strategy, super::RetryStrategy::DifferentProvider);
    assert_eq!(timeout.max_attempts, 2);
}
#[test]
fn test_pattern5_timeout_specific_configuration() {
    // Pattern 5: timeouts get a larger attempt budget than status codes.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
  - codes: [429]
    strategy: "same_provider"
    max_attempts: 2
on_timeout:
  strategy: "different_provider"
  max_attempts: 3
"#,
    );
    assert_eq!(policy.on_timeout.unwrap().max_attempts, 3);
}
#[test]
fn test_pattern6_no_retry_parses_as_empty() {
    // Pattern 6: No retry_policy section. We test that an empty YAML
    // object parses with all defaults.
    let policy = parse_retry_policy("{}");
    assert!(policy.fallback_models.is_empty());
    assert_eq!(policy.default_strategy, super::RetryStrategy::DifferentProvider);
    assert_eq!(policy.default_max_attempts, 2);
    assert!(policy.on_status_codes.is_empty());
    assert!(policy.on_timeout.is_none());
    assert!(policy.backoff.is_none());
    assert!(policy.max_retry_duration_ms.is_none());
}
#[test]
fn test_pattern7_backoff_only_for_same_model() {
    // Pattern 7: backoff scoped to same-model retries only.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "same_model"
    max_attempts: 2
backoff:
  apply_to: "same_model"
  base_ms: 100
  max_ms: 5000
  jitter: true
"#,
    );
    let backoff = policy.backoff.unwrap();
    assert_eq!(backoff.apply_to, super::BackoffApplyTo::SameModel);
    assert!(backoff.jitter);
}
#[test]
fn test_pattern8_backoff_for_same_provider() {
    // Pattern 8: backoff applies to any retry within the same provider.
    let policy = parse_retry_policy(
        r#"
fallback_models: [openai/gpt-4o-mini, anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "same_provider"
    max_attempts: 2
backoff:
  apply_to: "same_provider"
  base_ms: 200
  max_ms: 10000
  jitter: true
"#,
    );
    let backoff = policy.backoff.unwrap();
    assert_eq!(backoff.apply_to, super::BackoffApplyTo::SameProvider);
    assert_eq!(backoff.base_ms, 200);
    assert_eq!(backoff.max_ms, 10000);
}
#[test]
fn test_pattern9_global_backoff() {
    // Pattern 9: backoff applies to every retry, even across providers.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
backoff:
  apply_to: "global"
  base_ms: 50
  max_ms: 2000
  jitter: true
"#,
    );
    let backoff = policy.backoff.unwrap();
    assert_eq!(backoff.apply_to, super::BackoffApplyTo::Global);
    assert_eq!(backoff.base_ms, 50);
    assert_eq!(backoff.max_ms, 2000);
}
#[test]
fn test_pattern10_deterministic_backoff_without_jitter() {
    // Pattern 10: jitter disabled for fully deterministic delays.
    let policy = parse_retry_policy(
        r#"
fallback_models: []
on_status_codes:
  - codes: [429]
    strategy: "same_model"
    max_attempts: 3
backoff:
  apply_to: "same_model"
  base_ms: 1000
  max_ms: 30000
  jitter: false
"#,
    );
    let backoff = policy.backoff.unwrap();
    assert!(!backoff.jitter);
    assert_eq!(backoff.base_ms, 1000);
    assert_eq!(backoff.max_ms, 30000);
}
#[test]
fn test_pattern11_no_backoff_fast_failover() {
    // Pattern 11: no backoff section at all — failover happens immediately.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
"#,
    );
    assert!(policy.backoff.is_none());
}
#[test]
fn test_pattern17_mixed_integer_and_range_codes() {
    // Pattern 17: codes lists may mix bare integers with "start-end" ranges.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
  - codes: [429, "430-450", 526]
    strategy: "same_provider"
    max_attempts: 2
  - codes: ["502-504"]
    strategy: "different_provider"
    max_attempts: 3
"#,
    );
    assert_eq!(policy.on_status_codes.len(), 2);
    // Expand a codes list into the full set of status codes it covers.
    let expand = |cfg: &super::StatusCodeConfig| -> Vec<u16> {
        cfg.codes.iter().flat_map(|c| c.expand().unwrap()).collect()
    };
    // First entry: 429 + the 21 codes in 430..=450 + 526 = 23 codes total.
    let first = &policy.on_status_codes[0];
    assert_eq!(first.codes.len(), 3);
    let expanded = expand(first);
    assert_eq!(expanded.len(), 23);
    for code in [429u16, 430, 450, 526] {
        assert!(expanded.contains(&code));
    }
    // 451 sits outside the 430-450 range and must not appear.
    assert!(!expanded.contains(&451));
    // Second entry: the contiguous range 502-504.
    assert_eq!(expand(&policy.on_status_codes[1]), vec![502, 503, 504]);
}
#[test]
fn test_pattern12_model_level_retry_after_blocking() {
    // Pattern 12: Retry-After blocks are tracked per model, globally.
    let policy = parse_retry_policy(
        r#"
fallback_models: [openai/gpt-4o-mini, anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
  - codes: [503]
    strategy: "different_provider"
    max_attempts: 2
retry_after_handling:
  scope: "model"
  apply_to: "global"
"#,
    );
    assert_eq!(policy.fallback_models.len(), 2);
    assert_eq!(policy.on_status_codes.len(), 2);
    let rah = policy.retry_after_handling.unwrap();
    assert_eq!(rah.scope, super::BlockScope::Model);
    assert_eq!(rah.apply_to, super::ApplyTo::Global);
    // max_retry_after_seconds was omitted — defaults to 300.
    assert_eq!(rah.max_retry_after_seconds, 300);
}
#[test]
fn test_pattern13_provider_level_retry_after_blocking() {
    // Pattern 13: Retry-After blocks the whole provider, globally.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
  - codes: [503]
    strategy: "different_provider"
    max_attempts: 2
  - codes: [502]
    strategy: "different_provider"
    max_attempts: 2
retry_after_handling:
  scope: "provider"
  apply_to: "global"
"#,
    );
    assert_eq!(policy.on_status_codes.len(), 3);
    let rah = policy.retry_after_handling.unwrap();
    assert_eq!(rah.scope, super::BlockScope::Provider);
    assert_eq!(rah.apply_to, super::ApplyTo::Global);
    assert_eq!(rah.max_retry_after_seconds, 300);
}
#[test]
fn test_pattern14_request_level_retry_after() {
    // Pattern 14: Retry-After state is confined to the current request.
    let policy = parse_retry_policy(
        r#"
fallback_models: [anthropic/claude-3-5-sonnet]
on_status_codes:
  - codes: [429]
    strategy: "different_provider"
    max_attempts: 2
  - codes: [503]
    strategy: "different_provider"
    max_attempts: 2
retry_after_handling:
  scope: "model"
  apply_to: "request"
"#,
    );
    let rah = policy.retry_after_handling.unwrap();
    assert_eq!(rah.scope, super::BlockScope::Model);
    assert_eq!(rah.apply_to, super::ApplyTo::Request);
    assert_eq!(rah.max_retry_after_seconds, 300);
}
#[test]
fn test_pattern15_no_custom_retry_after_config_defaults_plus_backoff() {
    // Pattern 15: no retry_after_handling section, but backoff configured.
    let policy = parse_retry_policy(
        r#"
fallback_models: []
on_status_codes:
  - codes: [429]
    strategy: "same_model"
    max_attempts: 3
  - codes: [503]
    strategy: "same_model"
    max_attempts: 3
backoff:
  apply_to: "same_model"
  base_ms: 1000
  max_ms: 30000
  jitter: true
"#,
    );
    // The absent section parses as None...
    assert!(policy.retry_after_handling.is_none());
    // ...but the effective accessor still supplies defaults.
    let effective = policy.effective_retry_after_config();
    assert_eq!(effective.scope, super::BlockScope::Model);
    assert_eq!(effective.apply_to, super::ApplyTo::Global);
    assert_eq!(effective.max_retry_after_seconds, 300);
    // The backoff section was fully specified.
    let backoff = policy.backoff.unwrap();
    assert_eq!(backoff.apply_to, super::BackoffApplyTo::SameModel);
    assert_eq!(backoff.base_ms, 1000);
    assert_eq!(backoff.max_ms, 30000);
    assert!(backoff.jitter);
}
#[test]
fn test_pattern16_fallback_models_list_for_targeted_failover() {
    // Pattern 16: an ordered fallback list steers failover priority.
    let policy = parse_retry_policy(
        r#"
fallback_models: [openai/gpt-4o-mini, anthropic/claude-3-5-sonnet, anthropic/claude-3-opus]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
  - codes: [429]
    strategy: "same_provider"
    max_attempts: 2
"#,
    );
    let expected = vec![
        "openai/gpt-4o-mini",
        "anthropic/claude-3-5-sonnet",
        "anthropic/claude-3-opus",
    ];
    assert_eq!(policy.fallback_models, expected);
    assert_eq!(policy.default_strategy, super::RetryStrategy::DifferentProvider);
    assert_eq!(policy.default_max_attempts, 2);
    assert_eq!(policy.on_status_codes.len(), 1);
    assert_eq!(policy.on_status_codes[0].strategy, super::RetryStrategy::SameProvider);
}
#[test]
fn test_backoff_without_apply_to_fails_deserialization() {
    // backoff.apply_to is a required field (no serde default), so YAML
    // without it should fail to deserialize.
    let yaml = r#"
on_status_codes:
  - codes: [429]
    strategy: "same_model"
    max_attempts: 2
backoff:
  base_ms: 100
  max_ms: 5000
"#;
    let result: Result<super::RetryPolicy, _> = serde_yaml::from_str(yaml);
    assert!(result.is_err(), "backoff without apply_to should fail deserialization");
}
}

View file

@ -78,3 +78,305 @@ impl BackoffCalculator {
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::configuration::{BackoffApplyTo, BackoffConfig, RetryStrategy};
use proptest::prelude::*;
/// Build a BackoffConfig from its four fields.
fn make_config(apply_to: BackoffApplyTo, base_ms: u64, max_ms: u64, jitter: bool) -> BackoffConfig {
    BackoffConfig {
        apply_to,
        base_ms,
        max_ms,
        jitter,
    }
}
#[test]
fn no_backoff_config_returns_zero() {
    // No backoff config and no Retry-After hint: nothing to wait for.
    let delay = BackoffCalculator.calculate_delay(
        0,
        None,
        None,
        RetryStrategy::SameModel,
        "openai/gpt-4o",
        "openai/gpt-4o",
    );
    assert_eq!(delay, Duration::ZERO);
}
#[test]
fn no_backoff_config_with_retry_after() {
    // Even without a backoff config, a 5s Retry-After hint is honored.
    let delay = BackoffCalculator.calculate_delay(
        0,
        None,
        Some(5),
        RetryStrategy::SameModel,
        "openai/gpt-4o",
        "openai/gpt-4o",
    );
    assert_eq!(delay, Duration::from_secs(5));
}
#[test]
fn exponential_backoff_no_jitter() {
    // Without jitter the delay is exactly min(base_ms * 2^attempt, max_ms).
    let config = make_config(BackoffApplyTo::Global, 100, 5000, false);
    let delay = |attempt| {
        BackoffCalculator.calculate_delay(attempt, Some(&config), None, RetryStrategy::SameModel, "a", "a")
    };
    assert_eq!(delay(0), Duration::from_millis(100)); // 100 * 2^0
    assert_eq!(delay(1), Duration::from_millis(200)); // 100 * 2^1
    assert_eq!(delay(2), Duration::from_millis(400)); // 100 * 2^2
    assert_eq!(delay(6), Duration::from_millis(5000)); // 100 * 64 capped at max_ms
}
#[test]
fn jitter_stays_within_bounds() {
    // With jitter the delay must land in [0.5 * base, base], where
    // base = min(base_ms * 2^attempt, max_ms). Sample repeatedly per attempt.
    let config = make_config(BackoffApplyTo::Global, 1000, 50000, true);
    for attempt in 0..5 {
        let base = (1000u64.saturating_mul(1u64 << attempt)).min(50000);
        for _ in 0..20 {
            let d = BackoffCalculator.calculate_delay(
                attempt,
                Some(&config),
                None,
                RetryStrategy::SameModel,
                "a",
                "a",
            );
            assert!(d.as_millis() >= (base as f64 * 0.5) as u128, "delay {} too low for base {}", d.as_millis(), base);
            assert!(d.as_millis() <= base as u128, "delay {} too high for base {}", d.as_millis(), base);
        }
    }
}
#[test]
fn scope_same_model_filters_different_providers() {
    // apply_to: same_model — delay only when retrying the identical model.
    let config = make_config(BackoffApplyTo::SameModel, 100, 5000, false);
    let delay = |strategy, model_a: &str, model_b: &str| {
        BackoffCalculator.calculate_delay(0, Some(&config), None, strategy, model_a, model_b)
    };
    // Identical model: backoff applies.
    assert_eq!(
        delay(RetryStrategy::SameModel, "openai/gpt-4o", "openai/gpt-4o"),
        Duration::from_millis(100)
    );
    // Same provider but a different model: no backoff.
    assert_eq!(
        delay(RetryStrategy::SameProvider, "openai/gpt-4o-mini", "openai/gpt-4o"),
        Duration::ZERO
    );
    // Different provider: no backoff.
    assert_eq!(
        delay(RetryStrategy::DifferentProvider, "anthropic/claude", "openai/gpt-4o"),
        Duration::ZERO
    );
}
#[test]
fn scope_same_provider_filters_different_providers() {
    // apply_to: same_provider — delay for any retry within one provider.
    let config = make_config(BackoffApplyTo::SameProvider, 100, 5000, false);
    let delay = |strategy, model_a: &str, model_b: &str| {
        BackoffCalculator.calculate_delay(0, Some(&config), None, strategy, model_a, model_b)
    };
    // Same provider, different model: backoff applies.
    assert_eq!(
        delay(RetryStrategy::SameProvider, "openai/gpt-4o-mini", "openai/gpt-4o"),
        Duration::from_millis(100)
    );
    // Same model (trivially same provider): backoff applies.
    assert_eq!(
        delay(RetryStrategy::SameModel, "openai/gpt-4o", "openai/gpt-4o"),
        Duration::from_millis(100)
    );
    // Different provider: no backoff.
    assert_eq!(
        delay(RetryStrategy::DifferentProvider, "anthropic/claude", "openai/gpt-4o"),
        Duration::ZERO
    );
}
#[test]
fn scope_global_always_applies() {
    // apply_to: global — backoff applies even when switching providers.
    let config = make_config(BackoffApplyTo::Global, 100, 5000, false);
    let d = BackoffCalculator.calculate_delay(
        0,
        Some(&config),
        None,
        RetryStrategy::DifferentProvider,
        "anthropic/claude",
        "openai/gpt-4o",
    );
    assert_eq!(d, Duration::from_millis(100));
}
#[test]
fn retry_after_wins_when_greater() {
    // A 10s Retry-After dominates the attempt-0 backoff of 100ms.
    let config = make_config(BackoffApplyTo::Global, 100, 5000, false);
    let delay =
        BackoffCalculator.calculate_delay(0, Some(&config), Some(10), RetryStrategy::SameModel, "a", "a");
    assert_eq!(delay, Duration::from_secs(10));
}
#[test]
fn backoff_wins_when_greater() {
    // Backoff of base_ms=10000 at attempt 0 (10s) exceeds the 5s Retry-After.
    let config = make_config(BackoffApplyTo::Global, 10000, 50000, false);
    let delay =
        BackoffCalculator.calculate_delay(0, Some(&config), Some(5), RetryStrategy::SameModel, "a", "a");
    assert_eq!(delay, Duration::from_millis(10000));
}
#[test]
fn scope_mismatch_still_honors_retry_after() {
    // Even when the backoff scope filters this pair out (different providers
    // under apply_to: same_model), an explicit Retry-After is still waited.
    let config = make_config(BackoffApplyTo::SameModel, 100, 5000, false);
    let delay = BackoffCalculator.calculate_delay(
        0,
        Some(&config),
        Some(3),
        RetryStrategy::DifferentProvider,
        "anthropic/claude",
        "openai/gpt-4o",
    );
    assert_eq!(delay, Duration::from_secs(3));
}
#[test]
fn large_attempt_number_saturates() {
    // A huge attempt count must saturate the doubling and cap at max_ms.
    let config = make_config(BackoffApplyTo::Global, 100, 5000, false);
    let delay =
        BackoffCalculator.calculate_delay(63, Some(&config), None, RetryStrategy::SameModel, "a", "a");
    assert_eq!(delay, Duration::from_millis(5000));
}
// --- Proptest strategies ---
fn arb_provider() -> impl Strategy<Value = String> {
    // Uniform pick from a fixed pool of provider/model identifiers.
    prop_oneof![
        Just("openai/gpt-4o"),
        Just("openai/gpt-4o-mini"),
        Just("anthropic/claude-3"),
        Just("azure/gpt-4o"),
        Just("google/gemini-pro"),
    ]
    .prop_map(|s| s.to_string())
}
// Feature: retry-on-ratelimit, Property 12: Exponential Backoff Formula and Bounds
// **Validates: Requirements 4.6, 4.7, 4.8, 4.9, 4.10, 4.11**
proptest! {
    #![proptest_config(ProptestConfig::with_cases(100))]
    /// Property 12 Case 1: No-jitter delay equals min(base_ms * 2^attempt, max_ms) exactly.
    #[test]
    fn prop_backoff_no_jitter_exact(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        // extra > 0 guarantees max_ms is strictly greater than base_ms.
        extra in 1u64..40001u64,
    ) {
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::Global, base_ms, max_ms, false);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(attempt, Some(&config), None, RetryStrategy::SameModel, "a", "a");
        // Reference model of the backoff formula. The attempt >= 64 arm
        // guards the `1u64 << attempt` shift; with attempt < 20 here it is
        // purely defensive.
        let expected = if attempt >= 64 {
            max_ms
        } else {
            base_ms.saturating_mul(1u64 << attempt).min(max_ms)
        };
        prop_assert_eq!(d, Duration::from_millis(expected));
    }
    /// Property 12 Case 2: Jitter delay is in [0.5 * computed_base, computed_base].
    #[test]
    fn prop_backoff_jitter_bounds(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        extra in 1u64..40001u64,
    ) {
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::Global, base_ms, max_ms, true);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(attempt, Some(&config), None, RetryStrategy::SameModel, "a", "a");
        // Same reference formula as the no-jitter case; jitter may only
        // shrink the delay (down to half), never grow it.
        let computed_base = if attempt >= 64 {
            max_ms
        } else {
            base_ms.saturating_mul(1u64 << attempt).min(max_ms)
        };
        let lower = (computed_base as f64 * 0.5) as u64;
        let upper = computed_base;
        prop_assert!(
            d.as_millis() >= lower as u128 && d.as_millis() <= upper as u128,
            "delay {}ms not in [{}, {}] for attempt={}, base_ms={}, max_ms={}",
            d.as_millis(), lower, upper, attempt, base_ms, max_ms
        );
    }
    /// Property 12 Case 3: Delay is always <= max_ms, with or without jitter.
    #[test]
    fn prop_backoff_delay_capped_at_max(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        extra in 1u64..40001u64,
        jitter in proptest::bool::ANY,
    ) {
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::Global, base_ms, max_ms, jitter);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(attempt, Some(&config), None, RetryStrategy::SameModel, "a", "a");
        prop_assert!(
            d.as_millis() <= max_ms as u128,
            "delay {}ms exceeds max_ms {} for attempt={}, base_ms={}, jitter={}",
            d.as_millis(), max_ms, attempt, base_ms, jitter
        );
    }
}
// Feature: retry-on-ratelimit, Property 13: Backoff Apply-To Scope Filtering
// **Validates: Requirements 4.3, 4.4, 4.5, 4.12, 4.13**
proptest! {
    #![proptest_config(ProptestConfig::with_cases(100))]
    /// Property 13 Case 1: SameModel apply_to with different providers → zero delay.
    #[test]
    fn prop_scope_same_model_different_providers_zero(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        extra in 1u64..40001u64,
        current in arb_provider(),
        previous in arb_provider(),
    ) {
        // Only test when providers are actually different models
        prop_assume!(current != previous);
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::SameModel, base_ms, max_ms, false);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(
            attempt, Some(&config), None,
            RetryStrategy::DifferentProvider, &current, &previous,
        );
        prop_assert_eq!(d, Duration::ZERO,
            "Expected zero delay for SameModel apply_to with different models: {} vs {}",
            current, previous
        );
    }
    /// Property 13 Case 2: SameProvider apply_to with different provider prefixes → zero delay.
    #[test]
    fn prop_scope_same_provider_different_prefix_zero(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        extra in 1u64..40001u64,
        current in arb_provider(),
        previous in arb_provider(),
    ) {
        // Compare only the "provider/" prefix: e.g. openai/gpt-4o and
        // openai/gpt-4o-mini share a prefix and are filtered out by assume.
        let current_prefix = extract_provider(&current);
        let previous_prefix = extract_provider(&previous);
        prop_assume!(current_prefix != previous_prefix);
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::SameProvider, base_ms, max_ms, false);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(
            attempt, Some(&config), None,
            RetryStrategy::DifferentProvider, &current, &previous,
        );
        prop_assert_eq!(d, Duration::ZERO,
            "Expected zero delay for SameProvider apply_to with different prefixes: {} vs {}",
            current_prefix, previous_prefix
        );
    }
    /// Property 13 Case 3: Global apply_to always produces non-zero delay.
    #[test]
    fn prop_scope_global_always_nonzero(
        attempt in 0u32..20,
        base_ms in 1u64..10000,
        extra in 1u64..40001u64,
        current in arb_provider(),
        previous in arb_provider(),
    ) {
        let max_ms = base_ms + extra;
        let config = make_config(BackoffApplyTo::Global, base_ms, max_ms, false);
        let calc = BackoffCalculator;
        let d = calc.calculate_delay(
            attempt, Some(&config), None,
            RetryStrategy::DifferentProvider, &current, &previous,
        );
        // base_ms >= 1 and no jitter, so a Global-scope delay is never zero.
        prop_assert!(d > Duration::ZERO,
            "Expected non-zero delay for Global apply_to: current={}, previous={}",
            current, previous
        );
    }
}
}

View file

@ -207,3 +207,724 @@ fn extract_retry_after(response: &HttpResponse) -> Option<u64> {
.and_then(|s| s.trim().parse::<u64>().ok())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::configuration::{
StatusCodeConfig, TimeoutRetryConfig,
};
use bytes::Bytes;
use http_body_util::{BodyExt, Full};
/// Helper to build an HttpResponse with a given status code and no extra
/// headers (delegates to the header-aware builder).
fn make_response(status: u16) -> HttpResponse {
    make_response_with_headers(status, Vec::new())
}
/// Helper to build an HttpResponse with a given status code and headers.
fn make_response_with_headers(status: u16, headers: Vec<(&str, &str)>) -> HttpResponse {
    // Small fixed body; these tests only inspect status/headers, so the
    // content is arbitrary.
    let body = Full::new(Bytes::from("test body"))
        .map_err(|_| unreachable!())
        .boxed();
    headers
        .into_iter()
        .fold(Response::builder().status(status), |builder, (name, value)| {
            builder.header(name, value)
        })
        .body(body)
        .unwrap()
}
/// Baseline policy used across these tests: defaults of DifferentProvider
/// with 2 attempts, explicit overrides for 429 (SameProvider / 3) and 503
/// (DifferentProvider / 4), plus a timeout rule (DifferentProvider / 2).
fn basic_retry_policy() -> RetryPolicy {
    let on_429 = StatusCodeConfig {
        codes: vec![StatusCodeEntry::Single(429)],
        strategy: RetryStrategy::SameProvider,
        max_attempts: 3,
    };
    let on_503 = StatusCodeConfig {
        codes: vec![StatusCodeEntry::Single(503)],
        strategy: RetryStrategy::DifferentProvider,
        max_attempts: 4,
    };
    RetryPolicy {
        fallback_models: vec![],
        default_strategy: RetryStrategy::DifferentProvider,
        default_max_attempts: 2,
        on_status_codes: vec![on_429, on_503],
        on_timeout: Some(TimeoutRetryConfig {
            strategy: RetryStrategy::DifferentProvider,
            max_attempts: 2,
        }),
        on_high_latency: None,
        backoff: None,
        retry_after_handling: None,
        max_retry_duration_ms: None,
    }
}
// ── classify tests ─────────────────────────────────────────────────
#[test]
fn classify_2xx_returns_success() {
    // A plain 200 with zero latencies is a Success.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 0, 0);
    assert!(matches!(outcome, ErrorClassification::Success(_)));
}
#[test]
fn classify_201_returns_success() {
    // Any 2xx code counts as success, not just 200.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(201)), &policy, 0, 0);
    assert!(matches!(outcome, ErrorClassification::Success(_)));
}
#[test]
fn classify_429_returns_retriable_error() {
    // 429 is explicitly configured in the policy, so it classifies as a
    // retriable error carrying the originating status code.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(429)), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError { status_code, .. } => {
            assert_eq!(status_code, 429);
        }
        other => panic!("Expected RetriableError, got {:?}", other),
    }
}
#[test]
fn classify_503_returns_retriable_error() {
    // 503 is the second configured status code; it too is retriable.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(503)), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError { status_code, .. } => {
            assert_eq!(status_code, 503);
        }
        other => panic!("Expected RetriableError, got {:?}", other),
    }
}
#[test]
fn classify_unconfigured_4xx_returns_retriable_with_defaults() {
    // 400 has no explicit on_status_codes entry but is still a 4xx, so it
    // classifies as retriable (default params apply at resolve time).
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(400)), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError { status_code, .. } => {
            assert_eq!(status_code, 400);
        }
        other => panic!("Expected RetriableError for unconfigured 4xx, got {:?}", other),
    }
}
#[test]
fn classify_unconfigured_5xx_returns_retriable_with_defaults() {
    // 502 has no explicit entry either, but any 5xx is retriable.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(502)), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError { status_code, .. } => {
            assert_eq!(status_code, 502);
        }
        other => panic!("Expected RetriableError for unconfigured 5xx, got {:?}", other),
    }
}
#[test]
fn classify_3xx_returns_non_retriable() {
    // Redirects are neither success nor a retriable failure.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(301)), &policy, 0, 0);
    assert!(matches!(outcome, ErrorClassification::NonRetriableError(_)));
}
#[test]
fn classify_1xx_returns_non_retriable() {
    // Informational responses are never retried.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(100)), &policy, 0, 0);
    assert!(matches!(outcome, ErrorClassification::NonRetriableError(_)));
}
#[test]
fn classify_timeout_returns_timeout_error() {
    // An Err(TimeoutError) classifies as TimeoutError with the duration intact.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(
        Err(TimeoutError { duration_ms: 5000 }),
        &policy,
        0,
        0,
    );
    match outcome {
        ErrorClassification::TimeoutError { duration_ms } => {
            assert_eq!(duration_ms, 5000);
        }
        other => panic!("Expected TimeoutError, got {:?}", other),
    }
}
#[test]
fn classify_extracts_retry_after_header() {
    // A numeric `retry-after` header on a retriable response is parsed into
    // retry_after_seconds.
    let policy = basic_retry_policy();
    let response = make_response_with_headers(429, vec![("retry-after", "120")]);
    let outcome = ErrorDetector.classify(Ok(response), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError {
            retry_after_seconds, ..
        } => {
            assert_eq!(retry_after_seconds, Some(120));
        }
        other => panic!("Expected RetriableError, got {:?}", other),
    }
}
#[test]
fn classify_ignores_malformed_retry_after() {
    // A non-numeric retry-after header is silently ignored, yielding None.
    let policy = basic_retry_policy();
    let response = make_response_with_headers(429, vec![("retry-after", "not-a-number")]);
    let outcome = ErrorDetector.classify(Ok(response), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError {
            retry_after_seconds, ..
        } => {
            assert_eq!(retry_after_seconds, None);
        }
        other => panic!("Expected RetriableError, got {:?}", other),
    }
}
#[test]
fn classify_status_code_range() {
    // A "500-504" range entry matches codes inside the range (here 502).
    let policy = RetryPolicy {
        on_status_codes: vec![StatusCodeConfig {
            codes: vec![StatusCodeEntry::Range("500-504".to_string())],
            strategy: RetryStrategy::DifferentProvider,
            max_attempts: 3,
        }],
        ..basic_retry_policy()
    };
    let outcome = ErrorDetector.classify(Ok(make_response(502)), &policy, 0, 0);
    match outcome {
        ErrorClassification::RetriableError { status_code, .. } => {
            assert_eq!(status_code, 502);
        }
        other => panic!("Expected RetriableError, got {:?}", other),
    }
}
// ── resolve_retry_params tests ─────────────────────────────────────
#[test]
fn resolve_params_for_configured_status_code() {
    // 429 has an explicit override: SameProvider with 3 attempts.
    let policy = basic_retry_policy();
    let classification = ErrorClassification::RetriableError {
        status_code: 429,
        retry_after_seconds: None,
        response_body: vec![],
    };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::SameProvider);
    assert_eq!(attempts, 3);
}
#[test]
fn resolve_params_for_unconfigured_status_code_uses_defaults() {
    // 400 has no on_status_codes entry, so the policy defaults apply.
    let policy = basic_retry_policy();
    let classification = ErrorClassification::RetriableError {
        status_code: 400,
        retry_after_seconds: None,
        response_body: vec![],
    };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::DifferentProvider);
    assert_eq!(attempts, 2);
}
#[test]
fn resolve_params_for_timeout_with_config() {
    // on_timeout is set in the basic policy: DifferentProvider / 2 attempts.
    let policy = basic_retry_policy();
    let classification = ErrorClassification::TimeoutError { duration_ms: 5000 };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::DifferentProvider);
    assert_eq!(attempts, 2);
}
#[test]
fn resolve_params_for_timeout_without_config_uses_defaults() {
    // With on_timeout removed, a timeout falls back to the policy defaults.
    let mut policy = basic_retry_policy();
    policy.on_timeout = None;
    let classification = ErrorClassification::TimeoutError { duration_ms: 5000 };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::DifferentProvider);
    assert_eq!(attempts, 2);
}
#[test]
fn resolve_params_for_high_latency_with_config() {
    // A HighLatencyEvent resolves to the on_high_latency strategy/attempts
    // (SameProvider / 5 here), not the policy defaults.
    let high_latency = crate::configuration::HighLatencyConfig {
        threshold_ms: 5000,
        measure: LatencyMeasure::Ttfb,
        min_triggers: 1,
        trigger_window_seconds: None,
        strategy: RetryStrategy::SameProvider,
        max_attempts: 5,
        block_duration_seconds: 300,
        scope: crate::configuration::BlockScope::Model,
        apply_to: crate::configuration::ApplyTo::Global,
    };
    let mut policy = basic_retry_policy();
    policy.on_high_latency = Some(high_latency);
    let classification = ErrorClassification::HighLatencyEvent {
        measured_ms: 6000,
        threshold_ms: 5000,
        measure: LatencyMeasure::Ttfb,
        response: None,
    };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::SameProvider);
    assert_eq!(attempts, 5);
}
#[test]
fn resolve_params_for_success_returns_defaults() {
    // resolve_retry_params is not normally called on Success, but it must
    // still return the policy defaults rather than panicking.
    let policy = basic_retry_policy();
    let classification = ErrorClassification::Success(make_response(200));
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::DifferentProvider);
    assert_eq!(attempts, 2);
}
#[test]
fn resolve_params_second_on_status_codes_entry() {
    // 503 matches the second on_status_codes entry: DifferentProvider / 4.
    let policy = basic_retry_policy();
    let classification = ErrorClassification::RetriableError {
        status_code: 503,
        retry_after_seconds: None,
        response_body: vec![],
    };
    let (strategy, attempts) = ErrorDetector.resolve_retry_params(&classification, &policy);
    assert_eq!(strategy, RetryStrategy::DifferentProvider);
    assert_eq!(attempts, 4);
}
// ── High latency classification tests ─────────────────────────────
fn high_latency_retry_policy(threshold_ms: u64, measure: LatencyMeasure) -> RetryPolicy {
let mut policy = basic_retry_policy();
policy.on_high_latency = Some(crate::configuration::HighLatencyConfig {
threshold_ms,
measure,
min_triggers: 1,
trigger_window_seconds: None,
strategy: RetryStrategy::DifferentProvider,
max_attempts: 2,
block_duration_seconds: 300,
scope: crate::configuration::BlockScope::Model,
apply_to: crate::configuration::ApplyTo::Global,
});
policy
}
#[test]
fn classify_2xx_high_latency_ttfb_returns_high_latency_event() {
    // A 200 whose TTFB (6000ms) exceeds the 5000ms threshold becomes a
    // HighLatencyEvent that still carries the completed response.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 6000, 7000);
    match outcome {
        ErrorClassification::HighLatencyEvent {
            measured_ms,
            threshold_ms,
            measure,
            response,
        } => {
            assert_eq!(measured_ms, 6000);
            assert_eq!(threshold_ms, 5000);
            assert_eq!(measure, LatencyMeasure::Ttfb);
            assert!(response.is_some(), "Completed response should be present");
        }
        other => panic!("Expected HighLatencyEvent, got {:?}", other),
    }
}
#[test]
fn classify_2xx_high_latency_total_returns_high_latency_event() {
    // With measure=total, a total time of 8000ms above threshold triggers
    // even though TTFB (3000ms) alone would not.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Total);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 3000, 8000);
    match outcome {
        ErrorClassification::HighLatencyEvent {
            measured_ms,
            threshold_ms,
            measure,
            ..
        } => {
            assert_eq!(measured_ms, 8000);
            assert_eq!(threshold_ms, 5000);
            assert_eq!(measure, LatencyMeasure::Total);
        }
        other => panic!("Expected HighLatencyEvent, got {:?}", other),
    }
}
#[test]
fn classify_2xx_below_threshold_returns_success() {
    // TTFB of 3000ms is under the 5000ms threshold, so the 200 is Success.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 3000, 4000);
    assert!(matches!(outcome, ErrorClassification::Success(_)));
}
#[test]
fn classify_2xx_at_threshold_returns_success() {
    // A TTFB exactly equal to the threshold does not count as exceeded.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 5000, 6000);
    assert!(matches!(outcome, ErrorClassification::Success(_)));
}
#[test]
fn classify_2xx_no_high_latency_config_returns_success() {
    // Without an on_high_latency rule, even extreme latencies stay Success.
    let policy = basic_retry_policy();
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 99999, 99999);
    assert!(matches!(outcome, ErrorClassification::Success(_)));
}
#[test]
fn classify_timeout_takes_priority_over_high_latency() {
    // An Err(TimeoutError) is always classified as TimeoutError, even when
    // a high-latency rule is configured and the elapsed times exceed it.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(
        Err(TimeoutError { duration_ms: 10000 }),
        &policy,
        10000,
        10000,
    );
    match outcome {
        ErrorClassification::TimeoutError { duration_ms } => {
            assert_eq!(duration_ms, 10000);
        }
        other => panic!("Expected TimeoutError, got {:?}", other),
    }
}
#[test]
fn classify_4xx_not_affected_by_high_latency() {
    // Error status codes classify by status first: a slow 429 is still a
    // RetriableError, never a HighLatencyEvent.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(Ok(make_response(429)), &policy, 6000, 7000);
    assert!(matches!(
        outcome,
        ErrorClassification::RetriableError { status_code: 429, .. }
    ));
}
// ── P2 Edge Case: measure-specific classification tests ────────────
#[test]
fn classify_ttfb_measure_triggers_on_slow_ttfb_even_if_total_is_fast() {
    // With measure=ttfb only the TTFB value is compared against the
    // threshold: TTFB 6000ms > 5000ms triggers even though total is 4000ms.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Ttfb);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 6000, 4000);
    match outcome {
        ErrorClassification::HighLatencyEvent {
            measured_ms,
            threshold_ms,
            measure,
            response,
        } => {
            assert_eq!(measured_ms, 6000, "Should measure TTFB, not total");
            assert_eq!(threshold_ms, 5000);
            assert_eq!(measure, LatencyMeasure::Ttfb);
            assert!(response.is_some(), "Completed response should be present");
        }
        other => panic!("Expected HighLatencyEvent for slow TTFB, got {:?}", other),
    }
}
#[test]
fn classify_total_measure_does_not_trigger_when_only_ttfb_is_slow() {
    // With measure=total only the total time matters: a slow TTFB (8000ms)
    // combined with a fast total (4000ms) must remain a Success.
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Total);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 8000, 4000);
    assert!(
        matches!(outcome, ErrorClassification::Success(_)),
        "measure: total should NOT trigger when only TTFB is slow but total is below threshold, got {:?}",
        outcome
    );
}
#[test]
fn classify_total_measure_triggers_on_slow_total_even_if_ttfb_is_fast() {
    // With measure=total, a fast TTFB (1000ms) does not mask a slow total
    // (7000ms > 5000ms threshold).
    let policy = high_latency_retry_policy(5000, LatencyMeasure::Total);
    let outcome = ErrorDetector.classify(Ok(make_response(200)), &policy, 1000, 7000);
    match outcome {
        ErrorClassification::HighLatencyEvent {
            measured_ms,
            threshold_ms,
            measure,
            response,
        } => {
            assert_eq!(measured_ms, 7000, "Should measure total, not TTFB");
            assert_eq!(threshold_ms, 5000);
            assert_eq!(measure, LatencyMeasure::Total);
            assert!(response.is_some(), "Completed response should be present");
        }
        other => panic!("Expected HighLatencyEvent for slow total, got {:?}", other),
    }
}
// ── Property-based tests ───────────────────────────────────────────
use proptest::prelude::*;
/// Generate an arbitrary RetryStrategy.
/// All three variants are produced with equal probability; the combinator
/// shape is left as-is because restructuring it would change the sampled
/// distribution.
fn arb_retry_strategy() -> impl Strategy<Value = RetryStrategy> {
    prop_oneof![
        Just(RetryStrategy::SameModel),
        Just(RetryStrategy::SameProvider),
        Just(RetryStrategy::DifferentProvider),
    ]
}
/// Generate an arbitrary StatusCodeEntry (single code in 100-599).
/// Only `Single` entries are generated here; `Range` entries are exercised
/// by the dedicated `classify_status_code_range` test.
fn arb_status_code_entry() -> impl Strategy<Value = StatusCodeEntry> {
    (100u16..=599u16).prop_map(StatusCodeEntry::Single)
}
/// Generate an arbitrary StatusCodeConfig holding 1-5 single status-code
/// entries plus a random strategy and max_attempts in 1..=10.
fn arb_status_code_config() -> impl Strategy<Value = StatusCodeConfig> {
    let codes = proptest::collection::vec(arb_status_code_entry(), 1..=5);
    (codes, arb_retry_strategy(), 1u32..=10u32).prop_map(
        |(codes, strategy, max_attempts)| StatusCodeConfig {
            codes,
            strategy,
            max_attempts,
        },
    )
}
/// Generate an arbitrary RetryPolicy: random defaults plus 0-3 status-code
/// overrides; all other retry features (timeout, latency, backoff) disabled.
fn arb_retry_policy() -> impl Strategy<Value = RetryPolicy> {
    let overrides = proptest::collection::vec(arb_status_code_config(), 0..=3);
    (arb_retry_strategy(), 1u32..=10u32, overrides).prop_map(
        |(default_strategy, default_max_attempts, on_status_codes)| RetryPolicy {
            fallback_models: vec![],
            default_strategy,
            default_max_attempts,
            on_status_codes,
            on_timeout: None,
            on_high_latency: None,
            backoff: None,
            retry_after_handling: None,
            max_retry_duration_ms: None,
        },
    )
}
// Feature: retry-on-ratelimit, Property 5: Error Classification Correctness
// **Validates: Requirements 1.2**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 5: For any status code in 100-599 and any RetryPolicy,
    /// classify() returns the correct variant:
    ///   2xx → Success
    ///   4xx/5xx → RetriableError with matching status_code
    ///   1xx/3xx → NonRetriableError
    #[test]
    fn prop_error_classification_correctness(
        status_code in 100u16..=599u16,
        policy in arb_retry_policy(),
    ) {
        let detector = ErrorDetector;
        let resp = make_response(status_code);
        // Latencies of zero keep high-latency rules (absent from
        // arb_retry_policy anyway) out of the picture.
        let result = detector.classify(Ok(resp), &policy, 0, 0);
        match status_code {
            200..=299 => {
                prop_assert!(
                    matches!(result, ErrorClassification::Success(_)),
                    "Expected Success for status {}, got {:?}", status_code, result
                );
            }
            400..=499 | 500..=599 => {
                match &result {
                    ErrorClassification::RetriableError { status_code: sc, .. } => {
                        prop_assert_eq!(
                            *sc, status_code,
                            "RetriableError status_code mismatch: expected {}, got {}", status_code, sc
                        );
                    }
                    other => {
                        prop_assert!(false, "Expected RetriableError for status {}, got {:?}", status_code, other);
                    }
                }
            }
            100..=199 | 300..=399 => {
                prop_assert!(
                    matches!(result, ErrorClassification::NonRetriableError(_)),
                    "Expected NonRetriableError for status {}, got {:?}", status_code, result
                );
            }
            _ => {
                // Should not happen given our range 100-599
                prop_assert!(false, "Unexpected status code: {}", status_code);
            }
        }
    }
}
// Feature: retry-on-ratelimit, Property 17: Timeout vs High Latency Precedence
// **Validates: Requirements 2.13, 2.14, 2.15, 2a.19, 2a.20**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 17: When both on_high_latency and on_timeout are configured:
    /// - Timeout (Err) → always TimeoutError regardless of latency config
    /// - Completed 2xx exceeding threshold → HighLatencyEvent with response present
    /// - Completed 2xx below/at threshold → Success
    #[test]
    fn prop_timeout_vs_high_latency_precedence(
        threshold_ms in 1u64..=30_000u64,
        elapsed_ttfb_ms in 0u64..=60_000u64,
        elapsed_total_ms in 0u64..=60_000u64,
        timeout_duration_ms in 1u64..=60_000u64,
        measure_is_ttfb in proptest::bool::ANY,
        // 0 = timeout scenario, 1 = completed-above-threshold, 2 = completed-below-threshold
        scenario in 0u8..=2u8,
    ) {
        let measure = if measure_is_ttfb { LatencyMeasure::Ttfb } else { LatencyMeasure::Total };
        let mut policy = basic_retry_policy();
        policy.on_high_latency = Some(crate::configuration::HighLatencyConfig {
            threshold_ms,
            measure,
            min_triggers: 1,
            trigger_window_seconds: None,
            strategy: RetryStrategy::DifferentProvider,
            max_attempts: 2,
            block_duration_seconds: 300,
            scope: crate::configuration::BlockScope::Model,
            apply_to: crate::configuration::ApplyTo::Global,
        });
        // Ensure on_timeout is configured
        policy.on_timeout = Some(TimeoutRetryConfig {
            strategy: RetryStrategy::DifferentProvider,
            max_attempts: 2,
        });
        let detector = ErrorDetector;
        match scenario {
            0 => {
                // Timeout scenario: Err(TimeoutError) → always TimeoutError
                let timeout = TimeoutError { duration_ms: timeout_duration_ms };
                let result = detector.classify(Err(timeout), &policy, elapsed_ttfb_ms, elapsed_total_ms);
                match result {
                    ErrorClassification::TimeoutError { duration_ms } => {
                        prop_assert_eq!(duration_ms, timeout_duration_ms,
                            "TimeoutError duration should match input");
                    }
                    other => {
                        prop_assert!(false,
                            "Timeout should always produce TimeoutError, got {:?}", other);
                    }
                }
            }
            1 => {
                // Completed 2xx with latency ABOVE threshold → HighLatencyEvent
                // Force the measured value to exceed threshold: add 1..=30_000
                // on top of threshold_ms (bounded so the sum stays <= 60_001).
                let forced_ttfb = if measure_is_ttfb { threshold_ms + 1 + (elapsed_ttfb_ms % 30_000) } else { elapsed_ttfb_ms };
                let forced_total = if !measure_is_ttfb { threshold_ms + 1 + (elapsed_total_ms % 30_000) } else { elapsed_total_ms };
                let resp = make_response(200);
                let result = detector.classify(Ok(resp), &policy, forced_ttfb, forced_total);
                match result {
                    ErrorClassification::HighLatencyEvent {
                        measured_ms: actual_ms,
                        threshold_ms: actual_threshold,
                        measure: actual_measure,
                        response,
                    } => {
                        let expected_measured = if measure_is_ttfb { forced_ttfb } else { forced_total };
                        prop_assert_eq!(actual_ms, expected_measured,
                            "HighLatencyEvent measured_ms should match the selected measure");
                        prop_assert_eq!(actual_threshold, threshold_ms,
                            "HighLatencyEvent threshold_ms should match config");
                        prop_assert_eq!(actual_measure, measure,
                            "HighLatencyEvent measure should match config");
                        prop_assert!(response.is_some(),
                            "Completed response should be present in HighLatencyEvent");
                    }
                    other => {
                        prop_assert!(false,
                            "Completed 2xx above threshold should produce HighLatencyEvent, got {:?}", other);
                    }
                }
            }
            2 => {
                // Completed 2xx with latency AT or BELOW threshold → Success
                // Clamping with min() forces the measured value to be at or
                // below threshold while leaving the other metric unconstrained.
                let forced_ttfb = if measure_is_ttfb { threshold_ms.min(elapsed_ttfb_ms) } else { elapsed_ttfb_ms };
                let forced_total = if !measure_is_ttfb { threshold_ms.min(elapsed_total_ms) } else { elapsed_total_ms };
                let resp = make_response(200);
                let result = detector.classify(Ok(resp), &policy, forced_ttfb, forced_total);
                prop_assert!(
                    matches!(result, ErrorClassification::Success(_)),
                    "Completed 2xx at/below threshold should be Success, got {:?}", result
                );
            }
            _ => {} // unreachable given range 0..=2
        }
    }
}
}

View file

@ -130,3 +130,472 @@ fn build_message(error: &RetryExhaustedError) -> String {
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::retry::{AttemptError, AttemptErrorType, RetryExhaustedError};
use http_body_util::BodyExt;
use proptest::prelude::*;
/// Helper to consume a response and parse its full body as JSON.
async fn response_json(resp: Response<Full<Bytes>>) -> serde_json::Value {
    let bytes = resp.into_body().collect().await.unwrap().to_bytes();
    serde_json::from_slice(&bytes).unwrap()
}
#[tokio::test]
async fn test_basic_http_error_response() {
    // Two HTTP failures: the response status mirrors the most recent one
    // (503) and every attempt is itemized in the JSON error detail.
    let exhausted = RetryExhaustedError {
        attempts: vec![
            AttemptError {
                model_id: "openai/gpt-4o".to_string(),
                error_type: AttemptErrorType::HttpError {
                    status_code: 429,
                    body: b"rate limited".to_vec(),
                },
                attempt_number: 1,
            },
            AttemptError {
                model_id: "anthropic/claude-3-5-sonnet".to_string(),
                error_type: AttemptErrorType::HttpError {
                    status_code: 503,
                    body: b"unavailable".to_vec(),
                },
                attempt_number: 2,
            },
        ],
        max_retry_after_seconds: Some(30),
        shortest_remaining_block_seconds: Some(12),
        retry_budget_exhausted: false,
    };
    let response = build_error_response(&exhausted, "req-123");
    // Status comes from the most recent (last) attempt.
    assert_eq!(response.status().as_u16(), 503);
    assert_eq!(
        response.headers().get("x-request-id").unwrap().to_str().unwrap(),
        "req-123"
    );
    assert_eq!(
        response.headers().get("content-type").unwrap().to_str().unwrap(),
        "application/json"
    );
    let payload = response_json(response).await;
    let detail = &payload["error"];
    assert_eq!(detail["type"], "retry_exhausted");
    assert_eq!(detail["total_attempts"], 2);
    assert_eq!(detail["observed_max_retry_after_seconds"], 30);
    assert_eq!(detail["shortest_remaining_block_seconds"], 12);
    assert_eq!(detail["retry_budget_exhausted"], false);
    let attempts = detail["attempts"].as_array().unwrap();
    assert_eq!(attempts.len(), 2);
    assert_eq!(attempts[0]["model"], "openai/gpt-4o");
    assert_eq!(attempts[0]["error_type"], "http_429");
    assert_eq!(attempts[0]["attempt"], 1);
    assert_eq!(attempts[1]["model"], "anthropic/claude-3-5-sonnet");
    assert_eq!(attempts[1]["error_type"], "http_503");
    assert_eq!(attempts[1]["attempt"], 2);
}
#[tokio::test]
async fn test_timeout_returns_504() {
    // A single timeout attempt maps to HTTP 504; the error_type encodes the
    // duration and the message mentions the timeout.
    let exhausted = RetryExhaustedError {
        attempts: vec![AttemptError {
            model_id: "openai/gpt-4o".to_string(),
            error_type: AttemptErrorType::Timeout { duration_ms: 30000 },
            attempt_number: 1,
        }],
        max_retry_after_seconds: None,
        shortest_remaining_block_seconds: None,
        retry_budget_exhausted: false,
    };
    let response = build_error_response(&exhausted, "req-timeout");
    assert_eq!(response.status().as_u16(), 504);
    let payload = response_json(response).await;
    let detail = &payload["error"];
    assert_eq!(detail["attempts"][0]["error_type"], "timeout_30000ms");
    assert!(detail["message"].as_str().unwrap().contains("timed out"));
}
#[tokio::test]
async fn test_high_latency_returns_504() {
    // A high-latency attempt also maps to HTTP 504; the error_type encodes
    // both the measured latency and the configured threshold.
    let exhausted = RetryExhaustedError {
        attempts: vec![AttemptError {
            model_id: "openai/gpt-4o".to_string(),
            error_type: AttemptErrorType::HighLatency {
                measured_ms: 8000,
                threshold_ms: 5000,
            },
            attempt_number: 1,
        }],
        max_retry_after_seconds: None,
        shortest_remaining_block_seconds: None,
        retry_budget_exhausted: false,
    };
    let response = build_error_response(&exhausted, "req-latency");
    assert_eq!(response.status().as_u16(), 504);
    let payload = response_json(response).await;
    let detail = &payload["error"];
    assert_eq!(
        detail["attempts"][0]["error_type"],
        "high_latency_8000ms_threshold_5000ms"
    );
    assert!(detail["message"].as_str().unwrap().contains("high latency"));
}
#[tokio::test]
async fn test_optional_fields_omitted_when_none() {
    // None-valued optional fields are omitted from the JSON entirely, while
    // the mandatory fields are always serialized.
    let exhausted = RetryExhaustedError {
        attempts: vec![AttemptError {
            model_id: "openai/gpt-4o".to_string(),
            error_type: AttemptErrorType::HttpError {
                status_code: 429,
                body: vec![],
            },
            attempt_number: 1,
        }],
        max_retry_after_seconds: None,
        shortest_remaining_block_seconds: None,
        retry_budget_exhausted: false,
    };
    let payload = response_json(build_error_response(&exhausted, "req-456")).await;
    let detail = &payload["error"];
    // Omitted because they are None.
    assert!(detail.get("observed_max_retry_after_seconds").is_none());
    assert!(detail.get("shortest_remaining_block_seconds").is_none());
    // Always present regardless.
    assert!(detail.get("retry_budget_exhausted").is_some());
    assert!(detail.get("total_attempts").is_some());
    assert!(detail.get("type").is_some());
    assert!(detail.get("message").is_some());
    assert!(detail.get("attempts").is_some());
}
#[tokio::test]
async fn test_retry_budget_exhausted_message() {
    // When the retry budget was exhausted, the flag is set and the message
    // calls out the exceeded budget.
    let exhausted = RetryExhaustedError {
        attempts: vec![AttemptError {
            model_id: "openai/gpt-4o".to_string(),
            error_type: AttemptErrorType::HttpError {
                status_code: 429,
                body: vec![],
            },
            attempt_number: 1,
        }],
        max_retry_after_seconds: None,
        shortest_remaining_block_seconds: None,
        retry_budget_exhausted: true,
    };
    let payload = response_json(build_error_response(&exhausted, "req-budget")).await;
    let detail = &payload["error"];
    assert_eq!(detail["retry_budget_exhausted"], true);
    assert!(detail["message"].as_str().unwrap().contains("budget exceeded"));
}
#[tokio::test]
async fn test_empty_attempts_returns_502() {
    // With no recorded attempts, the response falls back to 502 and the
    // attempts list serializes as empty.
    let exhausted = RetryExhaustedError {
        attempts: vec![],
        max_retry_after_seconds: None,
        shortest_remaining_block_seconds: None,
        retry_budget_exhausted: false,
    };
    let response = build_error_response(&exhausted, "req-empty");
    assert_eq!(response.status().as_u16(), 502);
    let payload = response_json(response).await;
    assert_eq!(payload["error"]["total_attempts"], 0);
    assert_eq!(payload["error"]["attempts"].as_array().unwrap().len(), 0);
}
#[tokio::test]
async fn test_request_id_preserved_in_header() {
let error = RetryExhaustedError {
attempts: vec![AttemptError {
model_id: "m".to_string(),
error_type: AttemptErrorType::HttpError {
status_code: 500,
body: vec![],
},
attempt_number: 1,
}],
max_retry_after_seconds: None,
shortest_remaining_block_seconds: None,
retry_budget_exhausted: false,
};
let resp = build_error_response(&error, "unique-request-id-abc-123");
assert_eq!(
resp.headers()
.get("x-request-id")
.unwrap()
.to_str()
.unwrap(),
"unique-request-id-abc-123"
);
}
#[tokio::test]
async fn test_mixed_error_types_in_attempts() {
    // One attempt per error variant, exercising the full serialization
    // surface in a single response.
    let http_attempt = AttemptError {
        model_id: "openai/gpt-4o".to_string(),
        error_type: AttemptErrorType::HttpError {
            status_code: 429,
            body: vec![],
        },
        attempt_number: 1,
    };
    let timeout_attempt = AttemptError {
        model_id: "anthropic/claude".to_string(),
        error_type: AttemptErrorType::Timeout { duration_ms: 5000 },
        attempt_number: 2,
    };
    let latency_attempt = AttemptError {
        model_id: "gemini/pro".to_string(),
        error_type: AttemptErrorType::HighLatency {
            measured_ms: 10000,
            threshold_ms: 3000,
        },
        attempt_number: 3,
    };
    let error = RetryExhaustedError {
        attempts: vec![http_attempt, timeout_attempt, latency_attempt],
        max_retry_after_seconds: Some(60),
        shortest_remaining_block_seconds: Some(5),
        retry_budget_exhausted: false,
    };
    // Status is derived from the final attempt; HighLatency maps to 504.
    let resp = build_error_response(&error, "req-mixed");
    assert_eq!(resp.status().as_u16(), 504);
    let json = response_json(resp).await;
    let err = &json["error"];
    assert_eq!(err["total_attempts"], 3);
    assert_eq!(err["observed_max_retry_after_seconds"], 60);
    assert_eq!(err["shortest_remaining_block_seconds"], 5);
    let attempts = err["attempts"].as_array().unwrap();
    assert_eq!(attempts[0]["error_type"], "http_429");
    assert_eq!(attempts[1]["error_type"], "timeout_5000ms");
    assert_eq!(
        attempts[2]["error_type"],
        "high_latency_10000ms_threshold_3000ms"
    );
}
// ── Proptest strategies ────────────────────────────────────────────────
/// Generate an arbitrary AttemptErrorType.
///
/// Covers all three variants with equal weight: HTTP errors (any status
/// 100..=599 plus a small random body), timeouts up to 120s, and
/// high-latency readings with independent measured/threshold values.
fn arb_attempt_error_type() -> impl Strategy<Value = AttemptErrorType> {
    prop_oneof![
        (100u16..=599u16, proptest::collection::vec(any::<u8>(), 0..32))
            .prop_map(|(status_code, body)| AttemptErrorType::HttpError { status_code, body }),
        (1u64..=120_000u64)
            .prop_map(|duration_ms| AttemptErrorType::Timeout { duration_ms }),
        (1u64..=120_000u64, 1u64..=120_000u64)
            .prop_map(|(measured_ms, threshold_ms)| AttemptErrorType::HighLatency {
                measured_ms,
                threshold_ms,
            }),
    ]
}
/// Generate an arbitrary AttemptError with a model_id from a small set of
/// realistic provider/model identifiers.
fn arb_attempt_error() -> impl Strategy<Value = AttemptError> {
    let model_ids = prop_oneof![
        Just("openai/gpt-4o".to_string()),
        Just("openai/gpt-4o-mini".to_string()),
        Just("anthropic/claude-3-5-sonnet".to_string()),
        Just("gemini/pro".to_string()),
        Just("azure/gpt-4o".to_string()),
    ];
    // attempt_number is bounded 1..=10 to mirror realistic retry counts.
    (model_ids, arb_attempt_error_type(), 1u32..=10u32).prop_map(
        |(model_id, error_type, attempt_number)| AttemptError {
            model_id,
            error_type,
            attempt_number,
        },
    )
}
/// Generate an arbitrary RetryExhaustedError with 1..=8 attempts.
///
/// The two optional duration fields are independently Some(1..=600) or
/// None so both serialization paths are exercised.
fn arb_retry_exhausted_error() -> impl Strategy<Value = RetryExhaustedError> {
    (
        proptest::collection::vec(arb_attempt_error(), 1..=8),
        proptest::option::of(1u64..=600u64),
        proptest::option::of(1u64..=600u64),
        any::<bool>(),
    )
        .prop_map(
            |(attempts, max_retry_after_seconds, shortest_remaining_block_seconds, retry_budget_exhausted)| {
                RetryExhaustedError {
                    attempts,
                    max_retry_after_seconds,
                    shortest_remaining_block_seconds,
                    retry_budget_exhausted,
                }
            },
        )
}
/// Generate an arbitrary request_id (non-empty ASCII string valid for HTTP headers).
fn arb_request_id() -> impl Strategy<Value = String> {
    // Regex strategy: 1..=64 chars from the header-safe character set.
    "[a-zA-Z0-9_-]{1,64}"
}
// Feature: retry-on-ratelimit, Property 21: Error Response Contains Attempt Details
// **Validates: Requirements 10.4, 10.5, 10.7**
proptest! {
    #![proptest_config(ProptestConfig::with_cases(100))]
    /// Property 21: For any exhausted retry sequence, the error response
    /// must include all attempted model identifiers and their error types,
    /// and must preserve the original request_id.
    #[test]
    fn prop_error_response_contains_attempt_details(
        error in arb_retry_exhausted_error(),
        request_id in arb_request_id(),
    ) {
        // proptest's #[test] fns are synchronous, so bridge into async
        // with a throwaway current-thread runtime per case.
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        rt.block_on(async {
            let resp = build_error_response(&error, &request_id);
            // request_id preserved in x-request-id header
            let header_val = resp.headers().get("x-request-id")
                .expect("x-request-id header must be present");
            prop_assert_eq!(header_val.to_str().unwrap(), request_id.as_str());
            // Content-Type is application/json
            let ct = resp.headers().get("content-type")
                .expect("content-type header must be present");
            prop_assert_eq!(ct.to_str().unwrap(), "application/json");
            // Parse JSON body
            let body = resp.into_body().collect().await.unwrap().to_bytes();
            let json: serde_json::Value = serde_json::from_slice(&body)
                .expect("response body must be valid JSON");
            let err_obj = &json["error"];
            // type is always "retry_exhausted"
            prop_assert_eq!(err_obj["type"].as_str().unwrap(), "retry_exhausted");
            // total_attempts matches input
            prop_assert_eq!(
                err_obj["total_attempts"].as_u64().unwrap(),
                error.attempts.len() as u64
            );
            // retry_budget_exhausted matches input
            prop_assert_eq!(
                err_obj["retry_budget_exhausted"].as_bool().unwrap(),
                error.retry_budget_exhausted
            );
            // attempts array has correct length
            let attempts_arr = err_obj["attempts"].as_array()
                .expect("attempts must be an array");
            prop_assert_eq!(attempts_arr.len(), error.attempts.len());
            // Every attempt's model_id and error_type are present and correct,
            // and attempt ordering is preserved (index i maps to attempt i).
            for (i, attempt) in error.attempts.iter().enumerate() {
                let json_attempt = &attempts_arr[i];
                // model_id preserved
                prop_assert_eq!(
                    json_attempt["model"].as_str().unwrap(),
                    attempt.model_id.as_str()
                );
                // attempt_number preserved
                prop_assert_eq!(
                    json_attempt["attempt"].as_u64().unwrap(),
                    attempt.attempt_number as u64
                );
                // error_type string matches the variant
                let error_type_str = json_attempt["error_type"].as_str().unwrap();
                match &attempt.error_type {
                    AttemptErrorType::HttpError { status_code, .. } => {
                        prop_assert_eq!(
                            error_type_str,
                            &format!("http_{}", status_code)
                        );
                    }
                    AttemptErrorType::Timeout { duration_ms } => {
                        prop_assert_eq!(
                            error_type_str,
                            &format!("timeout_{}ms", duration_ms)
                        );
                    }
                    AttemptErrorType::HighLatency { measured_ms, threshold_ms } => {
                        prop_assert_eq!(
                            error_type_str,
                            &format!("high_latency_{}ms_threshold_{}ms", measured_ms, threshold_ms)
                        );
                    }
                }
            }
            // Optional fields: observed_max_retry_after_seconds
            // (absent-or-null both count as "omitted" for the None case)
            match error.max_retry_after_seconds {
                Some(v) => {
                    prop_assert_eq!(
                        err_obj["observed_max_retry_after_seconds"].as_u64().unwrap(),
                        v
                    );
                }
                None => {
                    prop_assert!(err_obj.get("observed_max_retry_after_seconds").is_none()
                        || err_obj["observed_max_retry_after_seconds"].is_null());
                }
            }
            // Optional fields: shortest_remaining_block_seconds
            match error.shortest_remaining_block_seconds {
                Some(v) => {
                    prop_assert_eq!(
                        err_obj["shortest_remaining_block_seconds"].as_u64().unwrap(),
                        v
                    );
                }
                None => {
                    prop_assert!(err_obj.get("shortest_remaining_block_seconds").is_none()
                        || err_obj["shortest_remaining_block_seconds"].is_null());
                }
            }
            // message is a non-empty string
            let message = err_obj["message"].as_str()
                .expect("message must be a string");
            prop_assert!(!message.is_empty());
            Ok(())
        })?;
    }
}
}

View file

@ -118,3 +118,265 @@ impl Default for LatencyBlockStateManager {
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread;
use std::time::Duration;
#[test]
fn test_new_manager_has_no_blocks() {
let mgr = LatencyBlockStateManager::new();
assert!(!mgr.is_blocked("openai/gpt-4o"));
assert!(mgr.remaining_block_duration("openai/gpt-4o").is_none());
}
#[test]
fn test_record_block_and_is_blocked() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5500);
assert!(mgr.is_blocked("openai/gpt-4o"));
assert!(!mgr.is_blocked("anthropic/claude"));
}
#[test]
fn test_remaining_block_duration() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 10, 5000);
let remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
assert!(remaining <= Duration::from_secs(11));
assert!(remaining > Duration::from_secs(8));
}
#[test]
fn test_expired_entry_cleaned_up_on_is_blocked() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 0, 5000);
thread::sleep(Duration::from_millis(10));
assert!(!mgr.is_blocked("openai/gpt-4o"));
}
#[test]
fn test_expired_entry_cleaned_up_on_remaining() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 0, 5000);
thread::sleep(Duration::from_millis(10));
assert!(mgr.remaining_block_duration("openai/gpt-4o").is_none());
}
#[test]
fn test_max_expiration_semantics_longer_wins() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 10, 5000);
let first_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
mgr.record_block("openai/gpt-4o", 60, 6000);
let second_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
assert!(second_remaining > first_remaining);
}
#[test]
fn test_max_expiration_semantics_shorter_does_not_overwrite() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5000);
let first_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
mgr.record_block("openai/gpt-4o", 5, 6000);
let second_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
// Should still be close to the original 60s
assert!(second_remaining > Duration::from_secs(50));
let diff = if first_remaining > second_remaining {
first_remaining - second_remaining
} else {
second_remaining - first_remaining
};
assert!(diff < Duration::from_secs(2));
}
#[test]
fn test_is_model_blocked_model_scope() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5000);
assert!(mgr.is_model_blocked("openai/gpt-4o", BlockScope::Model));
assert!(!mgr.is_model_blocked("openai/gpt-4o-mini", BlockScope::Model));
}
#[test]
fn test_is_model_blocked_provider_scope() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai", 60, 5000);
assert!(mgr.is_model_blocked("openai/gpt-4o", BlockScope::Provider));
assert!(mgr.is_model_blocked("openai/gpt-4o-mini", BlockScope::Provider));
assert!(!mgr.is_model_blocked("anthropic/claude", BlockScope::Provider));
}
#[test]
fn test_multiple_identifiers_independent() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5000);
mgr.record_block("anthropic/claude", 30, 4000);
assert!(mgr.is_blocked("openai/gpt-4o"));
assert!(mgr.is_blocked("anthropic/claude"));
assert!(!mgr.is_blocked("azure/gpt-4o"));
}
#[test]
fn test_record_block_stores_measured_latency() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5500);
// Verify the entry exists and has the correct latency
let entry = mgr.global_state.get("openai/gpt-4o").unwrap();
assert_eq!(entry.1, 5500);
}
#[test]
fn test_latency_updated_when_expiration_extended() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 10, 5000);
// Extend with longer duration and different latency
mgr.record_block("openai/gpt-4o", 60, 7000);
let entry = mgr.global_state.get("openai/gpt-4o").unwrap();
assert_eq!(entry.1, 7000);
}
#[test]
fn test_latency_not_updated_when_expiration_not_extended() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 60, 5000);
// Shorter duration — should NOT update
mgr.record_block("openai/gpt-4o", 5, 9000);
let entry = mgr.global_state.get("openai/gpt-4o").unwrap();
// Latency should remain 5000 since expiration wasn't extended
assert_eq!(entry.1, 5000);
}
#[test]
fn test_zero_duration_block_expires_immediately() {
let mgr = LatencyBlockStateManager::new();
mgr.record_block("openai/gpt-4o", 0, 5000);
thread::sleep(Duration::from_millis(5));
assert!(!mgr.is_blocked("openai/gpt-4o"));
}
#[test]
fn test_default_trait() {
let mgr = LatencyBlockStateManager::default();
assert!(!mgr.is_blocked("anything"));
}
// --- Property-based tests ---
use proptest::prelude::*;
fn arb_identifier() -> impl Strategy<Value = String> {
prop_oneof![
"[a-z]{3,8}/[a-z0-9\\-]{3,12}".prop_map(|s| s),
"[a-z]{3,8}".prop_map(|s| s),
]
}
/// A single block recording: (block_duration_seconds, measured_latency_ms)
fn arb_block_recording() -> impl Strategy<Value = (u64, u64)> {
(1u64..=600, 100u64..=30_000)
}
// Feature: retry-on-ratelimit, Property 22: Latency Block State Max Expiration Update
// **Validates: Requirements 14.15**
proptest! {
#![proptest_config(ProptestConfig::with_cases(100))]
/// Property 22 Case 1: After recording multiple blocks for the same identifier
/// with different durations, the remaining block duration reflects the maximum
/// duration recorded (max-expiration semantics).
#[test]
fn prop_latency_block_max_expiration_update(
identifier in arb_identifier(),
recordings in prop::collection::vec(arb_block_recording(), 2..=10),
) {
let mgr = LatencyBlockStateManager::new();
for &(duration, latency) in &recordings {
mgr.record_block(&identifier, duration, latency);
}
let max_duration = recordings.iter().map(|&(d, _)| d).max().unwrap();
// The identifier should still be blocked
let remaining = mgr.remaining_block_duration(&identifier);
prop_assert!(
remaining.is_some(),
"Identifier {} should be blocked after {} recordings (max_duration={}s)",
identifier, recordings.len(), max_duration
);
let remaining_secs = remaining.unwrap().as_secs();
// Remaining should be close to max_duration (allow 2s tolerance for execution time)
prop_assert!(
remaining_secs >= max_duration.saturating_sub(2),
"Remaining {}s should reflect the max duration ({}s), not a smaller value. Recordings: {:?}",
remaining_secs, max_duration, recordings
);
prop_assert!(
remaining_secs <= max_duration + 1,
"Remaining {}s should not exceed max duration {}s + tolerance. Recordings: {:?}",
remaining_secs, max_duration, recordings
);
}
/// Property 22 Case 2: measured_latency_ms is updated when expiration is extended
/// but NOT when a shorter duration is recorded.
#[test]
fn prop_latency_block_measured_latency_update_semantics(
identifier in arb_identifier(),
first_duration in 10u64..=300,
first_latency in 100u64..=30_000,
extra_duration in 1u64..=300,
longer_latency in 100u64..=30_000,
shorter_duration in 1u64..=9,
shorter_latency in 100u64..=30_000,
) {
let mgr = LatencyBlockStateManager::new();
// Record initial block
mgr.record_block(&identifier, first_duration, first_latency);
{
let entry = mgr.global_state.get(&identifier).unwrap();
prop_assert_eq!(entry.1, first_latency);
}
// Record a longer duration — latency SHOULD be updated
let longer_duration = first_duration + extra_duration;
mgr.record_block(&identifier, longer_duration, longer_latency);
{
let entry = mgr.global_state.get(&identifier).unwrap();
prop_assert_eq!(
entry.1, longer_latency,
"Latency should be updated to {} when expiration is extended (duration {} > {})",
longer_latency, longer_duration, first_duration
);
}
// Record a shorter duration — latency should NOT be updated
mgr.record_block(&identifier, shorter_duration, shorter_latency);
{
let entry = mgr.global_state.get(&identifier).unwrap();
prop_assert_eq!(
entry.1, longer_latency,
"Latency should remain {} (not {}) when shorter duration {} < {} doesn't extend expiration",
longer_latency, shorter_latency, shorter_duration, longer_duration
);
}
}
}
}

View file

@ -57,3 +57,175 @@ impl Default for LatencyTriggerCounter {
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread::sleep;
    use std::time::Duration;

    // record_event returns true only once the count within the window
    // reaches min_triggers (here: on the 3rd of 3 events).
    #[test]
    fn test_record_event_returns_true_when_threshold_met() {
        let counter = LatencyTriggerCounter::new();
        assert!(!counter.record_event("model-a", 3, 60));
        assert!(!counter.record_event("model-a", 3, 60));
        assert!(counter.record_event("model-a", 3, 60));
    }

    // min_triggers == 1 fires on the very first event.
    #[test]
    fn test_record_event_single_trigger_always_fires() {
        let counter = LatencyTriggerCounter::new();
        assert!(counter.record_event("model-a", 1, 60));
    }

    // Events older than the window (1s here) fall out of the count.
    #[test]
    fn test_events_expire_outside_window() {
        let counter = LatencyTriggerCounter::new();
        // Record 2 events
        counter.record_event("model-a", 3, 1);
        counter.record_event("model-a", 3, 1);
        // Wait for them to expire
        sleep(Duration::from_millis(1100));
        // Third event should not meet threshold since previous two expired
        assert!(!counter.record_event("model-a", 3, 1));
    }

    // reset() discards accumulated events; the threshold must be reached
    // from scratch afterwards.
    #[test]
    fn test_reset_clears_counter() {
        let counter = LatencyTriggerCounter::new();
        counter.record_event("model-a", 3, 60);
        counter.record_event("model-a", 3, 60);
        counter.reset("model-a");
        // After reset, need 3 fresh events again
        assert!(!counter.record_event("model-a", 3, 60));
        assert!(!counter.record_event("model-a", 3, 60));
        assert!(counter.record_event("model-a", 3, 60));
    }

    // Resetting an identifier that was never recorded must be harmless.
    #[test]
    fn test_reset_nonexistent_identifier_is_noop() {
        let counter = LatencyTriggerCounter::new();
        // Should not panic
        counter.reset("nonexistent");
    }

    // Events recorded for one identifier must not count toward another.
    #[test]
    fn test_separate_identifiers_are_independent() {
        let counter = LatencyTriggerCounter::new();
        counter.record_event("model-a", 2, 60);
        counter.record_event("model-b", 2, 60);
        // model-b's 2nd event is checked against a stricter threshold of 3,
        // so it stays below threshold — and model-a's events don't help it.
        assert!(!counter.record_event("model-b", 3, 60));
        // model-a's 2nd event reaches its threshold of 2.
        assert!(counter.record_event("model-a", 2, 60));
    }

    // Once at/above threshold, subsequent events keep reporting true.
    #[test]
    fn test_threshold_exceeded_still_returns_true() {
        let counter = LatencyTriggerCounter::new();
        assert!(counter.record_event("model-a", 1, 60));
        // Already past threshold, still returns true
        assert!(counter.record_event("model-a", 1, 60));
        assert!(counter.record_event("model-a", 1, 60));
    }

    // --- Property-based tests ---
    use proptest::prelude::*;

    // Feature: retry-on-ratelimit, Property 18: Latency Trigger Counter Sliding Window
    // **Validates: Requirements 2a.6, 2a.7, 2a.8, 2a.21, 14.1, 14.2, 14.3, 14.12**
    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Property 18 Case 1: Recording N events in quick succession (all within window)
        /// returns true iff N >= min_triggers.
        #[test]
        fn prop_sliding_window_threshold(
            min_triggers in 1u32..=10,
            trigger_window_seconds in 1u64..=60,
            num_events in 1u32..=20,
        ) {
            let counter = LatencyTriggerCounter::new();
            let identifier = "test-model";
            let mut last_result = false;
            // Events are recorded back-to-back, so all stay inside the
            // (>= 1s) window for the duration of this loop.
            for i in 1..=num_events {
                last_result = counter.record_event(identifier, min_triggers, trigger_window_seconds);
                // Before reaching threshold, should be false
                if i < min_triggers {
                    prop_assert!(!last_result, "Expected false at event {} with min_triggers {}", i, min_triggers);
                } else {
                    // At or past threshold, should be true
                    prop_assert!(last_result, "Expected true at event {} with min_triggers {}", i, min_triggers);
                }
            }
            // Final result should match whether we recorded enough events
            prop_assert_eq!(last_result, num_events >= min_triggers);
        }
        /// Property 18 Case 2: After reset, counter starts fresh and previous events
        /// do not count toward the threshold.
        #[test]
        fn prop_reset_clears_counter(
            min_triggers in 2u32..=10,
            trigger_window_seconds in 1u64..=60,
            events_before_reset in 1u32..=10,
        ) {
            let counter = LatencyTriggerCounter::new();
            let identifier = "test-model";
            // Record some events before reset
            for _ in 0..events_before_reset {
                counter.record_event(identifier, min_triggers, trigger_window_seconds);
            }
            // Reset the counter
            counter.reset(identifier);
            // After reset, a single event should not meet threshold (min_triggers >= 2)
            let result = counter.record_event(identifier, min_triggers, trigger_window_seconds);
            prop_assert!(!result, "After reset, first event should not meet threshold of {}", min_triggers);
            // Need min_triggers - 1 more events to reach threshold again
            let mut final_result = result;
            for _ in 1..min_triggers {
                final_result = counter.record_event(identifier, min_triggers, trigger_window_seconds);
            }
            prop_assert!(final_result, "After reset + {} events, should meet threshold", min_triggers);
        }
        /// Property 18 Case 3: Different identifiers are independent — events for one
        /// identifier do not affect the count for another.
        #[test]
        fn prop_identifiers_independent(
            min_triggers in 1u32..=10,
            trigger_window_seconds in 1u64..=60,
            events_a in 1u32..=20,
            events_b in 1u32..=20,
        ) {
            let counter = LatencyTriggerCounter::new();
            let id_a = "model-a";
            let id_b = "model-b";
            // Record events for identifier A
            let mut result_a = false;
            for _ in 0..events_a {
                result_a = counter.record_event(id_a, min_triggers, trigger_window_seconds);
            }
            // Record events for identifier B
            let mut result_b = false;
            for _ in 0..events_b {
                result_b = counter.record_event(id_b, min_triggers, trigger_window_seconds);
            }
            // Each identifier's result depends only on its own event count
            prop_assert_eq!(result_a, events_a >= min_triggers,
                "id_a: events={}, min_triggers={}", events_a, min_triggers);
            prop_assert_eq!(result_b, events_b >= min_triggers,
                "id_b: events={}, min_triggers={}", events_b, min_triggers);
        }
    }
} // mod tests

View file

@ -331,3 +331,455 @@ pub enum ValidationWarning {
}
#[cfg(test)]
mod tests {
use super::*;
use crate::configuration::{LlmProviderType, LlmProvider};
use bytes::Bytes;
use hyper::header::{HeaderMap, HeaderValue, AUTHORIZATION};
use proptest::prelude::*;
/// Build a minimal LlmProvider for tests.
///
/// Only name, provider_interface, access_key, and model are populated
/// (model mirrors the name); every optional field is left unset so tests
/// can override exactly what they need.
fn make_provider(name: &str, interface: LlmProviderType, key: Option<&str>) -> LlmProvider {
    LlmProvider {
        name: name.to_string(),
        provider_interface: interface,
        access_key: key.map(|k| k.to_string()),
        model: Some(name.to_string()),
        default: None,
        stream: None,
        endpoint: None,
        port: None,
        rate_limits: None,
        usage: None,
        routing_preferences: None,
        cluster_name: None,
        base_url_path_prefix: None,
        internal: None,
        passthrough_auth: None,
        retry_policy: None,
    }
}
// ── RequestSignature tests ─────────────────────────────────────────
#[test]
fn test_request_signature_computes_hash() {
    // The signature must store the SHA-256 of the exact body bytes,
    // along with the streaming flag and original model name.
    let payload = b"hello world";
    let sig =
        RequestSignature::new(payload, &HeaderMap::new(), false, "openai/gpt-4o".to_string());
    let mut hasher = Sha256::new();
    hasher.update(payload);
    let expected: [u8; 32] = hasher.finalize().into();
    assert_eq!(sig.body_hash, expected);
    assert!(!sig.streaming);
    assert_eq!(sig.original_model, "openai/gpt-4o");
}
#[test]
fn test_request_signature_preserves_headers() {
    // Custom headers and the streaming flag must survive capture.
    let mut original_headers = HeaderMap::new();
    original_headers.insert("x-custom", HeaderValue::from_static("value"));
    let signature = RequestSignature::new(b"body", &original_headers, true, "model".to_string());
    assert_eq!(signature.headers.get("x-custom").unwrap(), "value");
    assert!(signature.streaming);
}
#[test]
fn test_request_signature_different_bodies_different_hashes() {
    // Distinct payloads must never collide in the stored hash.
    let headers = HeaderMap::new();
    let first = RequestSignature::new(b"body1", &headers, false, "m".to_string());
    let second = RequestSignature::new(b"body2", &headers, false, "m".to_string());
    assert_ne!(first.body_hash, second.body_hash);
}
// ── RetryGate tests ────────────────────────────────────────────────
#[test]
fn test_retry_gate_default_permits() {
    // A default-constructed gate must hand out at least one permit.
    let gate = RetryGate::default();
    assert!(gate.try_acquire().is_some());
}
#[test]
fn test_retry_gate_exhaustion() {
    let gate = RetryGate::new(1);
    // The single permit can be taken once...
    let held = gate.try_acquire();
    assert!(held.is_some());
    // ...but not a second time while it is outstanding.
    assert!(gate.try_acquire().is_none());
    // Releasing the permit makes the gate usable again.
    drop(held);
    assert!(gate.try_acquire().is_some());
}
#[test]
fn test_retry_gate_custom_capacity() {
    // A gate of capacity 3 yields exactly three concurrent permits.
    let gate = RetryGate::new(3);
    let _held: Vec<_> = (0..3).map(|_| gate.try_acquire().unwrap()).collect();
    assert!(gate.try_acquire().is_none());
}
// ── rebuild_request_for_provider tests ─────────────────────────────
#[test]
fn test_rebuild_updates_model_field() {
    // Rebuilding for a new provider rewrites the "model" field,
    // stripping the "provider/" prefix from the provider's model name.
    let provider = make_provider("openai/gpt-4o-mini", LlmProviderType::OpenAI, Some("sk-test"));
    let original = Bytes::from(r#"{"model":"gpt-4o","messages":[]}"#);
    let (new_body, _) =
        rebuild_request_for_provider(&original, &provider, &HeaderMap::new()).unwrap();
    let parsed: serde_json::Value = serde_json::from_slice(&new_body).unwrap();
    assert_eq!(parsed["model"], "gpt-4o-mini");
}
#[test]
fn test_rebuild_preserves_other_fields() {
    // Everything except "model" must survive the rebuild untouched.
    let original = Bytes::from(
        r#"{"model":"gpt-4o","messages":[{"role":"user","content":"hi"}],"temperature":0.7}"#,
    );
    let provider = make_provider("openai/gpt-4o-mini", LlmProviderType::OpenAI, Some("sk-test"));
    let (new_body, _) =
        rebuild_request_for_provider(&original, &provider, &HeaderMap::new()).unwrap();
    let parsed: serde_json::Value = serde_json::from_slice(&new_body).unwrap();
    assert_eq!(parsed["messages"][0]["role"], "user");
    assert_eq!(parsed["messages"][0]["content"], "hi");
    assert_eq!(parsed["temperature"], 0.7);
}
#[test]
fn test_rebuild_sets_openai_auth() {
    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer old-key"));
    let provider = make_provider("openai/gpt-4o", LlmProviderType::OpenAI, Some("sk-new"));
    let body = Bytes::from(r#"{"model":"old"}"#);
    let (_, rebuilt) = rebuild_request_for_provider(&body, &provider, &headers).unwrap();
    // OpenAI credentials go in Authorization as a Bearer token;
    // no x-api-key header must be present.
    let auth = rebuilt.get(AUTHORIZATION).unwrap().to_str().unwrap();
    assert_eq!(auth, "Bearer sk-new");
    assert!(rebuilt.get("x-api-key").is_none());
}
#[test]
fn test_rebuild_sets_anthropic_auth() {
    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer old-key"));
    let provider = make_provider(
        "anthropic/claude-3-5-sonnet",
        LlmProviderType::Anthropic,
        Some("ant-key"),
    );
    let body = Bytes::from(r#"{"model":"old"}"#);
    let (_, rebuilt) = rebuild_request_for_provider(&body, &provider, &headers).unwrap();
    // Anthropic uses x-api-key plus a pinned anthropic-version header,
    // never Authorization.
    assert!(rebuilt.get(AUTHORIZATION).is_none());
    assert_eq!(rebuilt.get("x-api-key").unwrap().to_str().unwrap(), "ant-key");
    assert_eq!(
        rebuilt.get("anthropic-version").unwrap().to_str().unwrap(),
        "2023-06-01"
    );
}
// Verifies that ALL stale auth-related headers are stripped before the
// new provider credentials are applied, while unrelated headers survive.
#[test]
fn test_rebuild_sanitizes_old_auth_headers() {
    let body = Bytes::from(r#"{"model":"old"}"#);
    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer old-key"));
    headers.insert("x-api-key", HeaderValue::from_static("old-api-key"));
    headers.insert("anthropic-version", HeaderValue::from_static("old-version"));
    headers.insert("x-custom", HeaderValue::from_static("keep-me"));
    let provider = make_provider("openai/gpt-4o", LlmProviderType::OpenAI, Some("sk-new"));
    let (_, new_headers) = rebuild_request_for_provider(&body, &provider, &headers).unwrap();
    // Old anthropic-version should be removed
    assert!(new_headers.get("anthropic-version").is_none());
    // Fix: the comment above claimed x-api-key removal was covered, but it
    // was never asserted. For an OpenAI target the stale x-api-key must be
    // stripped too (matches test_rebuild_sets_openai_auth).
    assert!(new_headers.get("x-api-key").is_none());
    // New auth should be set
    assert_eq!(
        new_headers.get(AUTHORIZATION).unwrap().to_str().unwrap(),
        "Bearer sk-new"
    );
    // Custom headers preserved
    assert_eq!(
        new_headers.get("x-custom").unwrap().to_str().unwrap(),
        "keep-me"
    );
}
#[test]
fn test_rebuild_passthrough_auth_skips_credentials() {
    let mut provider = make_provider("openai/gpt-4o", LlmProviderType::OpenAI, Some("sk-new"));
    provider.passthrough_auth = Some(true);
    let mut headers = HeaderMap::new();
    headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer client-key"));
    let body = Bytes::from(r#"{"model":"old"}"#);
    let (_, rebuilt) = rebuild_request_for_provider(&body, &provider, &headers).unwrap();
    // Old auth headers are sanitized, and passthrough_auth means the
    // gateway injects no replacement Authorization header.
    assert!(rebuilt.get(AUTHORIZATION).is_none());
}
#[test]
fn test_rebuild_missing_access_key_errors() {
    // A provider without an access key cannot be targeted.
    let provider = make_provider("openai/gpt-4o", LlmProviderType::OpenAI, None);
    let outcome = rebuild_request_for_provider(
        &Bytes::from(r#"{"model":"old"}"#),
        &provider,
        &HeaderMap::new(),
    );
    assert!(matches!(outcome, Err(RebuildError::MissingAccessKey(_))));
}
#[test]
fn test_rebuild_invalid_json_errors() {
    // Non-JSON bodies are rejected up front.
    let provider = make_provider("openai/gpt-4o", LlmProviderType::OpenAI, Some("key"));
    let outcome =
        rebuild_request_for_provider(&Bytes::from("not json"), &provider, &HeaderMap::new());
    assert!(matches!(outcome, Err(RebuildError::InvalidJson(_))));
}
#[test]
fn test_rebuild_model_without_provider_prefix() {
    // A model name with no "provider/" prefix is used verbatim.
    let mut provider = make_provider("gpt-4o", LlmProviderType::OpenAI, Some("key"));
    provider.model = Some("gpt-4o".to_string());
    let (new_body, _) = rebuild_request_for_provider(
        &Bytes::from(r#"{"model":"old"}"#),
        &provider,
        &HeaderMap::new(),
    )
    .unwrap();
    let parsed: serde_json::Value = serde_json::from_slice(&new_body).unwrap();
    assert_eq!(parsed["model"], "gpt-4o");
}
// --- Proptest strategies ---

/// Uniformly pick one of the four provider interfaces under test.
fn arb_provider_type() -> impl Strategy<Value = LlmProviderType> {
    prop_oneof![
        Just(LlmProviderType::OpenAI),
        Just(LlmProviderType::Anthropic),
        Just(LlmProviderType::Gemini),
        Just(LlmProviderType::Deepseek),
    ]
}
/// Pick a realistic "provider/model" identifier from a fixed pool.
fn arb_model_name() -> impl Strategy<Value = String> {
    prop_oneof![
        Just("openai/gpt-4o".to_string()),
        Just("openai/gpt-4o-mini".to_string()),
        Just("anthropic/claude-3-5-sonnet".to_string()),
        Just("gemini/gemini-pro".to_string()),
        Just("deepseek/deepseek-chat".to_string()),
    ]
}
/// Build an arbitrary rebuild target: a provider with a random model name,
/// a random interface, and a fixed test access key.
/// NOTE(review): model name and interface are drawn independently, so
/// mismatched pairs (e.g. an anthropic/* name with an OpenAI interface)
/// are possible by design.
fn arb_target_provider() -> impl Strategy<Value = LlmProvider> {
    (arb_model_name(), arb_provider_type()).prop_map(|(model, iface)| {
        make_provider(&model, iface, Some("test-key-123"))
    })
}
/// Short alphanumeric message content (regex strategy, 1..=50 chars).
fn arb_message_content() -> impl Strategy<Value = String> {
    "[a-zA-Z0-9 ]{1,50}"
}
/// Generate 1..=4 chat messages, each with a random role and content.
fn arb_messages() -> impl Strategy<Value = Vec<serde_json::Value>> {
    prop::collection::vec(
        (
            prop_oneof![Just("user"), Just("assistant"), Just("system")],
            arb_message_content(),
        )
            .prop_map(|(role, content)| {
                serde_json::json!({"role": role, "content": content})
            }),
        1..5,
    )
}
/// Generate an arbitrary chat-completion request body.
///
/// The "model" field carries only the bare model name (prefix stripped),
/// and temperature / max_tokens / stream are each independently optional
/// so both present-and-absent serialization paths get covered.
fn arb_json_body() -> impl Strategy<Value = serde_json::Value> {
    (
        arb_model_name(),
        arb_messages(),
        prop::option::of(0.0f64..2.0),
        prop::option::of(1u32..4096),
        proptest::bool::ANY,
    )
        .prop_map(|(model, messages, temperature, max_tokens, stream)| {
            // Strip the "provider/" prefix — client requests carry the
            // bare model name.
            let model_only = model.split('/').nth(1).unwrap_or(&model);
            let mut obj = serde_json::json!({
                "model": model_only,
                "messages": messages,
            });
            if let Some(t) = temperature {
                obj["temperature"] = serde_json::json!(t);
            }
            if let Some(mt) = max_tokens {
                obj["max_tokens"] = serde_json::json!(mt);
            }
            if stream {
                obj["stream"] = serde_json::json!(true);
            }
            obj
        })
}
/// Generate 0..=3 (name, value) pairs drawn from a small pool of benign
/// header names. Duplicate names are possible; callers insert them into
/// a HeaderMap where a later duplicate overwrites the earlier one.
fn arb_custom_headers() -> impl Strategy<Value = Vec<(String, String)>> {
    prop::collection::vec(
        (
            prop_oneof![
                Just("x-request-id".to_string()),
                Just("x-custom-header".to_string()),
                Just("x-trace-id".to_string()),
                Just("content-type".to_string()),
            ],
            "[a-zA-Z0-9-]{1,30}",
        ),
        0..4,
    )
}
// Feature: retry-on-ratelimit, Property 14: Request Preservation Across Retries
// **Validates: Requirements 5.1, 5.2, 5.3, 5.4, 5.5, 3.15**
proptest! {
    #![proptest_config(ProptestConfig::with_cases(100))]
    /// Property 14 The original body bytes are unchanged after rebuild (body is passed by reference).
    /// The rebuilt body has the model field updated to the target provider's model.
    /// All other JSON fields are preserved. The RequestSignature hash matches the original body hash.
    /// Custom headers are preserved while auth headers are sanitized.
    #[test]
    fn prop_request_preservation_across_retries(
        json_body in arb_json_body(),
        custom_headers in arb_custom_headers(),
        streaming in proptest::bool::ANY,
        target_provider in arb_target_provider(),
    ) {
        let body_bytes = serde_json::to_vec(&json_body).unwrap();
        let body = Bytes::from(body_bytes.clone());
        // Build original headers with custom + auth headers.
        // Invalid header names/values from the strategy are silently skipped,
        // mirroring what a real HeaderMap build would accept.
        let mut original_headers = HeaderMap::new();
        for (name, value) in &custom_headers {
            if let (Ok(hn), Ok(hv)) = (
                hyper::header::HeaderName::from_bytes(name.as_bytes()),
                HeaderValue::from_str(value),
            ) {
                original_headers.insert(hn, hv);
            }
        }
        // Add auth headers that should be sanitized (they carry credentials for
        // the ORIGINAL provider and must not leak to the retry target)
        original_headers.insert(AUTHORIZATION, HeaderValue::from_static("Bearer old-secret"));
        original_headers.insert("x-api-key", HeaderValue::from_static("old-api-key"));
        let original_model = json_body["model"].as_str().unwrap_or("unknown").to_string();
        // Create RequestSignature from original body
        let sig = RequestSignature::new(&body, &original_headers, streaming, original_model.clone());
        // Assert: body bytes are unchanged (passed by reference, not modified)
        prop_assert_eq!(&body[..], &body_bytes[..], "Original body bytes must be unchanged");
        // Assert: RequestSignature hash matches a fresh hash of the same body
        let mut hasher = Sha256::new();
        hasher.update(&body);
        let expected_hash: [u8; 32] = hasher.finalize().into();
        prop_assert_eq!(sig.body_hash, expected_hash, "RequestSignature hash must match original body hash");
        // Assert: streaming flag preserved
        prop_assert_eq!(sig.streaming, streaming, "Streaming flag must be preserved in signature");
        // Rebuild for target provider
        let result = rebuild_request_for_provider(&body, &target_provider, &original_headers);
        prop_assert!(result.is_ok(), "rebuild_request_for_provider should succeed for valid JSON body");
        let (rebuilt_body, rebuilt_headers) = result.unwrap();
        // Parse rebuilt body
        let rebuilt_json: serde_json::Value = serde_json::from_slice(&rebuilt_body).unwrap();
        // Assert: model field updated to target provider's model (without prefix)
        let target_model = target_provider.model.as_deref().unwrap_or(&target_provider.name);
        let expected_model = target_model.split_once('/').map(|(_, m)| m).unwrap_or(target_model);
        prop_assert_eq!(
            rebuilt_json["model"].as_str().unwrap(),
            expected_model,
            "Model field must be updated to target provider's model"
        );
        // Assert: messages array preserved
        prop_assert_eq!(
            &rebuilt_json["messages"],
            &json_body["messages"],
            "Messages array must be preserved across rebuild"
        );
        // Assert: other JSON fields preserved (temperature, max_tokens, stream)
        // The rebuild function does a JSON round-trip (deserialize → modify model → serialize),
        // so we compare against a round-tripped version of the original to account for
        // any f64 precision changes inherent to JSON serialization.
        let original_round_tripped: serde_json::Value = serde_json::from_slice(
            &serde_json::to_vec(&json_body).unwrap()
        ).unwrap();
        for key in ["temperature", "max_tokens", "stream"] {
            if let Some(original_val) = original_round_tripped.get(key) {
                prop_assert_eq!(
                    &rebuilt_json[key],
                    original_val,
                    "Field '{}' must be preserved across rebuild",
                    key
                );
            }
        }
        // Assert: custom headers preserved (non-auth headers)
        // Note: HeaderMap::insert overwrites, so only the last value for each name survives
        let mut last_custom: std::collections::HashMap<String, String> = std::collections::HashMap::new();
        for (name, value) in &custom_headers {
            let lower = name.to_lowercase();
            if lower == "authorization" || lower == "x-api-key" || lower == "anthropic-version" {
                continue;
            }
            last_custom.insert(lower, value.clone());
        }
        for (name, value) in &last_custom {
            if let Some(hv) = rebuilt_headers.get(name.as_str()) {
                prop_assert_eq!(
                    hv.to_str().unwrap(),
                    value.as_str(),
                    "Custom header '{}' must be preserved",
                    name
                );
            }
        }
        // Assert: old auth headers are sanitized (not leaked to target provider)
        // The old "Bearer old-secret" and "old-api-key" should NOT appear
        if let Some(auth) = rebuilt_headers.get(AUTHORIZATION) {
            prop_assert_ne!(
                auth.to_str().unwrap(),
                "Bearer old-secret",
                "Old authorization header must be sanitized"
            );
        }
        if let Some(api_key) = rebuilt_headers.get("x-api-key") {
            prop_assert_ne!(
                api_key.to_str().unwrap(),
                "old-api-key",
                "Old x-api-key header must be sanitized"
            );
        }
        // Assert: original body is still unchanged after rebuild
        prop_assert_eq!(&body[..], &body_bytes[..], "Original body bytes must remain unchanged after rebuild");
    }
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -106,3 +106,410 @@ impl Default for RetryAfterStateManager {
}
}
#[cfg(test)]
mod tests {
    //! Unit and property tests for `RetryAfterStateManager`: recording
    //! Retry-After blocks, expiration/cleanup, max-wins update semantics,
    //! and model- vs provider-scoped block checks.
    //!
    //! NOTE(review): several tests compare against wall-clock windows with a
    //! 1-3 second tolerance; they assume test execution between `record` and
    //! the assertion stays well under that tolerance.
    use super::*;
    use std::thread;
    use std::time::Duration;
    /// A freshly constructed manager blocks nothing.
    #[test]
    fn test_new_manager_has_no_blocks() {
        let mgr = RetryAfterStateManager::new();
        assert!(!mgr.is_blocked("openai/gpt-4o"));
        assert!(mgr.remaining_block_duration("openai/gpt-4o").is_none());
    }
    /// Recording blocks only the recorded identifier, not others.
    #[test]
    fn test_record_and_is_blocked() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 60, 300);
        assert!(mgr.is_blocked("openai/gpt-4o"));
        assert!(!mgr.is_blocked("anthropic/claude"));
    }
    /// A Retry-After larger than the configured max is capped to the max.
    #[test]
    fn test_record_caps_at_max() {
        let mgr = RetryAfterStateManager::new();
        // Retry-After of 600 seconds, but max is 300
        mgr.record("openai/gpt-4o", 600, 300);
        let remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        // Should be capped at ~300 seconds (allow some tolerance)
        assert!(remaining <= Duration::from_secs(301));
        assert!(remaining > Duration::from_secs(298));
    }
    /// `remaining_block_duration` reflects the recorded Retry-After value.
    #[test]
    fn test_remaining_block_duration() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 10, 300);
        let remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        assert!(remaining <= Duration::from_secs(11));
        assert!(remaining > Duration::from_secs(8));
    }
    /// An expired entry is treated as not blocked by `is_blocked`.
    #[test]
    fn test_expired_entry_cleaned_up_on_is_blocked() {
        let mgr = RetryAfterStateManager::new();
        // Record with 0 seconds — effectively expires immediately
        mgr.record("openai/gpt-4o", 0, 300);
        // Sleep briefly to ensure expiration
        thread::sleep(Duration::from_millis(10));
        assert!(!mgr.is_blocked("openai/gpt-4o"));
    }
    /// An expired entry yields `None` from `remaining_block_duration`.
    #[test]
    fn test_expired_entry_cleaned_up_on_remaining() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 0, 300);
        thread::sleep(Duration::from_millis(10));
        assert!(mgr.remaining_block_duration("openai/gpt-4o").is_none());
    }
    /// Recording a longer Retry-After for the same identifier extends the block.
    #[test]
    fn test_max_expiration_semantics_longer_wins() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 10, 300);
        let first_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        // Record a longer duration — should update
        mgr.record("openai/gpt-4o", 60, 300);
        let second_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        assert!(second_remaining > first_remaining);
    }
    /// Recording a shorter Retry-After does not shrink an existing longer block.
    #[test]
    fn test_max_expiration_semantics_shorter_does_not_overwrite() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 60, 300);
        let first_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        // Record a shorter duration — should NOT overwrite
        mgr.record("openai/gpt-4o", 5, 300);
        let second_remaining = mgr.remaining_block_duration("openai/gpt-4o").unwrap();
        // The remaining should still be close to the original 60s
        assert!(second_remaining > Duration::from_secs(50));
        // Allow small timing variance
        let diff = if first_remaining > second_remaining {
            first_remaining - second_remaining
        } else {
            second_remaining - first_remaining
        };
        assert!(diff < Duration::from_secs(2));
    }
    /// Model scope matches only the exact model identifier.
    #[test]
    fn test_is_model_blocked_model_scope() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 60, 300);
        assert!(mgr.is_model_blocked("openai/gpt-4o", BlockScope::Model));
        assert!(!mgr.is_model_blocked("openai/gpt-4o-mini", BlockScope::Model));
    }
    /// Provider scope blocks every model under the recorded provider prefix.
    #[test]
    fn test_is_model_blocked_provider_scope() {
        let mgr = RetryAfterStateManager::new();
        // Block at provider level by recording with provider prefix
        mgr.record("openai", 60, 300);
        // Both openai models should be blocked
        assert!(mgr.is_model_blocked("openai/gpt-4o", BlockScope::Provider));
        assert!(mgr.is_model_blocked("openai/gpt-4o-mini", BlockScope::Provider));
        // Anthropic should not be blocked
        assert!(!mgr.is_model_blocked("anthropic/claude", BlockScope::Provider));
    }
    /// Model-scope blocks never spill over to sibling models of the same provider.
    #[test]
    fn test_model_scope_does_not_block_other_models() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 60, 300);
        // Model scope: only exact match is blocked
        assert!(mgr.is_model_blocked("openai/gpt-4o", BlockScope::Model));
        assert!(!mgr.is_model_blocked("openai/gpt-4o-mini", BlockScope::Model));
    }
    /// Blocks for distinct identifiers are tracked independently.
    #[test]
    fn test_multiple_identifiers_independent() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 60, 300);
        mgr.record("anthropic/claude", 30, 300);
        assert!(mgr.is_blocked("openai/gpt-4o"));
        assert!(mgr.is_blocked("anthropic/claude"));
        assert!(!mgr.is_blocked("azure/gpt-4o"));
    }
    /// A zero-second Retry-After produces an immediately-expired block.
    #[test]
    fn test_record_with_zero_seconds() {
        let mgr = RetryAfterStateManager::new();
        mgr.record("openai/gpt-4o", 0, 300);
        // With 0 seconds, the entry expires at Instant::now() + 0,
        // which is effectively immediately
        thread::sleep(Duration::from_millis(5));
        assert!(!mgr.is_blocked("openai/gpt-4o"));
    }
    /// A max cap of zero neutralizes any Retry-After value.
    #[test]
    fn test_max_retry_after_seconds_zero_caps_to_zero() {
        let mgr = RetryAfterStateManager::new();
        // Even with retry_after_seconds=60, max=0 caps to 0
        mgr.record("openai/gpt-4o", 60, 0);
        thread::sleep(Duration::from_millis(5));
        assert!(!mgr.is_blocked("openai/gpt-4o"));
    }
    /// `Default` yields the same empty state as `new`.
    #[test]
    fn test_default_trait() {
        let mgr = RetryAfterStateManager::default();
        assert!(!mgr.is_blocked("anything"));
    }
    // --- Proptest strategies ---
    use proptest::prelude::*;
    /// Strategy over a fixed set of provider name prefixes.
    fn arb_provider_prefix() -> impl Strategy<Value = String> {
        prop_oneof![
            Just("openai".to_string()),
            Just("anthropic".to_string()),
            Just("azure".to_string()),
            Just("google".to_string()),
            Just("cohere".to_string()),
        ]
    }
    /// Strategy over a fixed set of model name suffixes.
    fn arb_model_suffix() -> impl Strategy<Value = String> {
        prop_oneof![
            Just("gpt-4o".to_string()),
            Just("gpt-4o-mini".to_string()),
            Just("claude-3".to_string()),
            Just("gemini-pro".to_string()),
        ]
    }
    /// Strategy producing `provider/model` identifiers.
    fn arb_model_id() -> impl Strategy<Value = String> {
        (arb_provider_prefix(), arb_model_suffix())
            .prop_map(|(prefix, suffix)| format!("{}/{}", prefix, suffix))
    }
    /// Strategy over the two block scopes.
    fn arb_scope() -> impl Strategy<Value = BlockScope> {
        prop_oneof![Just(BlockScope::Model), Just(BlockScope::Provider),]
    }
    // Feature: retry-on-ratelimit, Property 15: Retry_After_State Scope Behavior
    // **Validates: Requirements 11.5, 11.6, 11.7, 11.8, 12.9, 12.10, 13.10, 13.11**
    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Property 15 Case 1: Model scope blocks only the exact model_id.
        #[test]
        fn prop_model_scope_blocks_exact_model_only(
            model_id in arb_model_id(),
            other_model_id in arb_model_id(),
            retry_after in 1u64..300,
        ) {
            prop_assume!(model_id != other_model_id);
            let mgr = RetryAfterStateManager::new();
            // Record with the exact model_id (model scope records the full model ID)
            mgr.record(&model_id, retry_after, 300);
            // The exact model should be blocked
            prop_assert!(
                mgr.is_model_blocked(&model_id, BlockScope::Model),
                "Model {} should be blocked with Model scope after recording",
                model_id
            );
            // A different model should NOT be blocked (even if same provider)
            prop_assert!(
                !mgr.is_model_blocked(&other_model_id, BlockScope::Model),
                "Model {} should NOT be blocked when {} was recorded with Model scope",
                other_model_id, model_id
            );
        }
        /// Property 15 Case 2: Provider scope blocks all models from the same provider.
        #[test]
        fn prop_provider_scope_blocks_all_same_provider_models(
            provider in arb_provider_prefix(),
            suffix1 in arb_model_suffix(),
            suffix2 in arb_model_suffix(),
            other_provider in arb_provider_prefix(),
            other_suffix in arb_model_suffix(),
            retry_after in 1u64..300,
        ) {
            let model1 = format!("{}/{}", provider, suffix1);
            let model2 = format!("{}/{}", provider, suffix2);
            let other_model = format!("{}/{}", other_provider, other_suffix);
            prop_assume!(provider != other_provider);
            let mgr = RetryAfterStateManager::new();
            // Record at provider level (provider scope records the provider prefix)
            mgr.record(&provider, retry_after, 300);
            // Both models from the same provider should be blocked
            prop_assert!(
                mgr.is_model_blocked(&model1, BlockScope::Provider),
                "Model {} should be blocked with Provider scope after recording provider {}",
                model1, provider
            );
            prop_assert!(
                mgr.is_model_blocked(&model2, BlockScope::Provider),
                "Model {} should be blocked with Provider scope after recording provider {}",
                model2, provider
            );
            // Model from a different provider should NOT be blocked
            prop_assert!(
                !mgr.is_model_blocked(&other_model, BlockScope::Provider),
                "Model {} should NOT be blocked when provider {} was recorded",
                other_model, provider
            );
        }
        /// Property 15 Case 3: Global state is visible across different "requests"
        /// (same manager instance is shared).
        #[test]
        fn prop_global_state_shared_across_requests(
            model_id in arb_model_id(),
            scope in arb_scope(),
            retry_after in 1u64..300,
        ) {
            let mgr = RetryAfterStateManager::new();
            // Determine the identifier to record based on scope
            let identifier = match scope {
                BlockScope::Model => model_id.clone(),
                BlockScope::Provider => extract_provider(&model_id).to_string(),
            };
            mgr.record(&identifier, retry_after, 300);
            // Simulate "different requests" by checking from the same manager instance.
            // Global state means any check against the same manager sees the block.
            // Check 1 (simulating request A)
            let blocked_a = mgr.is_model_blocked(&model_id, scope);
            // Check 2 (simulating request B)
            let blocked_b = mgr.is_model_blocked(&model_id, scope);
            prop_assert!(
                blocked_a && blocked_b,
                "Global state should be visible to all requests: request_a={}, request_b={}",
                blocked_a, blocked_b
            );
        }
        /// Property 15 Case 4: Request-scoped state (HashMap) is isolated per request.
        /// Two separate HashMaps don't share state.
        #[test]
        fn prop_request_scoped_state_isolated(
            model_id in arb_model_id(),
            retry_after in 1u64..300,
        ) {
            use std::collections::HashMap;
            use std::time::Instant;
            // Simulate request-scoped state using separate HashMaps
            // (as RequestContext.request_retry_after_state would be)
            let mut request_a_state: HashMap<String, Instant> = HashMap::new();
            let mut request_b_state: HashMap<String, Instant> = HashMap::new();
            // Request A records a Retry-After entry
            let expiration = Instant::now() + Duration::from_secs(retry_after);
            request_a_state.insert(model_id.clone(), expiration);
            // Request A should see the block
            let a_blocked = request_a_state
                .get(&model_id)
                .map_or(false, |exp| Instant::now() < *exp);
            // Request B should NOT see the block (separate HashMap)
            let b_blocked = request_b_state
                .get(&model_id)
                .map_or(false, |exp| Instant::now() < *exp);
            prop_assert!(
                a_blocked,
                "Request A should see its own block for {}",
                model_id
            );
            prop_assert!(
                !b_blocked,
                "Request B should NOT see Request A's block for {}",
                model_id
            );
            // Recording in request B should not affect request A
            let expiration_b = Instant::now() + Duration::from_secs(retry_after);
            request_b_state.insert(model_id.clone(), expiration_b);
            // Both should now be blocked independently
            let a_still_blocked = request_a_state
                .get(&model_id)
                .map_or(false, |exp| Instant::now() < *exp);
            let b_now_blocked = request_b_state
                .get(&model_id)
                .map_or(false, |exp| Instant::now() < *exp);
            prop_assert!(a_still_blocked, "Request A should still be blocked");
            prop_assert!(b_now_blocked, "Request B should now be blocked independently");
        }
    }
    // Feature: retry-on-ratelimit, Property 16: Retry_After_State Max Expiration Update
    // **Validates: Requirements 12.11**
    proptest! {
        #![proptest_config(ProptestConfig::with_cases(100))]
        /// Property 16: Recording multiple Retry-After values for the same identifier
        /// should result in the expiration reflecting the maximum value, not the most recent.
        #[test]
        fn prop_max_expiration_update(
            identifier in arb_model_id(),
            // Generate 2..=10 Retry-After values, each between 1 and 600 seconds
            retry_after_values in prop::collection::vec(1u64..=600, 2..=10),
            max_cap in 300u64..=600,
        ) {
            let mgr = RetryAfterStateManager::new();
            // Record all values for the same identifier
            for &val in &retry_after_values {
                mgr.record(&identifier, val, max_cap);
            }
            // The effective maximum is the max of all capped values
            let effective_max = retry_after_values
                .iter()
                .map(|&v| v.min(max_cap))
                .max()
                .unwrap();
            // The remaining block duration should be close to the effective maximum
            let remaining = mgr.remaining_block_duration(&identifier);
            prop_assert!(
                remaining.is_some(),
                "Identifier {} should still be blocked after recording {} values (effective_max={}s)",
                identifier, retry_after_values.len(), effective_max
            );
            let remaining_secs = remaining.unwrap().as_secs();
            // The remaining duration should be within a reasonable tolerance of the
            // effective maximum (allow up to 2 seconds for test execution time).
            // It must be at least (effective_max - 2) to prove the max won.
            prop_assert!(
                remaining_secs >= effective_max.saturating_sub(2),
                "Remaining {}s should reflect the max ({}s), not a smaller value. Values: {:?}",
                remaining_secs, effective_max, retry_after_values
            );
            // It should not exceed the effective max (plus small tolerance for timing)
            prop_assert!(
                remaining_secs <= effective_max + 1,
                "Remaining {}s should not exceed effective max {}s + tolerance. Values: {:?}",
                remaining_secs, effective_max, retry_after_values
            );
        }
    }
}

View file

@ -311,3 +311,794 @@ impl ConfigValidator {
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::configuration::{
ApplyTo, BackoffConfig, BackoffApplyTo, BlockScope, HighLatencyConfig,
LatencyMeasure, RetryAfterHandlingConfig,
RetryPolicy, RetryStrategy, StatusCodeConfig, StatusCodeEntry,
TimeoutRetryConfig,
};
use proptest::prelude::*;
/// Builds a test `LlmProvider` carrying the given model id and optional
/// retry policy; every other field takes its `Default` value.
fn make_provider(model: &str, policy: Option<RetryPolicy>) -> LlmProvider {
    LlmProvider {
        retry_policy: policy,
        model: Some(model.to_owned()),
        ..LlmProvider::default()
    }
}
/// A minimal valid `RetryPolicy`: provider failover with two attempts and
/// none of the optional triggers (status codes, timeout, latency, backoff,
/// Retry-After handling, budget) configured. Tests mutate this baseline.
fn basic_policy() -> RetryPolicy {
    RetryPolicy {
        default_strategy: RetryStrategy::DifferentProvider,
        default_max_attempts: 2,
        fallback_models: Vec::new(),
        on_status_codes: Vec::new(),
        on_timeout: None,
        on_high_latency: None,
        backoff: None,
        retry_after_handling: None,
        max_retry_duration_ms: None,
    }
}
/// A well-formed policy alongside a policy-less provider validates cleanly.
#[test]
fn test_valid_basic_policy_no_errors() {
    let providers = vec![
        make_provider("openai/gpt-4o", Some(basic_policy())),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
}
/// Providers without a retry policy are skipped: no errors, no warnings.
#[test]
fn test_no_retry_policy_skipped() {
    let providers = vec![make_provider("openai/gpt-4o", None)];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    assert!(result.unwrap().is_empty());
}
/// A status code above 599 is rejected with `StatusCodeOutOfRange`.
#[test]
fn test_status_code_out_of_range() {
    let mut policy = basic_policy();
    policy.on_status_codes = vec![StatusCodeConfig {
        codes: vec![StatusCodeEntry::Single(600)],
        strategy: RetryStrategy::SameModel,
        max_attempts: 2,
    }];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(e, ValidationError::StatusCodeOutOfRange { code: 600, .. })));
}
/// A range whose start exceeds its end ("504-502") is rejected as inverted.
#[test]
fn test_status_code_range_inverted() {
    let mut policy = basic_policy();
    policy.on_status_codes = vec![StatusCodeConfig {
        codes: vec![StatusCodeEntry::Range("504-502".to_string())],
        strategy: RetryStrategy::SameModel,
        max_attempts: 2,
    }];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(e, ValidationError::StatusCodeRangeInverted { .. })));
}
/// `backoff.max_ms` must be strictly greater than `base_ms`; equality is rejected.
#[test]
fn test_backoff_max_ms_not_greater_than_base_ms() {
    let mut policy = basic_policy();
    policy.backoff = Some(BackoffConfig {
        apply_to: BackoffApplyTo::SameModel,
        base_ms: 5000,
        max_ms: 5000,
        jitter: true,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(e, ValidationError::MaxMsNotGreaterThanBaseMs { .. })));
}
/// `backoff.base_ms == 0` is rejected as a non-positive value.
#[test]
fn test_backoff_zero_base_ms() {
    let mut policy = basic_policy();
    policy.backoff = Some(BackoffConfig {
        apply_to: BackoffApplyTo::SameModel,
        base_ms: 0,
        max_ms: 5000,
        jitter: true,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(e, ValidationError::NonPositiveValue { field, .. } if field == "backoff.base_ms")));
}
/// A zero retry budget (`max_retry_duration_ms == 0`) is rejected.
#[test]
fn test_max_retry_duration_ms_zero() {
    let mut policy = basic_policy();
    policy.max_retry_duration_ms = Some(0);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(e, ValidationError::NonPositiveValue { field, .. } if field == "max_retry_duration_ms")));
}
/// A DifferentProvider strategy with only one configured provider warns
/// (there is nowhere to fail over to).
#[test]
fn test_single_provider_failover_warning() {
    let policy = basic_policy(); // default_strategy is DifferentProvider
    let providers = vec![make_provider("openai/gpt-4o", Some(policy))];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(warnings.iter().any(|w| matches!(w, ValidationWarning::SingleProviderWithFailover { .. })));
}
/// The same status code listed in two `on_status_codes` rules warns about overlap.
#[test]
fn test_overlapping_status_codes_warning() {
    let mut policy = basic_policy();
    policy.on_status_codes = vec![
        StatusCodeConfig {
            codes: vec![StatusCodeEntry::Single(429)],
            strategy: RetryStrategy::SameModel,
            max_attempts: 2,
        },
        StatusCodeConfig {
            codes: vec![StatusCodeEntry::Single(429)],
            strategy: RetryStrategy::DifferentProvider,
            max_attempts: 3,
        },
    ];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(warnings.iter().any(|w| matches!(w, ValidationWarning::OverlappingStatusCodes { code: 429, .. })));
}
/// Backoff scoped to SameModel under a DifferentProvider strategy warns
/// about the apply_to/strategy mismatch.
#[test]
fn test_backoff_apply_to_mismatch_warning() {
    let mut policy = basic_policy();
    policy.default_strategy = RetryStrategy::DifferentProvider;
    policy.backoff = Some(BackoffConfig {
        apply_to: BackoffApplyTo::SameModel,
        base_ms: 100,
        max_ms: 5000,
        jitter: true,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(warnings.iter().any(|w| matches!(w, ValidationWarning::BackoffApplyToMismatch { .. })));
}
/// A fallback model not present among configured providers warns (not an error).
#[test]
fn test_fallback_model_not_in_provider_list_warning() {
    let mut policy = basic_policy();
    policy.fallback_models = vec!["nonexistent/model".to_string()];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(warnings.iter().any(|w| matches!(w, ValidationWarning::FallbackModelNotInProviderList { fallback, .. } if fallback == "nonexistent/model")));
}
/// Singles and ranges expand into one flat, ordered list of codes.
#[test]
fn test_expand_status_codes_mixed() {
    let codes = vec![
        StatusCodeEntry::Single(429),
        StatusCodeEntry::Range("502-504".to_string()),
        StatusCodeEntry::Single(526),
    ];
    let result = ConfigValidator::expand_status_codes(&codes);
    assert!(result.is_ok());
    let expanded = result.unwrap();
    assert_eq!(expanded, vec![429, 502, 503, 504, 526]);
}
/// A "start-end" range expands inclusively on both endpoints.
#[test]
fn test_valid_range_expansion() {
    let codes = vec![StatusCodeEntry::Range("500-503".to_string())];
    let result = ConfigValidator::expand_status_codes(&codes);
    assert!(result.is_ok());
    assert_eq!(result.unwrap(), vec![500, 501, 502, 503]);
}
/// A fully-populated, internally consistent policy produces neither errors
/// nor warnings.
#[test]
fn test_valid_policy_with_backoff_and_status_codes() {
    let mut policy = basic_policy();
    policy.default_strategy = RetryStrategy::SameModel;
    policy.on_status_codes = vec![
        StatusCodeConfig {
            codes: vec![StatusCodeEntry::Single(429), StatusCodeEntry::Range("502-504".to_string())],
            strategy: RetryStrategy::SameModel,
            max_attempts: 3,
        },
    ];
    policy.backoff = Some(BackoffConfig {
        apply_to: BackoffApplyTo::SameModel,
        base_ms: 100,
        max_ms: 5000,
        jitter: true,
    });
    policy.max_retry_duration_ms = Some(30000);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    assert!(result.unwrap().is_empty());
}
// ── P1 Validation Tests ───────────────────────────────────────────────
/// `on_timeout.max_attempts == 0` is rejected as a non-positive value.
#[test]
fn test_on_timeout_zero_max_attempts_rejected() {
    let mut policy = basic_policy();
    policy.on_timeout = Some(TimeoutRetryConfig {
        strategy: RetryStrategy::DifferentProvider,
        max_attempts: 0,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveValue { field, .. } if field == "on_timeout.max_attempts"
    )));
}
/// A positive `on_timeout.max_attempts` passes validation.
#[test]
fn test_on_timeout_valid_max_attempts_accepted() {
    let mut policy = basic_policy();
    policy.on_timeout = Some(TimeoutRetryConfig {
        strategy: RetryStrategy::DifferentProvider,
        max_attempts: 2,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
}
/// `retry_after_handling.max_retry_after_seconds == 0` is rejected as a
/// non-positive value.
#[test]
fn test_retry_after_handling_zero_max_seconds_rejected() {
    let mut policy = basic_policy();
    policy.retry_after_handling = Some(RetryAfterHandlingConfig {
        scope: BlockScope::Model,
        // `ApplyTo` is already imported at the top of this module; use the
        // short path for consistency with the other tests here.
        apply_to: ApplyTo::Global,
        max_retry_after_seconds: 0,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveValue { field, .. }
        if field == "retry_after_handling.max_retry_after_seconds"
    )));
}
/// A positive `max_retry_after_seconds` passes validation.
#[test]
fn test_retry_after_handling_valid_max_seconds_accepted() {
    let mut policy = basic_policy();
    policy.retry_after_handling = Some(RetryAfterHandlingConfig {
        scope: BlockScope::Model,
        // Use the imported `ApplyTo` rather than the fully-qualified path,
        // matching the rest of this module.
        apply_to: ApplyTo::Global,
        max_retry_after_seconds: 300,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
}
/// An empty fallback model string is rejected as invalid.
#[test]
fn test_fallback_model_empty_string_rejected() {
    let mut policy = basic_policy();
    policy.fallback_models = vec!["".to_string()];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::InvalidFallbackModel { fallback, .. } if fallback.is_empty()
    )));
}
/// A fallback model without the "provider/model" slash format is rejected.
#[test]
fn test_fallback_model_no_slash_rejected() {
    let mut policy = basic_policy();
    policy.fallback_models = vec!["just-a-model-name".to_string()];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::InvalidFallbackModel { fallback, .. } if fallback == "just-a-model-name"
    )));
}
/// A correctly formatted "provider/model" fallback passes validation.
#[test]
fn test_fallback_model_valid_format_accepted() {
    let mut policy = basic_policy();
    policy.fallback_models = vec!["anthropic/claude-3".to_string()];
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
}
/// Provider-scoped Retry-After blocking combined with a SameModel strategy
/// warns: a provider-wide block would leave the same-model retry nowhere to go.
#[test]
fn test_provider_scope_ra_with_same_model_strategy_warning() {
    let mut policy = basic_policy();
    policy.default_strategy = RetryStrategy::SameModel;
    policy.retry_after_handling = Some(RetryAfterHandlingConfig {
        scope: BlockScope::Provider,
        // Use the imported `ApplyTo` for consistency with the rest of this module.
        apply_to: ApplyTo::Global,
        max_retry_after_seconds: 300,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::ProviderScopeWithSameModel { .. }
    )));
}
/// Model-scoped Retry-After blocking with a SameModel strategy is fine:
/// no scope/strategy-mismatch warning is emitted.
#[test]
fn test_model_scope_ra_with_same_model_no_warning() {
    let mut policy = basic_policy();
    policy.default_strategy = RetryStrategy::SameModel;
    policy.retry_after_handling = Some(RetryAfterHandlingConfig {
        scope: BlockScope::Model,
        // Use the imported `ApplyTo` for consistency with the rest of this module.
        apply_to: ApplyTo::Global,
        max_retry_after_seconds: 300,
    });
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
    let warnings = result.unwrap();
    assert!(!warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::ProviderScopeWithSameModel { .. }
    )));
}
// ── P2 Validation Tests ───────────────────────────────────────────────
/// A fully-populated, valid `HighLatencyConfig` baseline; individual tests
/// copy it and mutate one field to exercise each validation rule.
fn hl_config_valid() -> HighLatencyConfig {
    HighLatencyConfig {
        measure: LatencyMeasure::Ttfb,
        threshold_ms: 5000,
        trigger_window_seconds: None,
        min_triggers: 1,
        max_attempts: 2,
        strategy: RetryStrategy::DifferentProvider,
        block_duration_seconds: 300,
        apply_to: ApplyTo::Global,
        scope: BlockScope::Model,
    }
}
/// The baseline high-latency config passes validation unchanged.
#[test]
fn test_on_high_latency_valid_config_accepted() {
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl_config_valid());
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_ok());
}
/// `on_high_latency.threshold_ms == 0` is rejected as a non-positive value.
#[test]
fn test_on_high_latency_zero_threshold_ms_rejected() {
    let mut policy = basic_policy();
    let mut hl = hl_config_valid();
    hl.threshold_ms = 0;
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveValue { field, .. }
        if field == "on_high_latency.threshold_ms"
    )));
}
/// `on_high_latency.max_attempts == 0` is rejected as a non-positive value.
#[test]
fn test_on_high_latency_zero_max_attempts_rejected() {
    let mut policy = basic_policy();
    let mut hl = hl_config_valid();
    hl.max_attempts = 0;
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveValue { field, .. }
        if field == "on_high_latency.max_attempts"
    )));
}
/// `on_high_latency.block_duration_seconds == 0` is rejected as non-positive.
#[test]
fn test_on_high_latency_zero_block_duration_rejected() {
    let mut policy = basic_policy();
    let mut hl = hl_config_valid();
    hl.block_duration_seconds = 0;
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveValue { field, .. }
        if field == "on_high_latency.block_duration_seconds"
    )));
}
/// `min_triggers > 1` requires `trigger_window_seconds`; omitting it is an error.
#[test]
fn test_on_high_latency_min_triggers_gt1_without_window_rejected() {
    let mut policy = basic_policy();
    let mut hl = hl_config_valid();
    hl.min_triggers = 3;
    hl.trigger_window_seconds = None;
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err());
    let errs = result.unwrap_err();
    assert!(errs.iter().any(|e| matches!(
        e,
        ValidationError::LatencyMissingTriggerWindow { .. }
    )));
}
#[test]
fn test_on_high_latency_min_triggers_gt1_with_window_accepted() {
    // min_triggers > 1 is valid as long as a trigger window is set.
    let mut hl = hl_config_valid();
    hl.min_triggers = 3;
    hl.trigger_window_seconds = Some(60);
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    assert!(
        ConfigValidator::validate_retry_policies(&providers).is_ok(),
        "min_triggers > 1 with a trigger window should validate"
    );
}
#[test]
fn test_on_high_latency_zero_trigger_window_rejected() {
    // A trigger window of 0 seconds must be rejected as non-positive.
    let mut hl = hl_config_valid();
    hl.trigger_window_seconds = Some(0);
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let errs = ConfigValidator::validate_retry_policies(&providers)
        .expect_err("trigger_window_seconds = 0 should fail validation");
    let flagged = errs.iter().any(|e| matches!(
        e,
        ValidationError::NonPositiveTriggerWindow { .. }
    ));
    assert!(flagged, "expected NonPositiveTriggerWindow error");
}
#[test]
fn test_on_high_latency_provider_scope_same_model_warning() {
    // Provider-scoped blocking combined with a same-model retry strategy
    // should validate, but emit a scope/strategy mismatch warning.
    let mut hl = hl_config_valid();
    hl.scope = BlockScope::Provider;
    hl.strategy = RetryStrategy::SameModel;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let warnings = ConfigValidator::validate_retry_policies(&providers)
        .expect("config should still validate");
    let warned = warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::LatencyScopeStrategyMismatch { .. }
    ));
    assert!(warned, "expected LatencyScopeStrategyMismatch warning");
}
#[test]
fn test_on_high_latency_model_scope_same_model_no_warning() {
    // Model-scoped blocking with a same-model strategy is consistent,
    // so no scope/strategy mismatch warning should be produced.
    let mut hl = hl_config_valid();
    hl.scope = BlockScope::Model;
    hl.strategy = RetryStrategy::SameModel;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let warnings = ConfigValidator::validate_retry_policies(&providers)
        .expect("config should validate");
    let warned = warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::LatencyScopeStrategyMismatch { .. }
    ));
    assert!(!warned, "did not expect LatencyScopeStrategyMismatch warning");
}
#[test]
fn test_on_high_latency_threshold_below_1000_warning() {
    // Thresholds under 1000 ms should validate but produce an
    // aggressive-threshold warning carrying the configured value.
    let mut hl = hl_config_valid();
    hl.threshold_ms = 500;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let warnings = ConfigValidator::validate_retry_policies(&providers)
        .expect("config should still validate");
    let warned = warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::AggressiveLatencyThreshold { threshold_ms: 500, .. }
    ));
    assert!(warned, "expected AggressiveLatencyThreshold warning for 500 ms");
}
#[test]
fn test_on_high_latency_threshold_1000_no_warning() {
    // 1000 ms is the boundary value: no aggressive-threshold warning.
    let mut hl = hl_config_valid();
    hl.threshold_ms = 1000;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let warnings = ConfigValidator::validate_retry_policies(&providers)
        .expect("config should validate");
    let warned = warnings.iter().any(|w| matches!(
        w,
        ValidationWarning::AggressiveLatencyThreshold { .. }
    ));
    assert!(!warned, "did not expect AggressiveLatencyThreshold warning at 1000 ms");
}
#[test]
fn test_on_high_latency_total_measure_accepted() {
    // The "total" latency measure is a valid configuration choice.
    let mut hl = hl_config_valid();
    hl.measure = LatencyMeasure::Total;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    assert!(
        ConfigValidator::validate_retry_policies(&providers).is_ok(),
        "LatencyMeasure::Total should validate"
    );
}
#[test]
fn test_on_high_latency_request_apply_to_accepted() {
    // Request-scoped apply_to is a valid configuration choice.
    let mut hl = hl_config_valid();
    hl.apply_to = ApplyTo::Request;
    let mut policy = basic_policy();
    policy.on_high_latency = Some(hl);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    assert!(
        ConfigValidator::validate_retry_policies(&providers).is_ok(),
        "ApplyTo::Request should validate"
    );
}
// ── Strategies for invalid config generation ───────────────────────────
/// Generates a status code outside the valid 100-599 range.
///
/// Draws from either side of the valid interval: 0..100 (below) or
/// 600..=u16::MAX (above). The boundary codes 100 and 599 are valid and
/// are never produced by this strategy.
fn arb_out_of_range_code() -> impl Strategy<Value = u16> {
    prop_oneof![
        (0u16..100u16), // below 100
        (600u16..=u16::MAX), // above 599
    ]
}
/// Generates a range string where start > end (both within valid range).
///
/// `start` is drawn from 101..=599 and `end` from 100..start, so the
/// resulting "start-end" string is always inverted (start strictly
/// greater than end) while both endpoints remain inside 100-599.
fn arb_inverted_range() -> impl Strategy<Value = String> {
    (101u16..=599u16).prop_flat_map(|start| {
        (100u16..start).prop_map(move |end| format!("{}-{}", start, end))
    })
}
/// Generates a backoff config where max_ms <= base_ms.
///
/// `base_ms` is drawn from 1..=10000 and `max_ms` from 0..=base_ms, so
/// every generated config violates the max > base requirement. Note that
/// `max_ms` may also come out as 0, so this strategy covers the
/// degenerate zero-max case too; validation must reject either way.
fn arb_backoff_max_lte_base() -> impl Strategy<Value = BackoffConfig> {
    (1u64..=10000u64).prop_flat_map(|base_ms| {
        (0u64..=base_ms).prop_map(move |max_ms| BackoffConfig {
            apply_to: BackoffApplyTo::Global,
            base_ms,
            max_ms,
            jitter: true,
        })
    })
}
/// Generates a backoff config where base_ms = 0.
///
/// `max_ms` is drawn from 1..=10000 while `base_ms` is pinned to 0, so
/// the config always violates the positive-base requirement (and never
/// the max > base one).
fn arb_backoff_zero_base() -> impl Strategy<Value = BackoffConfig> {
    (1u64..=10000u64).prop_map(|max_ms| BackoffConfig {
        apply_to: BackoffApplyTo::Global,
        base_ms: 0,
        max_ms,
        jitter: true,
    })
}
// Feature: retry-on-ratelimit, Property 3: Invalid Configuration Rejected
// **Validates: Requirements 8.27**
proptest! {
    #![proptest_config(proptest::prelude::ProptestConfig::with_cases(100))]
    /// Property 3 Case 1: Status codes outside 100-599 are rejected.
    #[test]
    fn prop_invalid_status_code_out_of_range(code in arb_out_of_range_code()) {
        let mut policy = basic_policy();
        policy.on_status_codes = vec![StatusCodeConfig {
            codes: vec![StatusCodeEntry::Single(code)],
            strategy: RetryStrategy::SameModel,
            max_attempts: 2,
        }];
        let providers = vec![
            make_provider("openai/gpt-4o", Some(policy)),
            make_provider("anthropic/claude-3", None),
        ];
        let result = ConfigValidator::validate_retry_policies(&providers);
        prop_assert!(result.is_err(), "Expected Err for out-of-range code {}", code);
    }
    /// Property 3 Case 2: Range strings with start > end are rejected.
    #[test]
    fn prop_invalid_range_start_gt_end(range in arb_inverted_range()) {
        let mut policy = basic_policy();
        policy.on_status_codes = vec![StatusCodeConfig {
            codes: vec![StatusCodeEntry::Range(range.clone())],
            strategy: RetryStrategy::SameModel,
            max_attempts: 2,
        }];
        let providers = vec![
            make_provider("openai/gpt-4o", Some(policy)),
            make_provider("anthropic/claude-3", None),
        ];
        let result = ConfigValidator::validate_retry_policies(&providers);
        prop_assert!(result.is_err(), "Expected Err for inverted range {}", range);
    }
    /// Property 3 Case 3: Backoff with max_ms <= base_ms is rejected.
    #[test]
    fn prop_invalid_backoff_max_lte_base(backoff in arb_backoff_max_lte_base()) {
        let mut policy = basic_policy();
        policy.backoff = Some(backoff.clone());
        let providers = vec![
            make_provider("openai/gpt-4o", Some(policy)),
            make_provider("anthropic/claude-3", None),
        ];
        let result = ConfigValidator::validate_retry_policies(&providers);
        prop_assert!(
            result.is_err(),
            "Expected Err for max_ms ({}) <= base_ms ({})",
            backoff.max_ms, backoff.base_ms
        );
    }
    /// Property 3 Case 4: Backoff with base_ms = 0 is rejected.
    #[test]
    fn prop_invalid_backoff_zero_base(backoff in arb_backoff_zero_base()) {
        let mut policy = basic_policy();
        policy.backoff = Some(backoff);
        let providers = vec![
            make_provider("openai/gpt-4o", Some(policy)),
            make_provider("anthropic/claude-3", None),
        ];
        let result = ConfigValidator::validate_retry_policies(&providers);
        prop_assert!(result.is_err(), "Expected Err for base_ms = 0");
    }
}
/// Property 3 Case 5: max_retry_duration_ms = 0 is rejected.
///
/// This case has no generated input, so it is a plain unit test rather
/// than a proptest case with a `Just(())` dummy: running a fully
/// deterministic assertion 100 times adds runtime without adding any
/// coverage.
#[test]
fn prop_invalid_max_retry_duration_zero() {
    let mut policy = basic_policy();
    policy.max_retry_duration_ms = Some(0);
    let providers = vec![
        make_provider("openai/gpt-4o", Some(policy)),
        make_provider("anthropic/claude-3", None),
    ];
    let result = ConfigValidator::validate_retry_policies(&providers);
    assert!(result.is_err(), "Expected Err for max_retry_duration_ms = 0");
}
}

View file

@ -0,0 +1,27 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
fallback_models: [anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
on_timeout:
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,33 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
fallback_models: [anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
on_high_latency:
threshold_ms: 1000
measure: "total"
min_triggers: 1
strategy: "different_provider"
max_attempts: 2
block_duration_seconds: 60
scope: "model"
apply_to: "global"
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [503]
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,17 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
# No retry_policy — errors should be returned directly to client
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary

View file

@ -0,0 +1,27 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 1
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 1
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary
- model: mistral/mistral-large
base_url: http://host.docker.internal:${MOCK_TERTIARY_PORT}
access_key: test-key-tertiary

View file

@ -0,0 +1,24 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "same_model"
default_max_attempts: 3
on_status_codes:
- codes: [429]
strategy: "same_model"
max_attempts: 3
backoff:
apply_to: "same_model"
base_ms: 500
max_ms: 5000
jitter: false

View file

@ -0,0 +1,28 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
fallback_models: [anthropic/claude-3-5-sonnet, mistral/mistral-large]
default_strategy: "different_provider"
default_max_attempts: 3
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 3
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_FALLBACK1_PORT}
access_key: test-key-fallback1
- model: mistral/mistral-large
base_url: http://host.docker.internal:${MOCK_FALLBACK2_PORT}
access_key: test-key-fallback2

View file

@ -0,0 +1,23 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
default_strategy: "same_model"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "same_model"
max_attempts: 2
retry_after_handling:
scope: "model"
apply_to: "request"
max_retry_after_seconds: 300

View file

@ -0,0 +1,36 @@
version: v0.3.0
listeners:
- type: model
name: model_listener
port: 12000
model_providers:
- model: openai/gpt-4o
base_url: http://host.docker.internal:${MOCK_PRIMARY_PORT}
access_key: test-key-primary
default: true
retry_policy:
fallback_models: [anthropic/claude-3-5-sonnet]
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2
retry_after_handling:
scope: "model"
apply_to: "global"
max_retry_after_seconds: 300
- model: anthropic/claude-3-5-sonnet
base_url: http://host.docker.internal:${MOCK_SECONDARY_PORT}
access_key: test-key-secondary
default: false
retry_policy:
default_strategy: "different_provider"
default_max_attempts: 2
on_status_codes:
- codes: [429]
strategy: "different_provider"
max_attempts: 2

File diff suppressed because it is too large Load diff