diff --git a/crates/brightstaff/src/main.rs b/crates/brightstaff/src/main.rs
index f179dc4b..36484059 100644
--- a/crates/brightstaff/src/main.rs
+++ b/crates/brightstaff/src/main.rs
@@ -192,10 +192,13 @@ async fn init_app_state(
     // Validate that all models referenced in top-level routing_preferences exist in model_providers.
     // The CLI renders model_providers with `name` = "openai/gpt-4o" and `model` = "gpt-4o",
     // so we accept a match against either field.
+    // Internal providers (arch-router, arch-function, etc.) are excluded since they are not
+    // valid routing targets for user-defined routing_preferences.
     if let Some(ref route_prefs) = config.routing_preferences {
         let provider_model_names: std::collections::HashSet<&str> = config
             .model_providers
             .iter()
+            .filter(|p| p.internal != Some(true))
             .flat_map(|p| std::iter::once(p.name.as_str()).chain(p.model.as_deref()))
             .collect();
         for pref in route_prefs {
diff --git a/crates/llm_gateway/src/stream_context.rs b/crates/llm_gateway/src/stream_context.rs
index afb0b050..c5bc064b 100644
--- a/crates/llm_gateway/src/stream_context.rs
+++ b/crates/llm_gateway/src/stream_context.rs
@@ -809,6 +809,10 @@ impl HttpContext for StreamContext {
             return Action::Continue;
         }
 
+        // Capture request ID and traceparent early so all subsequent log messages include them
+        self.request_id = self.get_http_request_header(REQUEST_ID_HEADER);
+        self.traceparent = self.get_http_request_header(TRACE_PARENT_HEADER);
+
         // Capture HTTP method and protocol for tracing
         self.http_method = self.get_http_request_header(":method");
         self.http_protocol = self.get_http_request_header(":scheme");
@@ -884,9 +888,6 @@
         self.delete_content_length_header();
         self.save_ratelimit_header();
 
-        self.request_id = self.get_http_request_header(REQUEST_ID_HEADER);
-        self.traceparent = self.get_http_request_header(TRACE_PARENT_HEADER);
-
         Action::Continue
     }
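For context, here is a minimal, self-contained sketch of what the patched validation in `init_app_state` amounts to. The `ModelProvider` and `RoutingPreference` structs below are hypothetical, pared-down stand-ins for the real config types (the actual structs live in the configuration crate and carry more fields); only the fields shown in the diff are assumed.

```rust
use std::collections::HashSet;

// Hypothetical, pared-down stand-ins for the real config types.
struct ModelProvider {
    name: String,
    model: Option<String>,
    internal: Option<bool>,
}

struct RoutingPreference {
    model: String,
}

/// Returns the routing_preferences entries whose `model` matches neither the
/// `name` nor the `model` of any non-internal provider.
fn unknown_routing_targets<'a>(
    providers: &'a [ModelProvider],
    prefs: &'a [RoutingPreference],
) -> Vec<&'a str> {
    // Build the set of acceptable identifiers, skipping internal providers
    // (arch-router, arch-function, ...) as in the patched validation.
    let known: HashSet<&str> = providers
        .iter()
        .filter(|p| p.internal != Some(true))
        .flat_map(|p| std::iter::once(p.name.as_str()).chain(p.model.as_deref()))
        .collect();

    prefs
        .iter()
        .map(|pref| pref.model.as_str())
        .filter(|model| !known.contains(model))
        .collect()
}

fn main() {
    let providers = vec![
        ModelProvider {
            name: "openai/gpt-4o".into(),
            model: Some("gpt-4o".into()),
            internal: None,
        },
        ModelProvider {
            name: "arch-router".into(),
            model: Some("arch-router".into()),
            internal: Some(true),
        },
    ];
    let prefs = vec![
        RoutingPreference { model: "gpt-4o".into() },      // accepted via `model`
        RoutingPreference { model: "arch-router".into() }, // internal provider: rejected
    ];

    // Prints ["arch-router"]: internal providers are no longer valid routing targets.
    println!("{:?}", unknown_routing_targets(&providers, &prefs));
}
```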