fixed reasoning failures (#634)

* fixed reasoning failures

* adding debugging

* made several fixes for transmission issues for SSE events, incomplete handling of JSON types by Anthropic, and wrote a bunch of tests

* removed debugging from supervisord.conf

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-342.local>
This commit is contained in:
Salman Paracha 2025-12-18 11:02:59 -08:00 committed by GitHub
parent 2f9121407b
commit 48bbc7cce7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 360 additions and 68 deletions

View file

@ -20,9 +20,8 @@ use common::ratelimit::Header;
use common::stats::{IncrementingMetric, RecordingMetric};
use common::{ratelimit, routing, tokenizer};
use hermesllm::apis::streaming_shapes::amazon_bedrock_binary_frame::BedrockBinaryFrameDecoder;
use hermesllm::apis::streaming_shapes::sse::{
SseEvent, SseStreamBuffer, SseStreamBufferTrait, SseStreamIter,
};
use hermesllm::apis::streaming_shapes::sse::{SseEvent, SseStreamBuffer, SseStreamBufferTrait};
use hermesllm::apis::streaming_shapes::sse_chunk_processor::SseChunkProcessor;
use hermesllm::clients::endpoints::SupportedAPIsFromClient;
use hermesllm::providers::response::ProviderResponse;
use hermesllm::providers::streaming_response::ProviderStreamResponse;
@ -55,6 +54,7 @@ pub struct StreamContext {
http_method: Option<String>,
http_protocol: Option<String>,
sse_buffer: Option<SseStreamBuffer>,
sse_chunk_processor: Option<SseChunkProcessor>,
}
impl StreamContext {
@ -85,6 +85,7 @@ impl StreamContext {
http_method: None,
http_protocol: None,
sse_buffer: None,
sse_chunk_processor: None,
}
}
@ -138,7 +139,7 @@ impl StreamContext {
));
info!(
"[ARCHGW_REQ_ID:{}] PROVIDER_SELECTION: Hint='{}' -> Selected='{}'",
"[PLANO_REQ_ID:{}] PROVIDER_SELECTION: Hint='{}' -> Selected='{}'",
self.request_identifier(),
self.get_http_request_header(ARCH_PROVIDER_HINT_HEADER)
.unwrap_or("none".to_string()),
@ -223,7 +224,7 @@ impl StreamContext {
let token_count = tokenizer::token_count(model, json_string).unwrap_or(0);
debug!(
"[ARCHGW_REQ_ID:{}] TOKEN_COUNT: model='{}' input_tokens={}",
"[PLANO_REQ_ID:{}] TOKEN_COUNT: model='{}' input_tokens={}",
self.request_identifier(),
model,
token_count
@ -237,7 +238,7 @@ impl StreamContext {
// Check if rate limiting needs to be applied.
if let Some(selector) = self.ratelimit_selector.take() {
info!(
"[ARCHGW_REQ_ID:{}] RATELIMIT_CHECK: model='{}' selector='{}:{}'",
"[PLANO_REQ_ID:{}] RATELIMIT_CHECK: model='{}' selector='{}:{}'",
self.request_identifier(),
model,
selector.key,
@ -250,7 +251,7 @@ impl StreamContext {
)?;
} else {
debug!(
"[ARCHGW_REQ_ID:{}] RATELIMIT_SKIP: model='{}' (no selector)",
"[PLANO_REQ_ID:{}] RATELIMIT_SKIP: model='{}' (no selector)",
self.request_identifier(),
model
);
@ -269,7 +270,7 @@ impl StreamContext {
Ok(duration) => {
let duration_ms = duration.as_millis();
info!(
"[ARCHGW_REQ_ID:{}] TIME_TO_FIRST_TOKEN: {}ms",
"[PLANO_REQ_ID:{}] TIME_TO_FIRST_TOKEN: {}ms",
self.request_identifier(),
duration_ms
);
@ -278,7 +279,7 @@ impl StreamContext {
}
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] TIME_MEASUREMENT_ERROR: {:?}",
"[PLANO_REQ_ID:{}] TIME_MEASUREMENT_ERROR: {:?}",
self.request_identifier(),
e
);
@ -294,7 +295,7 @@ impl StreamContext {
// Convert the duration to milliseconds
let duration_ms = duration.as_millis();
info!(
"[ARCHGW_REQ_ID:{}] REQUEST_COMPLETE: latency={}ms tokens={}",
"[PLANO_REQ_ID:{}] REQUEST_COMPLETE: latency={}ms tokens={}",
self.request_identifier(),
duration_ms,
self.response_tokens
@ -310,7 +311,7 @@ impl StreamContext {
self.metrics.time_per_output_token.record(tpot);
info!(
"[ARCHGW_REQ_ID:{}] TOKEN_THROUGHPUT: time_per_token={}ms tokens_per_second={}",
"[PLANO_REQ_ID:{}] TOKEN_THROUGHPUT: time_per_token={}ms tokens_per_second={}",
self.request_identifier(),
tpot,
1000 / tpot
@ -333,7 +334,7 @@ impl StreamContext {
if self.streaming_response {
let chunk_size = body_size;
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_CHUNK: streaming=true chunk_size={}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_CHUNK: streaming=true chunk_size={}",
self.request_identifier(),
chunk_size
);
@ -341,7 +342,7 @@ impl StreamContext {
Some(chunk) => chunk,
None => {
warn!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_ERROR: empty chunk, size={}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_ERROR: empty chunk, size={}",
self.request_identifier(),
chunk_size
);
@ -351,7 +352,7 @@ impl StreamContext {
if streaming_chunk.len() != chunk_size {
warn!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_MISMATCH: expected={} actual={}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_MISMATCH: expected={} actual={}",
self.request_identifier(),
chunk_size,
streaming_chunk.len()
@ -363,7 +364,7 @@ impl StreamContext {
return Err(Action::Continue);
}
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_COMPLETE: streaming=false body_size={}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_COMPLETE: streaming=false body_size={}",
self.request_identifier(),
body_size
);
@ -383,7 +384,7 @@ impl StreamContext {
provider_id: ProviderId,
) -> Result<Vec<u8>, Action> {
debug!(
"[ARCHGW_REQ_ID:{}] STREAMING_PROCESS: client={:?} provider_id={:?} chunk_size={}",
"[PLANO_REQ_ID:{}] STREAMING_PROCESS: client={:?} provider_id={:?} chunk_size={}",
self.request_identifier(),
self.client_api,
provider_id,
@ -403,15 +404,10 @@ impl StreamContext {
return self.handle_bedrock_binary_stream(body, &client_api, &upstream_api);
}
// Parse body into SSE iterator using TryFrom
let sse_iter: SseStreamIter<std::vec::IntoIter<String>> =
match SseStreamIter::try_from(body) {
Ok(iter) => iter,
Err(e) => {
warn!("Failed to parse body into SSE iterator: {}", e);
return Err(Action::Continue);
}
};
// Initialize SSE chunk processor if not present
if self.sse_chunk_processor.is_none() {
self.sse_chunk_processor = Some(SseChunkProcessor::new());
}
// Initialize SSE buffer if not present
if self.sse_buffer.is_none() {
@ -425,18 +421,42 @@ impl StreamContext {
};
}
// Process each SSE event
for sse_event in sse_iter {
// Transform event if upstream API != client API
let transformed_event: SseEvent =
match SseEvent::try_from((sse_event, &client_api, &upstream_api)) {
Ok(event) => event,
// Process chunk through SSE processor (handles incomplete events)
let transformed_events = match self.sse_chunk_processor.as_mut() {
Some(processor) => {
let result = processor.process_chunk(body, &client_api, &upstream_api);
let has_buffered = processor.has_buffered_data();
let buffered_size = processor.buffered_size();
match result {
Ok(events) => {
if has_buffered {
debug!(
"[PLANO_REQ_ID:{}] SSE_INCOMPLETE_BUFFERED: {} bytes buffered for next chunk",
self.request_identifier(),
buffered_size
);
}
events
}
Err(e) => {
warn!("Failed to transform SSE event: {}", e);
warn!(
"[PLANO_REQ_ID:{}] SSE_CHUNK_PROCESS_ERROR: {}",
self.request_identifier(),
e
);
return Err(Action::Continue);
}
};
}
}
None => {
warn!("SSE chunk processor unexpectedly missing");
return Err(Action::Continue);
}
};
// Process each successfully transformed SSE event
for transformed_event in transformed_events {
// Extract ProviderStreamResponse for processing (token counting, etc.)
if !transformed_event.is_done() && !transformed_event.is_event_only() {
match transformed_event.provider_response() {
@ -445,7 +465,7 @@ impl StreamContext {
if provider_response.is_final() {
debug!(
"[ARCHGW_REQ_ID:{}] STREAMING_FINAL_CHUNK: total_tokens={}",
"[PLANO_REQ_ID:{}] STREAMING_FINAL_CHUNK: total_tokens={}",
self.request_identifier(),
self.response_tokens
);
@ -455,7 +475,7 @@ impl StreamContext {
let estimated_tokens = content.len() / 4;
self.response_tokens += estimated_tokens.max(1);
debug!(
"[ARCHGW_REQ_ID:{}] STREAMING_TOKEN_UPDATE: delta_chars={} estimated_tokens={} total_tokens={}",
"[PLANO_REQ_ID:{}] STREAMING_TOKEN_UPDATE: delta_chars={} estimated_tokens={} total_tokens={}",
self.request_identifier(),
content.len(),
estimated_tokens.max(1),
@ -465,7 +485,7 @@ impl StreamContext {
}
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] STREAMING_CHUNK_ERROR: {}",
"[PLANO_REQ_ID:{}] STREAMING_CHUNK_ERROR: {}",
self.request_identifier(),
e
);
@ -487,7 +507,7 @@ impl StreamContext {
if !bytes.is_empty() {
let content = String::from_utf8_lossy(&bytes);
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_TRANSFORMED_CLIENT_RESPONSE: size={} content={}",
"[PLANO_REQ_ID:{}] UPSTREAM_TRANSFORMED_CLIENT_RESPONSE: size={} content={}",
self.request_identifier(),
bytes.len(),
content
@ -525,7 +545,7 @@ impl StreamContext {
Ok(buffer) => Some(buffer),
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] BEDROCK_BUFFER_INIT_ERROR: {}",
"[PLANO_REQ_ID:{}] BEDROCK_BUFFER_INIT_ERROR: {}",
self.request_identifier(),
e
);
@ -555,7 +575,7 @@ impl StreamContext {
let estimated_tokens = content.len() / 4;
self.response_tokens += estimated_tokens.max(1);
debug!(
"[ARCHGW_REQ_ID:{}] BEDROCK_TOKEN_UPDATE: delta_chars={} estimated_tokens={} total_tokens={}",
"[PLANO_REQ_ID:{}] BEDROCK_TOKEN_UPDATE: delta_chars={} estimated_tokens={} total_tokens={}",
self.request_identifier(),
content.len(),
estimated_tokens.max(1),
@ -573,7 +593,7 @@ impl StreamContext {
}
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] BEDROCK_FRAME_CONVERSION_ERROR: {}",
"[PLANO_REQ_ID:{}] BEDROCK_FRAME_CONVERSION_ERROR: {}",
self.request_identifier(),
e
);
@ -583,7 +603,7 @@ impl StreamContext {
Some(DecodedFrame::Incomplete) => {
// Incomplete frame - buffer retains partial data, wait for more bytes
debug!(
"[ARCHGW_REQ_ID:{}] BEDROCK_INCOMPLETE_FRAME: waiting for more data",
"[PLANO_REQ_ID:{}] BEDROCK_INCOMPLETE_FRAME: waiting for more data",
self.request_identifier()
);
break;
@ -591,7 +611,7 @@ impl StreamContext {
None => {
// Decode error
warn!(
"[ARCHGW_REQ_ID:{}] BEDROCK_DECODE_ERROR",
"[PLANO_REQ_ID:{}] BEDROCK_DECODE_ERROR",
self.request_identifier()
);
return Err(Action::Continue);
@ -606,7 +626,7 @@ impl StreamContext {
if !bytes.is_empty() {
let content = String::from_utf8_lossy(&bytes);
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_TRANSFORMED_CLIENT_RESPONSE: size={} content={}",
"[PLANO_REQ_ID:{}] UPSTREAM_TRANSFORMED_CLIENT_RESPONSE: size={} content={}",
self.request_identifier(),
bytes.len(),
content
@ -616,7 +636,7 @@ impl StreamContext {
}
None => {
warn!(
"[ARCHGW_REQ_ID:{}] BEDROCK_BUFFER_MISSING",
"[PLANO_REQ_ID:{}] BEDROCK_BUFFER_MISSING",
self.request_identifier()
);
Err(Action::Continue)
@ -630,7 +650,7 @@ impl StreamContext {
provider_id: ProviderId,
) -> Result<Vec<u8>, Action> {
debug!(
"[ARCHGW_REQ_ID:{}] NON_STREAMING_PROCESS: provider_id={:?} body_size={}",
"[PLANO_REQ_ID:{}] NON_STREAMING_PROCESS: provider_id={:?} body_size={}",
self.request_identifier(),
provider_id,
body.len()
@ -642,7 +662,7 @@ impl StreamContext {
Ok(response) => response,
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_PARSE_ERROR: {} | body: {}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_PARSE_ERROR: {} | body: {}",
self.request_identifier(),
e,
String::from_utf8_lossy(body)
@ -657,7 +677,7 @@ impl StreamContext {
}
None => {
warn!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_ERROR: missing client_api",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_ERROR: missing client_api",
self.request_identifier()
);
return Err(Action::Continue);
@ -669,7 +689,7 @@ impl StreamContext {
response.extract_usage_counts()
{
debug!(
"[ARCHGW_REQ_ID:{}] RESPONSE_USAGE: prompt_tokens={} completion_tokens={} total_tokens={}",
"[PLANO_REQ_ID:{}] RESPONSE_USAGE: prompt_tokens={} completion_tokens={} total_tokens={}",
self.request_identifier(),
prompt_tokens,
completion_tokens,
@ -678,7 +698,7 @@ impl StreamContext {
self.response_tokens = completion_tokens;
} else {
warn!(
"[ARCHGW_REQ_ID:{}] RESPONSE_USAGE: no usage information found",
"[PLANO_REQ_ID:{}] RESPONSE_USAGE: no usage information found",
self.request_identifier()
);
}
@ -686,7 +706,7 @@ impl StreamContext {
match serde_json::to_vec(&response) {
Ok(bytes) => {
debug!(
"[ARCHGW_REQ_ID:{}] CLIENT_RESPONSE_PAYLOAD: {}",
"[PLANO_REQ_ID:{}] CLIENT_RESPONSE_PAYLOAD: {}",
self.request_identifier(),
String::from_utf8_lossy(&bytes)
);
@ -763,7 +783,7 @@ impl HttpContext for StreamContext {
Some(provider_id.compatible_api_for_client(api, self.streaming_response));
debug!(
"[ARCHGW_REQ_ID:{}] ROUTING_INFO: provider='{}' client_api={:?} resolved_api={:?} request_path='{}'",
"[PLANO_REQ_ID:{}] ROUTING_INFO: provider='{}' client_api={:?} resolved_api={:?} request_path='{}'",
self.request_identifier(),
provider.to_provider_id(),
api,
@ -816,7 +836,7 @@ impl HttpContext for StreamContext {
fn on_http_request_body(&mut self, body_size: usize, end_of_stream: bool) -> Action {
debug!(
"[ARCHGW_REQ_ID:{}] REQUEST_BODY_CHUNK: bytes={} end_stream={}",
"[PLANO_REQ_ID:{}] REQUEST_BODY_CHUNK: bytes={} end_stream={}",
self.request_identifier(),
body_size,
end_of_stream
@ -855,14 +875,14 @@ impl HttpContext for StreamContext {
let mut deserialized_client_request: ProviderRequestType = match self.client_api.as_ref() {
Some(the_client_api) => {
info!(
"[ARCHGW_REQ_ID:{}] CLIENT_REQUEST_RECEIVED: api={:?} body_size={}",
"[PLANO_REQ_ID:{}] CLIENT_REQUEST_RECEIVED: api={:?} body_size={}",
self.request_identifier(),
the_client_api,
body_bytes.len()
);
debug!(
"[ARCHGW_REQ_ID:{}] CLIENT_REQUEST_PAYLOAD: {}",
"[PLANO_REQ_ID:{}] CLIENT_REQUEST_PAYLOAD: {}",
self.request_identifier(),
String::from_utf8_lossy(&body_bytes)
);
@ -871,7 +891,7 @@ impl HttpContext for StreamContext {
Ok(deserialized) => deserialized,
Err(e) => {
warn!(
"[ARCHGW_REQ_ID:{}] CLIENT_REQUEST_PARSE_ERROR: {} | body: {}",
"[PLANO_REQ_ID:{}] CLIENT_REQUEST_PARSE_ERROR: {} | body: {}",
self.request_identifier(),
e,
String::from_utf8_lossy(&body_bytes)
@ -914,7 +934,7 @@ impl HttpContext for StreamContext {
"agent_orchestrator".to_string()
} else {
warn!(
"[ARCHGW_REQ_ID:{}] MODEL_RESOLUTION_ERROR: no model specified | req_model='{}' provider='{}' config_model={:?}",
"[PLANO_REQ_ID:{}] MODEL_RESOLUTION_ERROR: no model specified | req_model='{}' provider='{}' config_model={:?}",
self.request_identifier(),
model_requested,
self.llm_provider().name,
@ -943,7 +963,7 @@ impl HttpContext for StreamContext {
self.user_message = deserialized_client_request.get_recent_user_message();
info!(
"[ARCHGW_REQ_ID:{}] MODEL_RESOLUTION: req_model='{}' -> resolved_model='{}' provider='{}' streaming={}",
"[PLANO_REQ_ID:{}] MODEL_RESOLUTION: req_model='{}' -> resolved_model='{}' provider='{}' streaming={}",
self.request_identifier(),
model_requested,
resolved_model,
@ -974,14 +994,14 @@ impl HttpContext for StreamContext {
match self.resolved_api.as_ref() {
Some(upstream) => {
info!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_TRANSFORM: client_api={:?} -> upstream_api={:?}",
"[PLANO_REQ_ID:{}] UPSTREAM_TRANSFORM: client_api={:?} -> upstream_api={:?}",
self.request_identifier(), self.client_api, upstream
);
match ProviderRequestType::try_from((deserialized_client_request, upstream)) {
Ok(request) => {
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_REQUEST_PAYLOAD: {}",
"[PLANO_REQ_ID:{}] UPSTREAM_REQUEST_PAYLOAD: {}",
self.request_identifier(),
String::from_utf8_lossy(&request.to_bytes().unwrap_or_default())
);
@ -1032,7 +1052,7 @@ impl HttpContext for StreamContext {
self.upstream_status_code = StatusCode::from_u16(status_code).ok();
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RESPONSE_STATUS: {}",
"[PLANO_REQ_ID:{}] UPSTREAM_RESPONSE_STATUS: {}",
self.request_identifier(),
status_code
);
@ -1059,7 +1079,7 @@ impl HttpContext for StreamContext {
let current_time = get_current_time().unwrap();
if end_of_stream && body_size == 0 {
debug!(
"[ARCHGW_REQ_ID:{}] RESPONSE_BODY_COMPLETE: total_bytes={}",
"[PLANO_REQ_ID:{}] RESPONSE_BODY_COMPLETE: total_bytes={}",
self.request_identifier(),
body_size
);
@ -1071,7 +1091,7 @@ impl HttpContext for StreamContext {
if let Some(status_code) = &self.upstream_status_code {
if status_code.is_client_error() || status_code.is_server_error() {
info!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_ERROR_RESPONSE: status={} body_size={}",
"[PLANO_REQ_ID:{}] UPSTREAM_ERROR_RESPONSE: status={} body_size={}",
self.request_identifier(),
status_code.as_u16(),
body_size
@ -1081,7 +1101,7 @@ impl HttpContext for StreamContext {
if body_size > 0 {
if let Ok(body) = self.read_raw_response_body(body_size) {
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_ERROR_BODY: {}",
"[PLANO_REQ_ID:{}] UPSTREAM_ERROR_BODY: {}",
self.request_identifier(),
String::from_utf8_lossy(&body)
);
@ -1103,7 +1123,7 @@ impl HttpContext for StreamContext {
None => "None".to_string(),
};
info!(
"[ARCHGW_REQ_ID:{}], UNSUPPORTED API: {}",
"[PLANO_REQ_ID:{}], UNSUPPORTED API: {}",
self.request_identifier(),
api_info
);
@ -1117,7 +1137,7 @@ impl HttpContext for StreamContext {
};
debug!(
"[ARCHGW_REQ_ID:{}] UPSTREAM_RAW_RESPONSE: body_size={} content={}",
"[PLANO_REQ_ID:{}] UPSTREAM_RAW_RESPONSE: body_size={} content={}",
self.request_identifier(),
body.len(),
String::from_utf8_lossy(&body)