plano orchestration using plano orchestration 4b model (#637)

This commit is contained in:
Adil Hafeez 2025-12-22 18:05:49 -08:00 committed by GitHub
parent 60162e0575
commit 15fbb6c3af
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
40 changed files with 4054 additions and 449 deletions

View file

@ -36,6 +36,7 @@ properties:
type: string
enum:
- mcp
- rest
transport:
type: string
enum:
@ -61,6 +62,10 @@ properties:
type: string
timeout:
type: string
router:
type: string
enum:
- plano_orchestrator_v1
type:
type: string
enum:

View file

@ -2,7 +2,7 @@
nodaemon=true
[program:brightstaff]
command=sh -c "envsubst < /app/arch_config_rendered.yaml > /app/arch_config_rendered.env_sub.yaml && RUST_LOG=debug ARCH_CONFIG_PATH_RENDERED=/app/arch_config_rendered.env_sub.yaml /app/brightstaff 2>&1 | tee /var/log/brightstaff.log | while IFS= read -r line; do echo '[brightstaff]' \"$line\"; done"
command=sh -c "envsubst < /app/arch_config_rendered.yaml > /app/arch_config_rendered.env_sub.yaml && RUST_LOG=info ARCH_CONFIG_PATH_RENDERED=/app/arch_config_rendered.env_sub.yaml /app/brightstaff 2>&1 | tee /var/log/brightstaff.log | while IFS= read -r line; do echo '[brightstaff]' \"$line\"; done"
stdout_logfile=/dev/stdout
redirect_stderr=true
stdout_logfile_maxbytes=0

View file

@ -323,6 +323,15 @@ def validate_and_render_schema():
}
)
if "plano-orchestrator" not in model_provider_name_set:
updated_model_providers.append(
{
"name": "plano-orchestrator",
"provider_interface": "arch",
"model": "Plano-Orchestrator",
}
)
config_yaml["model_providers"] = deepcopy(updated_model_providers)
listeners_with_provider = 0

View file

@ -5,7 +5,7 @@ failed_files=()
for file in $(find . -name arch_config.yaml -o -name arch_config_full_reference.yaml); do
echo "Validating ${file}..."
touch $(pwd)/${file}_rendered
if ! docker run --rm -v "$(pwd)/${file}:/app/arch_config.yaml:ro" -v "$(pwd)/${file}_rendered:/app/arch_config_rendered.yaml:rw" --entrypoint /bin/sh katanemo/archgw:latest -c "python -m cli.config_generator" 2>&1 > /dev/null ; then
if ! docker run --rm -v "$(pwd)/${file}:/app/arch_config.yaml:ro" -v "$(pwd)/${file}_rendered:/app/arch_config_rendered.yaml:rw" --entrypoint /bin/sh katanemo/archgw:0.3.22 -c "python -m cli.config_generator" 2>&1 > /dev/null ; then
echo "Validation failed for $file"
failed_files+=("$file")
fi

View file

@ -17,7 +17,7 @@ use tracing::{debug, info, warn};
use super::agent_selector::{AgentSelectionError, AgentSelector};
use super::pipeline_processor::{PipelineError, PipelineProcessor};
use super::response_handler::ResponseHandler;
use crate::router::llm_router::RouterService;
use crate::router::plano_orchestrator::OrchestratorService;
use crate::tracing::{OperationNameBuilder, operation_component, http};
/// Main errors for agent chat completions
@ -37,7 +37,7 @@ pub enum AgentFilterChainError {
pub async fn agent_chat(
request: Request<hyper::body::Incoming>,
router_service: Arc<RouterService>,
orchestrator_service: Arc<OrchestratorService>,
_: String,
agents_list: Arc<tokio::sync::RwLock<Option<Vec<common::configuration::Agent>>>>,
listeners: Arc<tokio::sync::RwLock<Vec<common::configuration::Listener>>>,
@ -45,7 +45,7 @@ pub async fn agent_chat(
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
match handle_agent_chat(
request,
router_service,
orchestrator_service,
agents_list,
listeners,
trace_collector,
@ -123,13 +123,13 @@ pub async fn agent_chat(
async fn handle_agent_chat(
request: Request<hyper::body::Incoming>,
router_service: Arc<RouterService>,
orchestrator_service: Arc<OrchestratorService>,
agents_list: Arc<tokio::sync::RwLock<Option<Vec<common::configuration::Agent>>>>,
listeners: Arc<tokio::sync::RwLock<Vec<common::configuration::Listener>>>,
trace_collector: Arc<common::traces::TraceCollector>,
) -> Result<Response<BoxBody<Bytes, hyper::Error>>, AgentFilterChainError> {
// Initialize services
let agent_selector = AgentSelector::new(router_service);
let agent_selector = AgentSelector::new(orchestrator_service);
let mut pipeline_processor = PipelineProcessor::default();
let response_handler = ResponseHandler::new();
@ -186,15 +186,6 @@ async fn handle_agent_chat(
let message: Vec<OpenAIMessage> = client_request.get_messages();
// let chat_completions_request: ChatCompletionsRequest =
// serde_json::from_slice(&chat_request_bytes).map_err(|err| {
// warn!(
// "Failed to parse request body as ChatCompletionsRequest: {}",
// err
// );
// AgentFilterChainError::RequestParsing(err)
// })?;
// Extract trace parent for routing
let trace_parent = request_headers
.iter()
@ -215,94 +206,166 @@ async fn handle_agent_chat(
(String::new(), None)
};
// Select appropriate agent using arch router llm model
let selected_agent = agent_selector
.select_agent(&message, &listener, trace_parent.clone())
// Select appropriate agents using arch orchestrator llm model
let selection_span_id = generate_random_span_id();
let selection_start_time = SystemTime::now();
let selection_start_instant = Instant::now();
let selected_agents = agent_selector
.select_agents(&message, &listener, trace_parent.clone())
.await?;
debug!("Processing agent pipeline: {}", selected_agent.id);
// Record the start time for agent span
let agent_start_time = SystemTime::now();
let agent_start_instant = Instant::now();
// let (span_id, trace_id) = trace_collector.start_span(
// trace_parent.clone(),
// operation_component::AGENT,
// &format!("/agents{}", request_path),
// &selected_agent.id,
// );
let span_id = generate_random_span_id();
// Process the filter chain
let chat_history = pipeline_processor
.process_filter_chain(
&message,
&selected_agent,
&agent_map,
&request_headers,
Some(&trace_collector),
trace_id.clone(),
span_id.clone(),
)
.await?;
// Get terminal agent and send final response
let terminal_agent_name = selected_agent.id.clone();
let terminal_agent = agent_map.get(&terminal_agent_name).unwrap();
debug!("Processing terminal agent: {}", terminal_agent_name);
debug!("Terminal agent details: {:?}", terminal_agent);
let llm_response = pipeline_processor
.invoke_agent(
&chat_history,
client_request,
terminal_agent,
&request_headers,
trace_id.clone(),
span_id.clone(),
)
.await?;
// Record agent span after processing is complete
let agent_end_time = SystemTime::now();
let agent_elapsed = agent_start_instant.elapsed();
// Build full path with /agents prefix
let full_path = format!("/agents{}", request_path);
// Build operation name: POST {full_path} {agent_name}
let operation_name = OperationNameBuilder::new()
// Record agent selection span
let selection_end_time = SystemTime::now();
let selection_elapsed = selection_start_instant.elapsed();
let selection_operation_name = OperationNameBuilder::new()
.with_method("POST")
.with_path(&full_path)
.with_target(&terminal_agent_name)
.with_path("/agents/select")
.with_target(&listener.name)
.build();
let mut span_builder = SpanBuilder::new(&operation_name)
.with_span_id(span_id)
let mut selection_span_builder = SpanBuilder::new(&selection_operation_name)
.with_span_id(selection_span_id)
.with_kind(SpanKind::Internal)
.with_start_time(agent_start_time)
.with_end_time(agent_end_time)
.with_start_time(selection_start_time)
.with_end_time(selection_end_time)
.with_attribute(http::METHOD, "POST")
.with_attribute(http::TARGET, full_path)
.with_attribute("agent.name", terminal_agent_name.clone())
.with_attribute("duration_ms", format!("{:.2}", agent_elapsed.as_secs_f64() * 1000.0));
.with_attribute(http::TARGET, "/agents/select")
.with_attribute("selection.listener", listener.name.clone())
.with_attribute("selection.agent_count", selected_agents.len().to_string())
.with_attribute("selection.agents", selected_agents.iter().map(|a| a.id.as_str()).collect::<Vec<_>>().join(","))
.with_attribute("duration_ms", format!("{:.2}", selection_elapsed.as_secs_f64() * 1000.0));
if !trace_id.is_empty() {
span_builder = span_builder.with_trace_id(trace_id);
selection_span_builder = selection_span_builder.with_trace_id(trace_id.clone());
}
if let Some(parent_id) = parent_span_id {
span_builder = span_builder.with_parent_span_id(parent_id);
if let Some(parent_id) = parent_span_id.clone() {
selection_span_builder = selection_span_builder.with_parent_span_id(parent_id);
}
let span = span_builder.build();
// Use plano(agent) as service name for the agent processing span
trace_collector.record_span(operation_component::AGENT, span);
let selection_span = selection_span_builder.build();
trace_collector.record_span(operation_component::ORCHESTRATOR, selection_span);
// Create streaming response
response_handler
.create_streaming_response(llm_response)
.await
.map_err(AgentFilterChainError::from)
info!("Selected {} agent(s) for execution", selected_agents.len());
// Execute agents sequentially, passing output from one to the next
let mut current_messages = message.clone();
let agent_count = selected_agents.len();
for (agent_index, selected_agent) in selected_agents.iter().enumerate() {
let is_last_agent = agent_index == agent_count - 1;
debug!(
"Processing agent {}/{}: {}",
agent_index + 1,
agent_count,
selected_agent.id
);
// Record the start time for agent span
let agent_start_time = SystemTime::now();
let agent_start_instant = Instant::now();
let span_id = generate_random_span_id();
// Get agent name
let agent_name = selected_agent.id.clone();
// Process the filter chain
let chat_history = pipeline_processor
.process_filter_chain(
&current_messages,
selected_agent,
&agent_map,
&request_headers,
Some(&trace_collector),
trace_id.clone(),
span_id.clone(),
)
.await?;
// Get agent details and invoke
let agent = agent_map.get(&agent_name).unwrap();
debug!("Invoking agent: {}", agent_name);
let llm_response = pipeline_processor
.invoke_agent(
&chat_history,
client_request.clone(),
agent,
&request_headers,
trace_id.clone(),
span_id.clone(),
)
.await?;
// Record agent span
let agent_end_time = SystemTime::now();
let agent_elapsed = agent_start_instant.elapsed();
let full_path = format!("/agents{}", request_path);
let operation_name = OperationNameBuilder::new()
.with_method("POST")
.with_path(&full_path)
.with_target(&agent_name)
.build();
let mut span_builder = SpanBuilder::new(&operation_name)
.with_span_id(span_id)
.with_kind(SpanKind::Internal)
.with_start_time(agent_start_time)
.with_end_time(agent_end_time)
.with_attribute(http::METHOD, "POST")
.with_attribute(http::TARGET, full_path)
.with_attribute("agent.name", agent_name.clone())
.with_attribute("agent.sequence", format!("{}/{}", agent_index + 1, agent_count))
.with_attribute("duration_ms", format!("{:.2}", agent_elapsed.as_secs_f64() * 1000.0));
if !trace_id.is_empty() {
span_builder = span_builder.with_trace_id(trace_id.clone());
}
if let Some(parent_id) = parent_span_id.clone() {
span_builder = span_builder.with_parent_span_id(parent_id);
}
let span = span_builder.build();
trace_collector.record_span(operation_component::AGENT, span);
// If this is the last agent, return the streaming response
if is_last_agent {
info!("Completed agent chain, returning response from last agent: {}", agent_name);
return response_handler
.create_streaming_response(llm_response)
.await
.map_err(AgentFilterChainError::from);
}
// For intermediate agents, collect the full response and pass to next agent
debug!("Collecting response from intermediate agent: {}", agent_name);
let response_text = response_handler.collect_full_response(llm_response).await?;
info!(
"Agent {} completed, passing {} character response to next agent",
agent_name,
response_text.len()
);
// remove last message and add new one at the end
let last_message = current_messages.pop().unwrap();
// Create a new message with the agent's response as assistant message
// and add it to the conversation history
current_messages.push(OpenAIMessage {
role: hermesllm::apis::openai::Role::Assistant,
content: hermesllm::apis::openai::MessageContent::Text(response_text),
name: Some(agent_name.clone()),
tool_calls: None,
tool_call_id: None,
});
current_messages.push(last_message);
}
// This should never be reached since we return in the last agent iteration
unreachable!("Agent execution loop should have returned a response")
}

View file

@ -2,12 +2,12 @@ use std::collections::HashMap;
use std::sync::Arc;
use common::configuration::{
Agent, AgentFilterChain, Listener, ModelUsagePreference, RoutingPreference,
Agent, AgentFilterChain, Listener, AgentUsagePreference, OrchestrationPreference,
};
use hermesllm::apis::openai::Message;
use tracing::{debug, warn};
use crate::router::llm_router::RouterService;
use crate::router::plano_orchestrator::OrchestratorService;
/// Errors that can occur during agent selection
#[derive(Debug, thiserror::Error)]
@ -16,23 +16,23 @@ pub enum AgentSelectionError {
ListenerNotFound(String),
#[error("No agents configured for listener: {0}")]
NoAgentsConfigured(String),
#[error("Routing service error: {0}")]
RoutingError(String),
#[error("Default agent not found for listener: {0}")]
DefaultAgentNotFound(String),
#[error("MCP client error: {0}")]
McpError(String),
#[error("Orchestration service error: {0}")]
OrchestrationError(String),
}
/// Service for selecting agents based on routing preferences and listener configuration
/// Service for selecting agents based on orchestration preferences and listener configuration
pub struct AgentSelector {
router_service: Arc<RouterService>,
orchestrator_service: Arc<OrchestratorService>,
}
impl AgentSelector {
pub fn new(router_service: Arc<RouterService>) -> Self {
pub fn new(orchestrator_service: Arc<OrchestratorService>) -> Self {
Self {
router_service,
orchestrator_service,
}
}
@ -63,59 +63,6 @@ impl AgentSelector {
.collect()
}
/// Select appropriate agent based on routing preferences
pub async fn select_agent(
&self,
messages: &[Message],
listener: &Listener,
trace_parent: Option<String>,
) -> Result<AgentFilterChain, AgentSelectionError> {
let agents = listener
.agents
.as_ref()
.ok_or_else(|| AgentSelectionError::NoAgentsConfigured(listener.name.clone()))?;
// If only one agent, skip routing
if agents.len() == 1 {
debug!("Only one agent available, skipping routing");
return Ok(agents[0].clone());
}
let usage_preferences = self
.convert_agent_description_to_routing_preferences(agents)
.await;
debug!(
"Agents usage preferences for agent routing str: {}",
serde_json::to_string(&usage_preferences).unwrap_or_default()
);
match self
.router_service
.determine_route(messages, trace_parent, Some(usage_preferences))
.await
{
Ok(Some((_, agent_name))) => {
debug!("Determined agent: {}", agent_name);
let selected_agent = agents
.iter()
.find(|a| a.id == agent_name)
.cloned()
.ok_or_else(|| {
AgentSelectionError::RoutingError(format!(
"Selected agent '{}' not found in listener agents",
agent_name
))
})?;
Ok(selected_agent)
}
Ok(None) => {
debug!("No agent determined using routing preferences, using default agent");
self.get_default_agent(agents, &listener.name)
}
Err(err) => Err(AgentSelectionError::RoutingError(err.to_string())),
}
}
/// Get the default agent or the first agent if no default is specified
fn get_default_agent(
&self,
@ -136,17 +83,17 @@ impl AgentSelector {
.ok_or_else(|| AgentSelectionError::DefaultAgentNotFound(listener_name.to_string()))
}
/// Convert agent descriptions to routing preferences
async fn convert_agent_description_to_routing_preferences(
/// Convert agent descriptions to orchestration preferences
async fn convert_agent_description_to_orchestration_preferences(
&self,
agents: &[AgentFilterChain],
) -> Vec<ModelUsagePreference> {
) -> Vec<AgentUsagePreference> {
let mut preferences = Vec::new();
for agent_chain in agents {
preferences.push(ModelUsagePreference {
preferences.push(AgentUsagePreference {
model: agent_chain.id.clone(),
routing_preferences: vec![RoutingPreference {
orchestration_preferences: vec![OrchestrationPreference {
name: agent_chain.id.clone(),
description: agent_chain.description.clone().unwrap_or_default(),
}],
@ -155,6 +102,71 @@ impl AgentSelector {
preferences
}
/// Select multiple agents using orchestration
pub async fn select_agents(
&self,
messages: &[Message],
listener: &Listener,
trace_parent: Option<String>,
) -> Result<Vec<AgentFilterChain>, AgentSelectionError> {
let agents = listener
.agents
.as_ref()
.ok_or_else(|| AgentSelectionError::NoAgentsConfigured(listener.name.clone()))?;
// If only one agent, skip orchestration
if agents.len() == 1 {
debug!("Only one agent available, skipping orchestration");
return Ok(vec![agents[0].clone()]);
}
let usage_preferences = self
.convert_agent_description_to_orchestration_preferences(agents)
.await;
debug!(
"Agents usage preferences for orchestration: {}",
serde_json::to_string(&usage_preferences).unwrap_or_default()
);
match self
.orchestrator_service
.determine_orchestration(messages, trace_parent, Some(usage_preferences))
.await
{
Ok(Some(routes)) => {
debug!("Determined {} agent(s) via orchestration", routes.len());
let mut selected_agents = Vec::new();
for (route_name, agent_name) in routes {
debug!("Processing route: {}, agent: {}", route_name, agent_name);
let selected_agent = agents
.iter()
.find(|a| a.id == agent_name)
.cloned()
.ok_or_else(|| {
AgentSelectionError::OrchestrationError(format!(
"Selected agent '{}' not found in listener agents",
agent_name
))
})?;
selected_agents.push(selected_agent);
}
if selected_agents.is_empty() {
debug!("No agents determined using orchestration, using default agent");
Ok(vec![self.get_default_agent(agents, &listener.name)?])
} else {
Ok(selected_agents)
}
}
Ok(None) => {
debug!("No agents determined using orchestration, using default agent");
Ok(vec![self.get_default_agent(agents, &listener.name)?])
}
Err(err) => Err(AgentSelectionError::OrchestrationError(err.to_string())),
}
}
}
#[cfg(test)]
@ -162,12 +174,10 @@ mod tests {
use super::*;
use common::configuration::{AgentFilterChain, Listener};
fn create_test_router_service() -> Arc<RouterService> {
Arc::new(RouterService::new(
vec![], // empty providers for testing
fn create_test_orchestrator_service() -> Arc<OrchestratorService> {
Arc::new(OrchestratorService::new(
"http://localhost:8080".to_string(),
"test-model".to_string(),
"test-provider".to_string(),
))
}
@ -176,7 +186,7 @@ mod tests {
id: name.to_string(),
description: Some(description.to_string()),
default: Some(is_default),
filter_chain: vec![name.to_string()],
filter_chain: Some(vec![name.to_string()]),
}
}
@ -201,8 +211,8 @@ mod tests {
#[tokio::test]
async fn test_find_listener_success() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let selector = AgentSelector::new(orchestrator_service);
let listener1 = create_test_listener("test-listener", vec![]);
let listener2 = create_test_listener("other-listener", vec![]);
@ -218,8 +228,8 @@ mod tests {
#[tokio::test]
async fn test_find_listener_not_found() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let selector = AgentSelector::new(orchestrator_service);
let listeners = vec![create_test_listener("other-listener", vec![])];
@ -236,8 +246,8 @@ mod tests {
#[test]
fn test_create_agent_map() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let selector = AgentSelector::new(orchestrator_service);
let agents = vec![
create_test_agent_struct("agent1"),
@ -251,33 +261,10 @@ mod tests {
assert!(agent_map.contains_key("agent2"));
}
#[tokio::test]
async fn test_convert_agent_description_to_routing_preferences() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let agents = vec![
create_test_agent("agent1", "First agent description", true),
create_test_agent("agent2", "Second agent description", false),
];
let preferences = selector
.convert_agent_description_to_routing_preferences(&agents)
.await;
assert_eq!(preferences.len(), 2);
assert_eq!(preferences[0].model, "agent1");
assert_eq!(preferences[0].routing_preferences[0].name, "agent1");
assert_eq!(
preferences[0].routing_preferences[0].description,
"First agent description"
);
}
#[test]
fn test_get_default_agent() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let selector = AgentSelector::new(orchestrator_service);
let agents = vec![
create_test_agent("agent1", "First agent", false),
@ -293,8 +280,8 @@ mod tests {
#[test]
fn test_get_default_agent_fallback_to_first() {
let router_service = create_test_router_service();
let selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let selector = AgentSelector::new(orchestrator_service);
let agents = vec![
create_test_agent("agent1", "First agent", false),

View file

@ -6,11 +6,11 @@ use hyper::header::HeaderMap;
use crate::handlers::agent_selector::{AgentSelectionError, AgentSelector};
use crate::handlers::pipeline_processor::PipelineProcessor;
use crate::handlers::response_handler::ResponseHandler;
use crate::router::llm_router::RouterService;
use crate::router::plano_orchestrator::OrchestratorService;
/// Integration test that demonstrates the modular agent chat flow
/// This test shows how the three main components work together:
/// 1. AgentSelector - selects the appropriate agent based on routing
/// 1. AgentSelector - selects the appropriate agents based on orchestration
/// 2. PipelineProcessor - executes the agent pipeline
/// 3. ResponseHandler - handles response streaming
#[cfg(test)]
@ -18,12 +18,10 @@ mod integration_tests {
use super::*;
use common::configuration::{Agent, AgentFilterChain, Listener};
fn create_test_router_service() -> Arc<RouterService> {
Arc::new(RouterService::new(
vec![], // empty providers for testing
fn create_test_orchestrator_service() -> Arc<OrchestratorService> {
Arc::new(OrchestratorService::new(
"http://localhost:8080".to_string(),
"test-model".to_string(),
"test-provider".to_string(),
))
}
@ -40,8 +38,8 @@ mod integration_tests {
#[tokio::test]
async fn test_modular_agent_chat_flow() {
// Setup services
let router_service = create_test_router_service();
let agent_selector = AgentSelector::new(router_service);
let orchestrator_service = create_test_orchestrator_service();
let agent_selector = AgentSelector::new(orchestrator_service);
let mut pipeline_processor = PipelineProcessor::default();
// Create test data
@ -64,7 +62,7 @@ mod integration_tests {
let agent_pipeline = AgentFilterChain {
id: "terminal-agent".to_string(),
filter_chain: vec!["filter-agent".to_string(), "terminal-agent".to_string()],
filter_chain: Some(vec!["filter-agent".to_string(), "terminal-agent".to_string()]),
description: Some("Test pipeline".to_string()),
default: Some(true),
};
@ -104,7 +102,7 @@ mod integration_tests {
// Create a pipeline with empty filter chain to avoid network calls
let test_pipeline = AgentFilterChain {
id: "terminal-agent".to_string(),
filter_chain: vec![], // Empty filter chain - no network calls needed
filter_chain: Some(vec![]), // Empty filter chain - no network calls needed
description: None,
default: None,
};
@ -143,7 +141,7 @@ mod integration_tests {
#[tokio::test]
async fn test_error_handling_flow() {
let router_service = create_test_router_service();
let router_service = create_test_orchestrator_service();
let agent_selector = AgentSelector::new(router_service);
// Test listener not found

View file

@ -4,7 +4,7 @@ use common::configuration::{Agent, AgentFilterChain};
use common::consts::{
ARCH_UPSTREAM_HOST_HEADER, BRIGHT_STAFF_SERVICE_NAME, ENVOY_RETRY_HEADER, TRACE_PARENT_HEADER,
};
use common::traces::{SpanBuilder, SpanKind, generate_random_span_id};
use common::traces::{generate_random_span_id, SpanBuilder, SpanKind};
use hermesllm::apis::openai::Message;
use hermesllm::{ProviderRequest, ProviderRequestType};
use hyper::header::HeaderMap;
@ -200,7 +200,13 @@ impl PipelineProcessor {
) -> Result<Vec<Message>, PipelineError> {
let mut chat_history_updated = chat_history.to_vec();
for agent_name in &agent_filter_chain.filter_chain {
// If filter_chain is None or empty, proceed without filtering
let filter_chain = match agent_filter_chain.filter_chain.as_ref() {
Some(fc) if !fc.is_empty() => fc,
_ => return Ok(chat_history_updated),
};
for agent_name in filter_chain {
debug!("Processing filter agent: {}", agent_name);
let agent = agent_map
@ -210,10 +216,11 @@ impl PipelineProcessor {
let tool_name = agent.tool.as_deref().unwrap_or(&agent.id);
info!(
"executing filter: {}/{}, url: {}, conversation length: {}",
"executing filter: {}/{}, url: {}, type: {}, conversation length: {}",
agent_name,
tool_name,
agent.url,
agent.agent_type.as_deref().unwrap_or("mcp"),
chat_history.len()
);
@ -223,16 +230,29 @@ impl PipelineProcessor {
// Generate filter span ID before execution so MCP spans can use it as parent
let filter_span_id = generate_random_span_id();
chat_history_updated = self
.execute_filter(
&chat_history_updated,
agent,
request_headers,
trace_collector,
trace_id.clone(),
filter_span_id.clone(),
)
.await?;
if agent.agent_type.as_deref().unwrap_or("mcp") == "mcp" {
chat_history_updated = self
.execute_mcp_filter(
&chat_history_updated,
agent,
request_headers,
trace_collector,
trace_id.clone(),
filter_span_id.clone(),
)
.await?;
} else {
chat_history_updated = self
.execute_rest_filter(
&chat_history_updated,
agent,
request_headers,
trace_collector,
trace_id.clone(),
filter_span_id.clone(),
)
.await?;
}
let end_time = SystemTime::now();
let elapsed = start_instant.elapsed();
@ -406,7 +426,7 @@ impl PipelineProcessor {
}
/// Send request to a specific agent and return the response content
async fn execute_filter(
async fn execute_mcp_filter(
&mut self,
messages: &[Message],
agent: &Agent,
@ -420,11 +440,7 @@ impl PipelineProcessor {
session_id.clone()
} else {
let session_id = self
.get_new_session_id(
&agent.id,
trace_id.clone(),
filter_span_id.clone(),
)
.get_new_session_id(&agent.id, trace_id.clone(), filter_span_id.clone())
.await;
self.agent_id_session_map
.insert(agent.id.clone(), session_id.clone());
@ -444,19 +460,20 @@ impl PipelineProcessor {
let mcp_span_id = generate_random_span_id();
// Build headers
let agent_headers =
self.build_mcp_headers(request_headers, &agent.id, Some(&mcp_session_id), trace_id.clone(), mcp_span_id.clone())?;
let agent_headers = self.build_mcp_headers(
request_headers,
&agent.id,
Some(&mcp_session_id),
trace_id.clone(),
mcp_span_id.clone(),
)?;
// Send request with tracing
let start_time = SystemTime::now();
let start_instant = Instant::now();
let response = self
.send_mcp_request(
&json_rpc_request,
agent_headers,
&agent.id,
)
.send_mcp_request(&json_rpc_request, agent_headers, &agent.id)
.await?;
let http_status = response.status();
let response_bytes = response.bytes().await?;
@ -598,7 +615,13 @@ impl PipelineProcessor {
let notification_body = serde_json::to_string(&initialized_notification)?;
debug!("Sending initialized notification for agent {}", agent_id);
let headers = self.build_mcp_headers(&HeaderMap::new(), agent_id, Some(session_id), trace_id.clone(), parent_span_id.clone())?;
let headers = self.build_mcp_headers(
&HeaderMap::new(),
agent_id,
Some(session_id),
trace_id.clone(),
parent_span_id.clone(),
)?;
let response = self
.client
@ -626,7 +649,13 @@ impl PipelineProcessor {
let initialize_request = self.build_initialize_request();
let headers = self
.build_mcp_headers(&HeaderMap::new(), agent_id, None, trace_id.clone(), parent_span_id.clone())
.build_mcp_headers(
&HeaderMap::new(),
agent_id,
None,
trace_id.clone(),
parent_span_id.clone(),
)
.expect("Failed to build headers for initialization");
let response = self
@ -661,6 +690,129 @@ impl PipelineProcessor {
session_id
}
/// Execute a REST-based filter agent
async fn execute_rest_filter(
&mut self,
messages: &[Message],
agent: &Agent,
request_headers: &HeaderMap,
trace_collector: Option<&std::sync::Arc<common::traces::TraceCollector>>,
trace_id: String,
filter_span_id: String,
) -> Result<Vec<Message>, PipelineError> {
let tool_name = agent.tool.as_deref().unwrap_or(&agent.id);
// Generate span ID for this REST call (child of filter span)
let rest_span_id = generate_random_span_id();
// Build headers
let trace_parent = format!("00-{}-{}-01", trace_id, rest_span_id);
let mut agent_headers = request_headers.clone();
agent_headers.remove(hyper::header::CONTENT_LENGTH);
agent_headers.remove(TRACE_PARENT_HEADER);
agent_headers.insert(
TRACE_PARENT_HEADER,
hyper::header::HeaderValue::from_str(&trace_parent).unwrap(),
);
agent_headers.insert(
ARCH_UPSTREAM_HOST_HEADER,
hyper::header::HeaderValue::from_str(&agent.id)
.map_err(|_| PipelineError::AgentNotFound(agent.id.clone()))?,
);
agent_headers.insert(
ENVOY_RETRY_HEADER,
hyper::header::HeaderValue::from_str("3").unwrap(),
);
agent_headers.insert(
"Accept",
hyper::header::HeaderValue::from_static("application/json"),
);
agent_headers.insert(
"Content-Type",
hyper::header::HeaderValue::from_static("application/json"),
);
// Send request with tracing
let start_time = SystemTime::now();
let start_instant = Instant::now();
debug!(
"Sending REST request to agent {} at URL: {}",
agent.id, agent.url
);
// Send messages array directly as request body
let response = self
.client
.post(&agent.url)
.headers(agent_headers)
.json(&messages)
.send()
.await?;
let http_status = response.status();
let response_bytes = response.bytes().await?;
let end_time = SystemTime::now();
let elapsed = start_instant.elapsed();
// Record REST call span
if let Some(collector) = trace_collector {
let mut attrs = HashMap::new();
attrs.insert("rest.tool_name", tool_name.to_string());
attrs.insert("rest.url", agent.url.clone());
attrs.insert("http.status_code", http_status.as_u16().to_string());
self.record_mcp_span(
collector,
"rest_call",
&agent.id,
start_time,
end_time,
elapsed,
Some(attrs),
trace_id.clone(),
filter_span_id.clone(),
Some(rest_span_id),
);
}
// Handle HTTP errors
if !http_status.is_success() {
let error_body = String::from_utf8_lossy(&response_bytes).to_string();
return Err(if http_status.is_client_error() {
PipelineError::ClientError {
agent: agent.id.clone(),
status: http_status.as_u16(),
body: error_body,
}
} else {
PipelineError::ServerError {
agent: agent.id.clone(),
status: http_status.as_u16(),
body: error_body,
}
});
}
info!(
"Response from REST agent {}: {}",
agent.id,
String::from_utf8_lossy(&response_bytes)
);
// Parse response - expecting array of messages directly
let messages: Vec<Message> =
serde_json::from_slice(&response_bytes).map_err(PipelineError::ParseError)?;
Ok(messages)
}
/// Send request to terminal agent and return the raw response for streaming
pub async fn invoke_agent(
&self,
@ -734,7 +886,7 @@ mod tests {
fn create_test_pipeline(agents: Vec<&str>) -> AgentFilterChain {
AgentFilterChain {
id: "test-agent".to_string(),
filter_chain: agents.iter().map(|s| s.to_string()).collect(),
filter_chain: Some(agents.iter().map(|s| s.to_string()).collect()),
description: None,
default: None,
}
@ -751,7 +903,15 @@ mod tests {
let pipeline = create_test_pipeline(vec!["nonexistent-agent", "terminal-agent"]);
let result = processor
.process_filter_chain(&messages, &pipeline, &agent_map, &request_headers, None, String::new(), String::new())
.process_filter_chain(
&messages,
&pipeline,
&agent_map,
&request_headers,
None,
String::new(),
String::new(),
)
.await;
assert!(result.is_err());
@ -785,7 +945,14 @@ mod tests {
let request_headers = HeaderMap::new();
let result = processor
.execute_filter(&messages, &agent, &request_headers, None, "trace-123".to_string(), "span-123".to_string())
.execute_mcp_filter(
&messages,
&agent,
&request_headers,
None,
"trace-123".to_string(),
"span-123".to_string(),
)
.await;
match result {
@ -824,7 +991,14 @@ mod tests {
let request_headers = HeaderMap::new();
let result = processor
.execute_filter(&messages, &agent, &request_headers, None, "trace-456".to_string(), "span-456".to_string())
.execute_mcp_filter(
&messages,
&agent,
&request_headers,
None,
"trace-456".to_string(),
"span-456".to_string(),
)
.await;
match result {
@ -876,7 +1050,14 @@ mod tests {
let request_headers = HeaderMap::new();
let result = processor
.execute_filter(&messages, &agent, &request_headers, None, "trace-789".to_string(), "span-789".to_string())
.execute_mcp_filter(
&messages,
&agent,
&request_headers,
None,
"trace-789".to_string(),
"span-789".to_string(),
)
.await;
match result {

View file

@ -1,4 +1,7 @@
use bytes::Bytes;
use hermesllm::apis::OpenAIApi;
use hermesllm::clients::{SupportedAPIsFromClient, SupportedUpstreamAPIs};
use hermesllm::SseEvent;
use http_body_util::combinators::BoxBody;
use http_body_util::{BodyExt, Full, StreamBody};
use hyper::body::Frame;
@ -6,7 +9,7 @@ use hyper::{Response, StatusCode};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tokio_stream::StreamExt;
use tracing::warn;
use tracing::{info, warn};
/// Errors that can occur during response handling
#[derive(Debug, thiserror::Error)]
@ -113,6 +116,74 @@ impl ResponseHandler {
.body(stream_body)
.map_err(ResponseError::from)
}
/// Collect the full response body as a string
/// This is used for intermediate agents where we need to capture the full response
/// before passing it to the next agent.
///
/// This method handles both streaming and non-streaming responses:
/// - For streaming SSE responses: parses chunks and extracts text deltas
/// - For non-streaming responses: returns the full text
pub async fn collect_full_response(
&self,
llm_response: reqwest::Response,
) -> Result<String, ResponseError> {
use hermesllm::apis::streaming_shapes::sse::SseStreamIter;
let response_headers = llm_response.headers();
let is_sse_streaming = response_headers
.get(hyper::header::CONTENT_TYPE)
.map_or(false, |v| {
v.to_str().unwrap_or("").contains("text/event-stream")
});
let response_bytes = llm_response
.bytes()
.await
.map_err(|e| ResponseError::StreamError(format!("Failed to read response: {}", e)))?;
if is_sse_streaming {
let client_api =
SupportedAPIsFromClient::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
let upstream_api =
SupportedUpstreamAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions);
let sse_iter = SseStreamIter::try_from(response_bytes.as_ref()).unwrap();
let mut accumulated_text = String::new();
for sse_event in sse_iter {
// Skip [DONE] markers and event-only lines
if sse_event.is_done() || sse_event.is_event_only() {
continue;
}
let transformed_event =
SseEvent::try_from((sse_event, &client_api, &upstream_api)).unwrap();
// Try to get provider response and extract content delta
match transformed_event.provider_response() {
Ok(provider_response) => {
if let Some(content) = provider_response.content_delta() {
accumulated_text.push_str(&content);
} else {
info!("No content delta in provider response");
}
}
Err(e) => {
warn!("Failed to parse provider response: {:?}", e);
}
}
}
return Ok(accumulated_text);
} else {
// If not SSE, treat as regular text response
let response_text = String::from_utf8(response_bytes.to_vec()).map_err(|e| {
ResponseError::StreamError(format!("Failed to decode response: {}", e))
})?;
Ok(response_text)
}
}
}
impl Default for ResponseHandler {

View file

@ -3,13 +3,14 @@ use brightstaff::handlers::function_calling::function_calling_chat_handler;
use brightstaff::handlers::llm::llm_chat;
use brightstaff::handlers::models::list_models;
use brightstaff::router::llm_router::RouterService;
use brightstaff::router::plano_orchestrator::OrchestratorService;
use brightstaff::state::StateStorage;
use brightstaff::state::postgresql::PostgreSQLConversationStorage;
use brightstaff::state::memory::MemoryConversationalStorage;
use brightstaff::utils::tracing::init_tracer;
use bytes::Bytes;
use common::configuration::{Agent, Configuration};
use common::consts::{CHAT_COMPLETIONS_PATH, MESSAGES_PATH, OPENAI_RESPONSES_API_PATH};
use common::consts::{CHAT_COMPLETIONS_PATH, MESSAGES_PATH, OPENAI_RESPONSES_API_PATH, PLANO_ORCHESTRATOR_MODEL_NAME};
use common::traces::TraceCollector;
use http_body_util::{combinators::BoxBody, BodyExt, Empty};
use hyper::body::Incoming;
@ -95,10 +96,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let router_service: Arc<RouterService> = Arc::new(RouterService::new(
arch_config.model_providers.clone(),
llm_provider_url.clone() + CHAT_COMPLETIONS_PATH,
routing_model_name,
routing_llm_provider,
routing_model_name.clone(),
routing_llm_provider.clone(),
));
let orchestrator_service: Arc<OrchestratorService> = Arc::new(OrchestratorService::new(
llm_provider_url.clone() + CHAT_COMPLETIONS_PATH,
PLANO_ORCHESTRATOR_MODEL_NAME.to_string(),
));
let model_aliases = Arc::new(arch_config.model_aliases.clone());
// Initialize trace collector and start background flusher
@ -154,6 +161,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let io = TokioIo::new(stream);
let router_service: Arc<RouterService> = Arc::clone(&router_service);
let orchestrator_service: Arc<OrchestratorService> = Arc::clone(&orchestrator_service);
let model_aliases: Arc<
Option<std::collections::HashMap<String, common::configuration::ModelAlias>>,
> = Arc::clone(&model_aliases);
@ -166,6 +174,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let state_storage = state_storage.clone();
let service = service_fn(move |req| {
let router_service = Arc::clone(&router_service);
let orchestrator_service = Arc::clone(&orchestrator_service);
let parent_cx = extract_context_from_request(&req);
let llm_provider_url = llm_provider_url.clone();
let llm_providers = llm_providers.clone();
@ -188,7 +197,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let fully_qualified_url = format!("{}{}", llm_provider_url, stripped_path);
return agent_chat(
req,
router_service,
orchestrator_service,
fully_qualified_url,
agents_list,
listeners,

View file

@ -17,6 +17,7 @@ pub struct RouterService {
router_url: String,
client: reqwest::Client,
router_model: Arc<dyn RouterModel>,
#[allow(dead_code)]
routing_provider_name: String,
llm_usage_defined: bool,
}

View file

@ -1,5 +1,6 @@
pub mod llm_router;
pub mod orchestrator_model;
pub mod orchestrator_model_v1;
pub mod plano_orchestrator;
pub mod router_model;
pub mod router_model_v1;

View file

@ -7,6 +7,8 @@ use tracing::{debug, warn};
use super::orchestrator_model::{OrchestratorModel, OrchestratorModelError};
pub const MAX_TOKEN_LEN: usize = 2048; // Default max token length for the orchestration model
/// Custom JSON formatter that produces spaced JSON (space after colons and commas), same as JSON in python
struct SpacedJsonFormatter;

View file

@ -0,0 +1,162 @@
use std::{collections::HashMap, sync::Arc};
use common::{
configuration::{AgentUsagePreference, OrchestrationPreference},
consts::{ARCH_PROVIDER_HINT_HEADER, PLANO_ORCHESTRATOR_MODEL_NAME},
};
use hermesllm::apis::openai::{ChatCompletionsResponse, Message};
use hyper::header;
use thiserror::Error;
use tracing::{debug, info, warn};
use crate::router::orchestrator_model_v1::{self};
use super::orchestrator_model::OrchestratorModel;
/// Service that calls the Plano orchestrator model to decide which
/// agent(s) should handle a conversation.
pub struct OrchestratorService {
    // Chat-completions endpoint the orchestrator request is POSTed to.
    orchestrator_url: String,
    // Reused HTTP client (keeps connection pooling across requests).
    client: reqwest::Client,
    // Model wrapper that builds the prompt and parses the model's reply.
    orchestrator_model: Arc<dyn OrchestratorModel>,
}
/// Errors surfaced while determining an orchestration route.
#[derive(Debug, Error)]
pub enum OrchestrationError {
    // HTTP-level failure while talking to the orchestrator endpoint.
    #[error("Failed to send request: {0}")]
    RequestError(#[from] reqwest::Error),
    // The orchestrator responded, but the body was not valid JSON;
    // carries the parse error plus the offending body for diagnostics.
    #[error("Failed to parse JSON: {0}, JSON: {1}")]
    JsonError(serde_json::Error, String),
    // The orchestrator model failed to build a request or parse model output.
    #[error("Orchestrator model error: {0}")]
    OrchestratorModelError(#[from] super::orchestrator_model::OrchestratorModelError),
}
pub type Result<T> = std::result::Result<T, OrchestrationError>;
impl OrchestratorService {
pub fn new(
orchestrator_url: String,
orchestration_model_name: String,
) -> Self {
// Empty agent orchestrations - will be provided via usage_preferences in requests
let agent_orchestrations: HashMap<String, Vec<OrchestrationPreference>> = HashMap::new();
let orchestrator_model = Arc::new(orchestrator_model_v1::OrchestratorModelV1::new(
agent_orchestrations,
orchestration_model_name.clone(),
orchestrator_model_v1::MAX_TOKEN_LEN,
));
OrchestratorService {
orchestrator_url,
client: reqwest::Client::new(),
orchestrator_model,
}
}
pub async fn determine_orchestration(
&self,
messages: &[Message],
trace_parent: Option<String>,
usage_preferences: Option<Vec<AgentUsagePreference>>,
) -> Result<Option<Vec<(String, String)>>> {
if messages.is_empty() {
return Ok(None);
}
// Require usage_preferences to be provided
if usage_preferences.is_none() || usage_preferences.as_ref().unwrap().is_empty() {
return Ok(None);
}
let orchestrator_request = self
.orchestrator_model
.generate_request(messages, &usage_preferences);
debug!(
"sending request to arch-orchestrator model: {}, endpoint: {}",
self.orchestrator_model.get_model_name(),
self.orchestrator_url
);
debug!(
"arch orchestrator request body: {}",
&serde_json::to_string(&orchestrator_request).unwrap(),
);
let mut orchestration_request_headers = header::HeaderMap::new();
orchestration_request_headers.insert(
header::CONTENT_TYPE,
header::HeaderValue::from_static("application/json"),
);
orchestration_request_headers.insert(
header::HeaderName::from_static(ARCH_PROVIDER_HINT_HEADER),
header::HeaderValue::from_str(PLANO_ORCHESTRATOR_MODEL_NAME).unwrap(),
);
if let Some(trace_parent) = trace_parent {
orchestration_request_headers.insert(
header::HeaderName::from_static("traceparent"),
header::HeaderValue::from_str(&trace_parent).unwrap(),
);
}
orchestration_request_headers.insert(
header::HeaderName::from_static("model"),
header::HeaderValue::from_static(PLANO_ORCHESTRATOR_MODEL_NAME),
);
let start_time = std::time::Instant::now();
let res = self
.client
.post(&self.orchestrator_url)
.headers(orchestration_request_headers)
.body(serde_json::to_string(&orchestrator_request).unwrap())
.send()
.await?;
let body = res.text().await?;
let orchestrator_response_time = start_time.elapsed();
let chat_completion_response: ChatCompletionsResponse = match serde_json::from_str(&body) {
Ok(response) => response,
Err(err) => {
warn!(
"Failed to parse JSON: {}. Body: {}",
err,
&serde_json::to_string(&body).unwrap()
);
return Err(OrchestrationError::JsonError(
err,
format!("Failed to parse JSON: {}", body),
));
}
};
if chat_completion_response.choices.is_empty() {
warn!("No choices in orchestrator response: {}", body);
return Ok(None);
}
if let Some(content) = &chat_completion_response.choices[0].message.content {
let parsed_response = self
.orchestrator_model
.parse_response(content, &usage_preferences)?;
info!(
"arch-orchestrator determined routes: {}, selected_routes: {:?}, response time: {}ms",
content.replace("\n", "\\n"),
parsed_response,
orchestrator_response_time.as_millis()
);
if let Some(ref parsed_response) = parsed_response {
return Ok(Some(parsed_response.clone()));
}
Ok(None)
} else {
Ok(None)
}
}
}

View file

@ -150,9 +150,12 @@ pub mod operation_component {
/// Inbound request handling
pub const INBOUND: &str = "plano(inbound)";
/// Routing decision phase
/// Orchestrator for llm route selection
pub const ROUTING: &str = "plano(routing)";
/// Orchestrator for agent selection
pub const ORCHESTRATOR: &str = "plano(orchestrator)";
/// Handoff to upstream service
pub const HANDOFF: &str = "plano(handoff)";

View file

@ -33,7 +33,7 @@ pub struct AgentFilterChain {
pub id: String,
pub default: Option<bool>,
pub description: Option<String>,
pub filter_chain: Vec<String>,
pub filter_chain: Option<Vec<String>>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]

View file

@ -33,3 +33,5 @@ pub const OTEL_POST_PATH: &str = "/v1/traces";
pub const LLM_ROUTE_HEADER: &str = "x-arch-llm-route";
pub const ENVOY_RETRY_HEADER: &str = "x-envoy-max-retries";
/// Service name reported by the brightstaff process in traces/telemetry.
pub const BRIGHT_STAFF_SERVICE_NAME: &str = "brightstaff";
/// Model name used when routing requests to the Plano orchestrator.
pub const PLANO_ORCHESTRATOR_MODEL_NAME: &str = "Plano-Orchestrator";
pub const ARCH_FC_CLUSTER: &str = "arch";

View file

@ -47,7 +47,7 @@ pub struct StreamContext {
ttft_time: Option<u128>,
traceparent: Option<String>,
request_body_sent_time: Option<u128>,
overrides: Rc<Option<Overrides>>,
_overrides: Rc<Option<Overrides>>,
user_message: Option<String>,
upstream_status_code: Option<StatusCode>,
binary_frame_decoder: Option<BedrockBinaryFrameDecoder<bytes::BytesMut>>,
@ -65,7 +65,7 @@ impl StreamContext {
) -> Self {
StreamContext {
metrics,
overrides,
_overrides: overrides,
ratelimit_selector: None,
streaming_response: false,
response_tokens: 0,
@ -133,6 +133,7 @@ impl StreamContext {
.get_http_request_header(ARCH_PROVIDER_HINT_HEADER)
.map(|llm_name| llm_name.into());
// info!("llm_providers: {:?}", self.llm_providers);
self.llm_provider = Some(routing::get_llm_provider(
&self.llm_providers,
provider_hint,
@ -744,55 +745,35 @@ impl HttpContext for StreamContext {
.map(|val| val == "true")
.unwrap_or(false);
let use_agent_orchestrator = match self.overrides.as_ref() {
Some(overrides) => overrides.use_agent_orchestrator.unwrap_or_default(),
None => false,
};
// let routing_header_value = self.get_http_request_header(ARCH_ROUTING_HEADER);
let routing_header_value = self.get_http_request_header(ARCH_ROUTING_HEADER);
self.select_llm_provider();
// Check if this is a supported API endpoint
if SupportedAPIsFromClient::from_endpoint(&request_path).is_none() {
self.send_http_response(404, vec![], Some(b"Unsupported endpoint"));
return Action::Continue;
}
if routing_header_value.is_some() && !routing_header_value.as_ref().unwrap().is_empty() {
let routing_header_value = routing_header_value.as_ref().unwrap();
info!("routing header already set: {}", routing_header_value);
self.llm_provider = Some(Rc::new(LlmProvider {
name: routing_header_value.to_string(),
provider_interface: LlmProviderType::OpenAI,
..Default::default() //TODO: THiS IS BROKEN. WHY ARE WE ASSUMING OPENAI FOR UPSTREAM?
}));
} else {
//TODO: Fix this brittle code path. We need to return values and have compile time
self.select_llm_provider();
// Get the SupportedApi for routing decisions
let supported_api: Option<SupportedAPIsFromClient> =
SupportedAPIsFromClient::from_endpoint(&request_path);
self.client_api = supported_api;
// Check if this is a supported API endpoint
if SupportedAPIsFromClient::from_endpoint(&request_path).is_none() {
self.send_http_response(404, vec![], Some(b"Unsupported endpoint"));
return Action::Continue;
}
// Debug: log provider, client API, resolved API, and request path
if let (Some(api), Some(provider)) = (self.client_api.as_ref(), self.llm_provider.as_ref())
{
let provider_id = provider.to_provider_id();
self.resolved_api =
Some(provider_id.compatible_api_for_client(api, self.streaming_response));
// Get the SupportedApi for routing decisions
let supported_api: Option<SupportedAPIsFromClient> =
SupportedAPIsFromClient::from_endpoint(&request_path);
self.client_api = supported_api;
// Debug: log provider, client API, resolved API, and request path
if let (Some(api), Some(provider)) =
(self.client_api.as_ref(), self.llm_provider.as_ref())
{
let provider_id = provider.to_provider_id();
self.resolved_api =
Some(provider_id.compatible_api_for_client(api, self.streaming_response));
debug!(
"[PLANO_REQ_ID:{}] ROUTING_INFO: provider='{}' client_api={:?} resolved_api={:?} request_path='{}'",
self.request_identifier(),
provider.to_provider_id(),
api,
self.resolved_api,
request_path
);
} else {
self.resolved_api = None;
}
debug!(
"[PLANO_REQ_ID:{}] ROUTING_INFO: provider='{}' client_api={:?} resolved_api={:?} request_path='{}'",
self.request_identifier(),
provider.to_provider_id(),
api,
self.resolved_api,
request_path
);
//We need to update the upstream path if there is a variation for a provider like Gemini/Groq, etc.
self.update_upstream_path(&request_path);
@ -816,7 +797,6 @@ impl HttpContext for StreamContext {
if let Err(error) = self.modify_auth_headers() {
// ensure that the provider has an endpoint if the access key is missing else return a bad request
if self.llm_provider.as_ref().unwrap().endpoint.is_none()
&& !use_agent_orchestrator
&& self.llm_provider.as_ref().unwrap().provider_interface
!= LlmProviderType::Arch
{
@ -918,11 +898,6 @@ impl HttpContext for StreamContext {
None => None,
};
let use_agent_orchestrator = match self.overrides.as_ref() {
Some(overrides) => overrides.use_agent_orchestrator.unwrap_or_default(),
None => false,
};
// Store the original model for logging
let model_requested = deserialized_client_request.model().to_string();
@ -930,29 +905,25 @@ impl HttpContext for StreamContext {
let resolved_model = match model_name {
Some(model_name) => model_name.clone(),
None => {
if use_agent_orchestrator {
"agent_orchestrator".to_string()
} else {
warn!(
"[PLANO_REQ_ID:{}] MODEL_RESOLUTION_ERROR: no model specified | req_model='{}' provider='{}' config_model={:?}",
self.request_identifier(),
model_requested,
self.llm_provider().name,
self.llm_provider().model
);
self.send_server_error(
ServerError::BadRequest {
why: format!(
"No model specified in request and couldn't determine model name from arch_config. Model name in req: {}, arch_config, provider: {}, model: {:?}",
model_requested,
self.llm_provider().name,
self.llm_provider().model
),
},
Some(StatusCode::BAD_REQUEST),
);
return Action::Continue;
}
warn!(
"[PLANO_REQ_ID:{}] MODEL_RESOLUTION_ERROR: no model specified | req_model='{}' provider='{}' config_model={:?}",
self.request_identifier(),
model_requested,
self.llm_provider().name,
self.llm_provider().model
);
self.send_server_error(
ServerError::BadRequest {
why: format!(
"No model specified in request and couldn't determine model name from arch_config. Model name in req: {}, arch_config, provider: {}, model: {:?}",
model_requested,
self.llm_provider().name,
self.llm_provider().model
),
},
Some(StatusCode::BAD_REQUEST),
);
return Action::Continue;
}
};

View file

@ -6,8 +6,9 @@ agents:
filters:
- id: query_rewriter
url: http://host.docker.internal:10501
# type: mcp # default is mcp
url: http://host.docker.internal:10500
type: rest
# type: rest or mcp, mcp is default
# transport: streamable-http # default is streamable-http
# tool: query_rewriter # default name is the filter id
- id: context_builder
@ -30,7 +31,7 @@ listeners:
- type: agent
name: agent_1
port: 8001
router: arch_agent_router
router: plano_orchestrator_v1
agents:
- id: rag_agent
description: virtual assistant for retrieval augmented generation tasks

View file

@ -54,18 +54,26 @@ def main(host, port, agent, transport, agent_name, rest_server, rest_port):
mcp_name = agent_name or default_name
if rest_server:
# Only response_generator supports REST server mode
if agent != "response_generator":
# REST server mode - supported for query_rewriter and response_generator
if agent == "response_generator":
print(f"Starting REST server on {host}:{rest_port} for agent: {agent}")
from rag_agent.rag_agent import start_server
start_server(host=host, port=rest_port)
return
elif agent == "query_rewriter":
print(f"Starting REST server on {host}:{rest_port} for agent: {agent}")
from rag_agent.query_rewriter import start_server
start_server(host=host, port=rest_port)
return
else:
print(f"Error: Agent '{agent}' does not support REST server mode.")
print(f"REST server is only supported for: response_generator")
print(
f"REST server is only supported for: query_rewriter, response_generator"
)
print(f"Remove --rest-server flag to start {agent} as an MCP server.")
return
print(f"Starting REST server on {host}:{rest_port} for agent: {agent}")
from rag_agent.rag_agent import start_server
start_server(host=host, port=rest_port)
return
else:
# Only query_rewriter and context_builder support MCP
if agent not in ["query_rewriter", "context_builder"]:

View file

@ -184,7 +184,6 @@ async def augment_query_with_context(
load_knowledge_base()
@mcp.tool()
async def context_builder(messages: List[ChatMessage]) -> List[ChatMessage]:
"""MCP tool that augments user queries with relevant context from the knowledge base."""
logger.info(f"Received chat completion request with {len(messages)} messages")
@ -203,3 +202,8 @@ async def context_builder(messages: List[ChatMessage]) -> List[ChatMessage]:
# Return as dict to minimize text serialization
return [{"role": msg.role, "content": msg.content} for msg in updated_messages]
# Register MCP tool only if mcp is available
if mcp is not None:
mcp.tool()(context_builder)

View file

@ -1,11 +1,14 @@
import asyncio
import json
import time
from typing import List, Optional, Dict, Any
import uuid
from fastapi import FastAPI, Depends, Request
from openai import AsyncOpenAI
import os
import logging
from .api import ChatMessage
from .api import ChatCompletionRequest, ChatCompletionResponse, ChatMessage
from . import mcp
from fastmcp.server.dependencies import get_http_headers
@ -16,7 +19,6 @@ logging.basicConfig(
)
logger = logging.getLogger(__name__)
# Configuration for archgw LLM gateway
LLM_GATEWAY_ENDPOINT = os.getenv("LLM_GATEWAY_ENDPOINT", "http://localhost:12000/v1")
QUERY_REWRITE_MODEL = "gpt-4o-mini"
@ -27,6 +29,8 @@ archgw_client = AsyncOpenAI(
api_key="EMPTY", # archgw doesn't require a real API key
)
app = FastAPI()
async def rewrite_query_with_archgw(
messages: List[ChatMessage], traceparent_header: str
@ -79,15 +83,11 @@ async def rewrite_query_with_archgw(
return ""
@mcp.tool()
async def query_rewriter(messages: List[ChatMessage]) -> List[ChatMessage]:
"""Chat completions endpoint that rewrites the last user query using archgw.
Returns a dict with a 'messages' key containing the updated message list.
"""
import time
import uuid
logger.info(f"Received chat completion request with {len(messages)} messages")
# Get traceparent header from HTTP request using FastMCP's dependency function
@ -117,3 +117,42 @@ async def query_rewriter(messages: List[ChatMessage]) -> List[ChatMessage]:
# Return as dict to minimize text serialization
return [{"role": msg.role, "content": msg.content} for msg in updated_messages]
# Register MCP tool only if mcp is available
if mcp is not None:
mcp.tool()(query_rewriter)
@app.post("/")
async def chat_completions_endpoint(
    request_messages: List[ChatMessage], request: Request
) -> List[ChatMessage]:
    """REST entry point: rewrite the conversation's last user query.

    Accepts a bare JSON array of chat messages, delegates to the
    ``query_rewriter`` tool, and returns the updated message list.

    Args:
        request_messages: The incoming conversation as a list of messages.
        request: The raw FastAPI request, used to read tracing headers.

    Returns:
        The message list with the last user query rewritten.
    """
    logger.info(
        f"Received /v1/chat/completions request with {len(request_messages)} messages"
    )

    # Extract traceparent header
    # NOTE(review): traceparent is logged but not passed to query_rewriter
    # here - confirm whether it should be forwarded for trace propagation.
    traceparent_header = request.headers.get("traceparent")
    if traceparent_header:
        logger.info(f"Received traceparent header: {traceparent_header}")
    else:
        logger.info("No traceparent header found")

    # Call the query rewriter tool
    updated_messages_data = await query_rewriter(request_messages)

    # Convert back to ChatMessage objects
    # (query_rewriter returns plain dicts to minimize serialization)
    updated_messages = [ChatMessage(**msg) for msg in updated_messages_data]

    logger.info("Returning rewritten chat completion response")
    return updated_messages
def start_server(host: str = "0.0.0.0", port: int = 10501):
    """Start the FastAPI server for the query rewriter (blocks until shutdown).

    Args:
        host: Interface to bind; defaults to all interfaces.
        port: TCP port to listen on.  # NOTE(review): default 10501 differs
            from the 10500 used by the launcher - confirm intended default.
    """
    import uvicorn

    logger.info(f"Starting Query Rewriter REST server on {host}:{port}")
    uvicorn.run(app, host=host, port=port)

View file

@ -74,17 +74,13 @@ async def chat_completion_http(request: Request, request_body: ChatCompletionReq
else:
logger.info("No traceparent header found")
# Check if streaming is requested
if request_body.stream:
return StreamingResponse(
stream_chat_completions(request_body, traceparent_header),
media_type="text/plain",
headers={
"content-type": "text/event-stream",
},
)
else:
return await non_streaming_chat_completions(request_body, traceparent_header)
return StreamingResponse(
stream_chat_completions(request_body, traceparent_header),
media_type="text/plain",
headers={
"content-type": "text/event-stream",
},
)
async def stream_chat_completions(
@ -186,88 +182,6 @@ async def stream_chat_completions(
yield "data: [DONE]\n\n"
async def non_streaming_chat_completions(
request_body: ChatCompletionRequest, traceparent_header: str = None
):
"""Generate non-streaming chat completions."""
# Prepare messages for response generation
response_messages = prepare_response_messages(request_body)
try:
# Call archgw using OpenAI client
logger.info(f"Calling archgw at {LLM_GATEWAY_ENDPOINT} to generate response")
# Prepare extra headers if traceparent is provided
extra_headers = {"x-envoy-max-retries": "3"}
if traceparent_header:
extra_headers["traceparent"] = traceparent_header
response = await archgw_client.chat.completions.create(
model=RESPONSE_MODEL,
messages=response_messages,
temperature=request_body.temperature or 0.7,
max_tokens=request_body.max_tokens or 1000,
extra_headers=extra_headers,
)
generated_response = response.choices[0].message.content.strip()
logger.info(f"Response generated successfully")
return ChatCompletionResponse(
id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
created=int(time.time()),
model=request_body.model,
choices=[
{
"index": 0,
"message": {
"role": "assistant",
"content": generated_response,
},
"finish_reason": "stop",
}
],
usage={
"prompt_tokens": sum(
len(msg.content.split()) for msg in request_body.messages
),
"completion_tokens": len(generated_response.split()),
"total_tokens": sum(
len(msg.content.split()) for msg in request_body.messages
)
+ len(generated_response.split()),
},
)
except Exception as e:
logger.error(f"Error generating response: {e}")
# Fallback response
fallback_message = "I apologize, but I'm having trouble generating a response right now. Please try again."
return ChatCompletionResponse(
id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
created=int(time.time()),
model=request_body.model,
choices=[
{
"index": 0,
"message": {"role": "assistant", "content": fallback_message},
"finish_reason": "stop",
}
],
usage={
"prompt_tokens": sum(
len(msg.content.split()) for msg in request_body.messages
),
"completion_tokens": len(fallback_message.split()),
"total_tokens": sum(
len(msg.content.split()) for msg in request_body.messages
)
+ len(fallback_message.split()),
},
)
@app.get("/health")
async def health_check():
"""Health check endpoint."""

View file

@ -26,11 +26,16 @@ trap cleanup EXIT
# WAIT_FOR_PIDS+=($!)
log "Starting query_parser agent on port 10501..."
log "Starting query_rewriter agent on port 10500/http..."
uv run python -m rag_agent --rest-server --host 0.0.0.0 --rest-port 10500 --agent query_rewriter &
WAIT_FOR_PIDS+=($!)
log "Starting query_parser agent on port 10501/mcp..."
uv run python -m rag_agent --host 0.0.0.0 --port 10501 --agent query_rewriter &
WAIT_FOR_PIDS+=($!)
log "Starting context_builder agent on port 10502..."
log "Starting context_builder agent on port 10502/mcp..."
uv run python -m rag_agent --host 0.0.0.0 --port 10502 --agent context_builder &
WAIT_FOR_PIDS+=($!)

View file

@ -52,19 +52,16 @@ Content-Type: application/json
"stream": true
}
### send request to context builder agent
POST http://localhost:10501/v1/chat/completions
### send request to query_rewriter agent
POST http://localhost:10500/
Content-Type: application/json
{
"model": "gpt-4o-mini",
"messages": [
{
"role": "user",
"content": "What is the guaranteed uptime percentage for TechCorp's cloud services?"
}
]
}
[
{
"role": "user",
"content": "What is the guaranteed uptime percentage for TechCorp's cloud services?"
}
]
### test fast-llm
POST http://localhost:12000/v1/chat/completions

View file

@ -0,0 +1,220 @@
# Travel Booking Agent Demo
A production-ready multi-agent travel booking system demonstrating Plano's intelligent agent routing. This demo showcases three specialized agents working together to help users plan trips with weather information, flight searches, and currency exchange rates.
## Overview
This demo consists of three intelligent agents that work together seamlessly:
- **Weather Agent** - Real-time weather conditions and forecasts for any city worldwide
- **Flight Agent** - Live flight information between airports with real-time tracking
- **Currency Agent** - Real-time currency exchange rates and conversions
All agents use Plano's agent router to intelligently route user requests to the appropriate specialized agent based on conversation context and user intent.
## Features
- **Intelligent Routing**: Plano automatically routes requests to the right agent
- **Conversation Context**: Agents understand follow-up questions and references
- **Real-Time Data**: Live weather, flight, and currency data from public APIs
- **LLM-Powered**: Uses GPT-4o-mini for extraction and GPT-4o for responses
- **Streaming Responses**: Real-time streaming for better user experience
## Prerequisites
- Python 3.10 or higher
- [UV package manager](https://github.com/astral-sh/uv) (recommended) or pip
- OpenAI API key
- [Plano CLI](https://docs.planoai.dev) installed
## Quick Start
### 1. Install Dependencies
```bash
# Using UV (recommended)
uv sync
# Or using pip
pip install -e .
```
### 2. Set Environment Variables
Create a `.env` file or export environment variables:
```bash
export OPENAI_API_KEY="your-openai-api-key"
export AEROAPI_KEY="your-flightaware-api-key" # Optional, demo key included
```
### 3. Start All Agents
```bash
chmod +x start_agents.sh
./start_agents.sh
```
This starts:
- Weather Agent on port 10510
- Flight Agent on port 10520
- Currency Agent on port 10530
### 4. Start Plano Orchestrator
In a new terminal:
```bash
cd /path/to/travel_booking
plano up arch_config.yaml
```
The gateway will start on port 8001 and route requests to the appropriate agents.
### 5. Test the System
Send requests to Plano Orchestrator:
```bash
curl -X POST http://localhost:8001/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-4o",
"messages": [
{"role": "user", "content": "What is the weather like in Paris?"}
]
}'
```
## Example Conversations
### Weather Query
```
User: What's the weather in Istanbul?
Assistant: [Weather Agent provides current conditions and forecast]
```
### Flight Search
```
User: What flights go from London to Seattle?
Assistant: [Flight Agent shows available flights with schedules and status]
```
### Currency Exchange
```
User: What's the exchange rate for Turkish Lira to USD?
Assistant: [Currency Agent provides current exchange rate]
```
### Multi-Agent Conversation
```
User: What's the weather in Istanbul?
Assistant: [Weather information]
User: What's their exchange rate?
Assistant: [Currency rate for Turkey]
User: Do they fly out from Seattle?
Assistant: [Flight information from Istanbul to Seattle]
```
The system understands context and pronouns, automatically routing to the right agent.
### Multi-Intent Queries
```
User: What's the weather in Seattle, and do any flights go direct to New York?
Assistant: [Both weather_agent and flight_agent respond simultaneously]
- Weather Agent: [Weather information for Seattle]
- Flight Agent: [Flight information from Seattle to New York]
```
The orchestrator can select multiple agents simultaneously for queries containing multiple intents.
## Agent Details
### Weather Agent
- **Port**: 10510
- **API**: Open-Meteo (free, no API key)
- **Capabilities**: Current weather, multi-day forecasts, temperature, conditions, sunrise/sunset
### Flight Agent
- **Port**: 10520
- **API**: FlightAware AeroAPI
- **Capabilities**: Real-time flight status, schedules, delays, gates, terminals, live tracking
### Currency Agent
- **Port**: 10530
- **API**: Frankfurter (free, no API key)
- **Capabilities**: Exchange rates, currency conversions, historical rates
## Architecture
```
User Request → Plano Gateway (port 8001)
Agent Router (LLM-based)
┌───────────┼───────────┐
↓ ↓ ↓
Weather Flight Currency
Agent Agent Agent
(10510) (10520) (10530)
```
Each agent:
1. Extracts intent using GPT-4o-mini
2. Fetches real-time data from APIs
3. Generates response using GPT-4o
4. Streams response back to user
## Configuration
### arch_config.yaml
Defines the three agents, their descriptions, and routing configuration. The agent router uses these descriptions to intelligently route requests.
### Environment Variables
- `OPENAI_API_KEY` - Required for LLM operations
- `AEROAPI_KEY` - Optional, FlightAware API key (demo key included)
- `LLM_GATEWAY_ENDPOINT` - Plano LLM gateway URL (default: http://localhost:12000/v1)
## Project Structure
```
travel_booking/
├── arch_config.yaml # Plano configuration
├── start_agents.sh # Start all agents script
├── pyproject.toml # Python dependencies
└── src/
└── travel_agents/
├── __init__.py # CLI entry point
├── api.py # Shared API models
├── weather_agent.py # Weather forecast agent
├── flight_agent.py # Flight information agent
└── currency_agent.py # Currency exchange agent
```
## Troubleshooting
**Agents won't start**
- Ensure Python 3.10+ is installed
- Check that UV is installed: `pip install uv`
- Verify ports 10510, 10520, 10530 are available
**Plano won't start**
- Verify Plano is installed: `plano --version`
- Check that `OPENAI_API_KEY` is set
- Ensure you're in the travel_booking directory
**No response from agents**
- Verify all agents are running (check start_agents.sh output)
- Check that Plano is running on port 8001
- Review agent logs for errors
## API Endpoints
All agents expose OpenAI-compatible chat completion endpoints:
- `POST /v1/chat/completions` - Chat completion endpoint
- `GET /health` - Health check endpoint

View file

@ -0,0 +1,34 @@
version: v0.3.0
agents:
- id: weather_agent
url: http://host.docker.internal:10510
- id: flight_agent
url: http://host.docker.internal:10520
- id: currency_agent
url: http://host.docker.internal:10530
model_providers:
- model: openai/gpt-4o
access_key: $OPENAI_API_KEY
- model: openai/gpt-4o-mini
access_key: $OPENAI_API_KEY
system_prompt: |
You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather and flights based on the structured data provided to you.\n\nCRITICAL INSTRUCTIONS:\n\n1. DATA STRUCTURE:\n \n WEATHER DATA:\n - You will receive weather data as JSON in a system message\n - The data contains a \"location\" field (string) and a \"forecast\" array\n - Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset\n - Some fields may be null/None - handle these gracefully\n \n FLIGHT DATA:\n - You will receive flight information in a system message\n - Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal\n - Information may include both scheduled and estimated times\n - Some fields may be unavailable - handle these gracefully\n\n2. WEATHER HANDLING:\n - For single-day queries: Use temperature_c/temperature_f (current/primary temperature)\n - For multi-day forecasts: Use temperature_max_c and temperature_min_c when available\n - Always provide temperatures in both Celsius and Fahrenheit when available\n - If temperature is null, say \"temperature data unavailable\" rather than making up numbers\n - Use exact condition descriptions provided (e.g., \"Clear sky\", \"Rainy\", \"Partly Cloudy\")\n - Add helpful context when appropriate (e.g., \"perfect for outdoor activities\" for clear skies)\n\n3. 
FLIGHT HANDLING:\n - Present flight information clearly with airline name and flight number\n - Include departure and arrival times with time zones when provided\n - Mention origin and destination airports with their codes\n - Include gate and terminal information when available\n - Note aircraft type if relevant to the query\n - Highlight any status updates (delays, early arrivals, etc.)\n - For multiple flights, list them in chronological order by departure time\n - If specific details are missing, acknowledge this rather than inventing information\n\n4. MULTI-PART QUERIES:\n - Users may ask about both weather and flights in one message\n - Answer ALL parts of the query that you have data for\n - Organize your response logically - typically weather first, then flights, or vice versa based on the query\n - Provide complete information for each topic without mentioning other agents\n - If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data\n\n5. ERROR HANDLING:\n - If weather forecast contains an \"error\" field, acknowledge the issue politely\n - If temperature or condition is null/None, mention that specific data is unavailable\n - If flight details are incomplete, state which information is unavailable\n - Never invent or guess weather or flight data - only use what's provided\n - If location couldn't be determined, acknowledge this but still provide available data\n\n6. 
RESPONSE FORMAT:\n \n For Weather:\n - Single-day queries: Provide current conditions, temperature, and condition\n - Multi-day forecasts: List each day with date, day name, high/low temps, and condition\n - Include sunrise/sunset times when available and relevant\n \n For Flights:\n - List flights with clear numbering or bullet points\n - Include key details: airline, flight number, departure/arrival times, airports\n - Add gate, terminal, and status information when available\n - For multiple flights, organize chronologically\n \n General:\n - Use natural, conversational language\n - Be concise but complete\n - Format dates and times clearly\n - Use bullet points or numbered lists for clarity\n\n7. LOCATION HANDLING:\n - Always mention location names from the data\n - For flights, clearly state origin and destination cities/airports\n - If locations differ from what the user asked, acknowledge this politely\n\n8. RESPONSE STYLE:\n - Be friendly and professional\n - Use natural language, not technical jargon\n - Provide information in a logical, easy-to-read format\n - When answering multi-part queries, create a cohesive response that addresses all aspects\n\nRemember: Only use the data provided. Never fabricate weather or flight information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for.
listeners:
- type: agent
name: travel_booking_service
port: 8001
router: plano_orchestrator_v1
agents:
- id: weather_agent
description: Get real-time weather conditions and multi-day forecasts for any city worldwide using Open-Meteo API (free, no API key needed). Provides current temperature, multi-day forecasts, weather conditions, sunrise/sunset times, and detailed weather information. Understands conversation context to resolve location references from previous messages. Handles weather-related questions including "What's the weather in [city]?", "What's the forecast for [city]?", "How's the weather in [city]?". When queries include both weather and other travel questions (e.g., flights, currency), this agent answers ONLY the weather part.
- id: flight_agent
description: Get live flight information between airports using FlightAware AeroAPI. Shows real-time flight status, scheduled/estimated/actual departure and arrival times, gate and terminal information, delays, aircraft type, and flight status. Automatically resolves city names to airport codes (IATA/ICAO). Understands conversation context to infer origin/destination from follow-up questions. Handles flight-related questions including "What flights go from [city] to [city]?", "Do flights go to [city]?", "Are there direct flights from [city]?". When queries include both flight and other travel questions (e.g., weather, currency), this agent answers ONLY the flight part.
- id: currency_agent
description: Get real-time currency exchange rates and perform currency conversions using Frankfurter API (free, no API key needed). Provides latest exchange rates, currency conversions with amount calculations, and supports any currency pair. Automatically extracts currency codes from country names and conversation context. Understands pronouns like "their currency" when referring to previously mentioned countries. Uses standard 3-letter ISO currency codes (e.g., USD, EUR, GBP, JPY, PKR).
tracing:
random_sampling: 100

View file

@ -0,0 +1,17 @@
services:
jaeger:
build:
context: ../../shared/jaeger
ports:
- "16686:16686"
- "4317:4317"
- "4318:4318"
open-web-ui:
image: dyrnq/open-webui:main
restart: always
ports:
- "8080:8080"
environment:
- DEFAULT_MODEL=gpt-4o-mini
- ENABLE_OPENAI_API=true
- OPENAI_API_BASE_URL=http://host.docker.internal:8001/v1

View file

@ -0,0 +1,21 @@
[project]
name = "travel-agents"
version = "0.1.0"
description = "Travel Booking Agents - Weather, Flight, and Currency"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"click>=8.2.1",
"pydantic>=2.11.7",
"fastapi>=0.104.1",
"uvicorn>=0.24.0",
"openai>=2.13.0",
"httpx>=0.24.0",
]
[project.scripts]
travel_agents = "travel_agents:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

View file

@ -0,0 +1,48 @@
import click
@click.command()
@click.option("--host", "host", default="localhost", help="Host to bind server to")
@click.option("--port", "port", type=int, default=8000, help="Port for server")
@click.option(
    "--agent",
    "agent",
    required=True,
    help="Agent name: weather, flight, or currency",
)
def main(host, port, agent):
    """Start a travel agent REST server.

    Resolves the requested agent to its module and default port, then starts
    that agent's server. An explicit --port is honored; when the generic
    default (8000) is left unchanged, the agent's own default port is used.
    """
    import importlib

    # Maps agent name -> (module path, default port). This single table now
    # drives both validation and dispatch; the original also had a redundant
    # if/elif import chain that ignored module_name.
    agent_map = {
        "weather": ("travel_agents.weather_agent", 10510),
        "flight": ("travel_agents.flight_agent", 10520),
        "currency": ("travel_agents.currency_agent", 10530),
    }
    if agent not in agent_map:
        print(f"Error: Unknown agent '{agent}'")
        print(f"Available agents: {', '.join(agent_map.keys())}")
        return
    module_name, default_port = agent_map[agent]
    # Only substitute the agent-specific port when the caller kept the
    # generic default; an explicit --port always wins.
    if port == 8000:
        port = default_port
    print(f"Starting {agent} agent REST server on {host}:{port}")
    # Import lazily so starting one agent does not import the others.
    module = importlib.import_module(module_name)
    module.start_server(host=host, port=port)


if __name__ == "__main__":
    main()

View file

@ -0,0 +1,4 @@
# Package entry point for "python -m travel_agents": delegates to the
# Click CLI exported by the package __init__.
from . import main

if __name__ == "__main__":
    main()

View file

@ -0,0 +1,36 @@
from pydantic import BaseModel
from typing import List, Optional, Dict, Any
class ChatMessage(BaseModel):
    """A single chat turn in OpenAI chat-completions format."""

    # Speaker role, e.g. "system", "user", or "assistant".
    role: str
    # Text content of the message.
    content: str
class ChatCompletionRequest(BaseModel):
    """Incoming request body mirroring the OpenAI /v1/chat/completions schema.

    Optional sampling fields carry the OpenAI defaults so clients may omit them.
    """

    model: str
    messages: List[ChatMessage]
    temperature: Optional[float] = 1.0
    max_tokens: Optional[int] = None
    top_p: Optional[float] = 1.0
    frequency_penalty: Optional[float] = 0.0
    presence_penalty: Optional[float] = 0.0
    # Note: the agents in this package always stream their response,
    # regardless of this flag — TODO confirm that is intentional.
    stream: Optional[bool] = False
    stop: Optional[List[str]] = None
class ChatCompletionResponse(BaseModel):
    """Non-streaming chat completion response (OpenAI-compatible shape)."""

    id: str
    object: str = "chat.completion"
    # Unix timestamp (seconds) of response creation.
    created: int
    model: str
    # Raw dicts rather than nested models: shape is pass-through/OpenAI-like.
    choices: List[Dict[str, Any]]
    usage: Dict[str, int]
class ChatCompletionStreamResponse(BaseModel):
    """One SSE chunk of a streaming chat completion (OpenAI-compatible shape)."""

    id: str
    object: str = "chat.completion.chunk"
    # Unix timestamp (seconds) of chunk creation.
    created: int
    model: str
    # Each choice carries a "delta" dict with incremental content.
    choices: List[Dict[str, Any]]

View file

@ -0,0 +1,584 @@
import json
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
import os
import logging
import time
import uuid
import uvicorn
import httpx
from typing import Optional
from urllib.parse import quote
from .api import (
ChatCompletionRequest,
ChatCompletionStreamResponse,
)
# Set up logging with an agent-specific prefix so interleaved multi-agent
# logs are attributable.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - [CURRENCY_AGENT] - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
# Configuration for archgw LLM gateway; all LLM calls are proxied through it.
LLM_GATEWAY_ENDPOINT = os.getenv("LLM_GATEWAY_ENDPOINT", "http://localhost:12000/v1")
# Larger model generates the user-facing answer ...
CURRENCY_MODEL = "openai/gpt-4o"
# ... smaller/cheaper model only extracts currency codes/amounts.
CURRENCY_EXTRACTION_MODEL = "openai/gpt-4o-mini"
# HTTP client for Frankfurter API calls (shared, with a 10s timeout).
http_client = httpx.AsyncClient(timeout=10.0)
# Initialize OpenAI client pointed at archgw; the gateway holds the real
# credentials, so the SDK key is a placeholder.
archgw_client = AsyncOpenAI(
    base_url=LLM_GATEWAY_ENDPOINT,
    api_key="EMPTY",
)
# System prompt for currency agent
SYSTEM_PROMPT = """You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather, flights, and currency exchange based on the structured data provided to you.
CRITICAL INSTRUCTIONS:
1. DATA STRUCTURE:
WEATHER DATA:
- You will receive weather data as JSON in a system message
- The data contains a "location" field (string) and a "forecast" array
- Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset
- Some fields may be null/None - handle these gracefully
FLIGHT DATA:
- You will receive flight information in a system message
- Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal
- Information may include both scheduled and estimated times
- Some fields may be unavailable - handle these gracefully
CURRENCY DATA:
- You will receive currency exchange data as JSON in a system message
- The data contains: from_currency, to_currency, rate, date, and optionally original_amount and converted_amount
- Some fields may be null/None - handle these gracefully
2. WEATHER HANDLING:
- For single-day queries: Use temperature_c/temperature_f (current/primary temperature)
- For multi-day forecasts: Use temperature_max_c and temperature_min_c when available
- Always provide temperatures in both Celsius and Fahrenheit when available
- If temperature is null, say "temperature data unavailable" rather than making up numbers
- Use exact condition descriptions provided (e.g., "Clear sky", "Rainy", "Partly Cloudy")
- Add helpful context when appropriate (e.g., "perfect for outdoor activities" for clear skies)
3. FLIGHT HANDLING:
- Present flight information clearly with airline name and flight number
- Include departure and arrival times with time zones when provided
- Mention origin and destination airports with their codes
- Include gate and terminal information when available
- Note aircraft type if relevant to the query
- Highlight any status updates (delays, early arrivals, etc.)
- For multiple flights, list them in chronological order by departure time
- If specific details are missing, acknowledge this rather than inventing information
4. CURRENCY HANDLING:
- Present exchange rates clearly with both currency codes and names when helpful
- Include the date of the exchange rate
- If an amount was provided, show both the original and converted amounts
- Use clear formatting (e.g., "100 USD = 92.50 EUR" or "1 USD = 0.925 EUR")
- If rate data is unavailable, acknowledge this politely
5. MULTI-PART QUERIES:
- Users may ask about weather, flights, and currency in one message
- Answer ALL parts of the query that you have data for
- Organize your response logically - typically weather first, then flights, then currency, or based on the query order
- Provide complete information for each topic without mentioning other agents
- If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data
6. ERROR HANDLING:
- If weather forecast contains an "error" field, acknowledge the issue politely
- If temperature or condition is null/None, mention that specific data is unavailable
- If flight details are incomplete, state which information is unavailable
- If currency rate is unavailable, mention that specific data is unavailable
- Never invent or guess weather, flight, or currency data - only use what's provided
- If location couldn't be determined, acknowledge this but still provide available data
7. RESPONSE FORMAT:
For Weather:
- Single-day queries: Provide current conditions, temperature, and condition
- Multi-day forecasts: List each day with date, day name, high/low temps, and condition
- Include sunrise/sunset times when available and relevant
For Flights:
- List flights with clear numbering or bullet points
- Include key details: airline, flight number, departure/arrival times, airports
- Add gate, terminal, and status information when available
- For multiple flights, organize chronologically
For Currency:
- Show exchange rate clearly: "1 [FROM] = [RATE] [TO]"
- If amount provided: "[AMOUNT] [FROM] = [CONVERTED] [TO]"
- Include the date of the exchange rate
General:
- Use natural, conversational language
- Be concise but complete
- Format dates and times clearly
- Use bullet points or numbered lists for clarity
8. LOCATION HANDLING:
- Always mention location names from the data
- For flights, clearly state origin and destination cities/airports
- For currency, use country/city context to resolve currency references
- If locations differ from what the user asked, acknowledge this politely
9. RESPONSE STYLE:
- Be friendly and professional
- Use natural language, not technical jargon
- Provide information in a logical, easy-to-read format
- When answering multi-part queries, create a cohesive response that addresses all aspects
Remember: Only use the data provided. Never fabricate weather, flight, or currency information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for."""
CURRENCY_EXTRACTION_PROMPT = """You are a currency information extraction assistant. Your ONLY job is to extract currency-related information from user messages and convert it to standard 3-letter ISO currency codes.
CRITICAL RULES:
1. Extract currency codes (3-letter ISO codes like USD, EUR, GBP, JPY, PKR, etc.) from the message AND conversation context
2. Extract any mentioned amounts or numbers that might be currency amounts
3. PAY ATTENTION TO CONVERSATION CONTEXT:
- If previous messages mention a country/city, use that context to resolve pronouns like "their", "that country", "there", etc.
- Example: If previous message was "What's the weather in Lahore, Pakistan?" and current message is "What is their currency exchange rate with USD?", then "their" = Pakistan = PKR
- Look for country names in the conversation history to infer currencies
4. If country names or regions are mentioned (in current message OR conversation context), convert them to their standard currency codes:
- United States/USA/US USD
- Europe/Eurozone/France/Germany/Italy/Spain/etc. EUR
- United Kingdom/UK/Britain GBP
- Japan JPY
- China CNY
- India INR
- Pakistan PKR
- Australia AUD
- Canada CAD
- Switzerland CHF
- South Korea KRW
- Singapore SGD
- Hong Kong HKD
- Brazil BRL
- Mexico MXN
- And any other countries you know the currency for
5. Determine the FROM currency (source) and TO currency (target) based on context:
- "from X to Y" from_currency=X, to_currency=Y
- "X to Y" from_currency=X, to_currency=Y
- "convert X to Y" from_currency=X, to_currency=Y
- "X in Y" from_currency=X, to_currency=Y
- "rate for X" or "X rate" to_currency=X (assume USD as base)
- "their currency with USD" or "their currency to USD" from_currency=country_from_context, to_currency=USD
- "X dollars/euros/pounds/etc." from_currency=X
6. If only one currency is mentioned, determine if it's the source or target based on context
7. ALWAYS return currency codes, never country names in the currency fields
8. Return your response as a JSON object with the following structure:
{
"from_currency": "USD" or null,
"to_currency": "EUR" or null,
"amount": 100.0 or null
}
9. If you cannot determine a currency, use null for that field
10. Use standard 3-letter ISO currency codes ONLY
11. Ignore error messages, HTML tags, and assistant responses
12. Extract from the most recent user message BUT use conversation context to resolve references
13. Default behavior: If only one currency is mentioned without context, assume it's the target currency and use USD as the source
Examples with context:
- Conversation: "What's the weather in Lahore, Pakistan?" Current: "What is their currency exchange rate with USD?" {"from_currency": "PKR", "to_currency": "USD", "amount": null}
- Conversation: "Tell me about Tokyo" Current: "What's their currency rate?" {"from_currency": "JPY", "to_currency": "USD", "amount": null}
- "What's the exchange rate from USD to EUR?" {"from_currency": "USD", "to_currency": "EUR", "amount": null}
- "Convert 100 dollars to euros" {"from_currency": "USD", "to_currency": "EUR", "amount": 100.0}
- "How much is 50 GBP in Japanese yen?" {"from_currency": "GBP", "to_currency": "JPY", "amount": 50.0}
- "What's the rate for euros?" {"from_currency": "USD", "to_currency": "EUR", "amount": null}
- "Convert money from United States to France" {"from_currency": "USD", "to_currency": "EUR", "amount": null}
- "100 pounds to dollars" {"from_currency": "GBP", "to_currency": "USD", "amount": 100.0}
Now extract the currency information from this message, considering the conversation context:"""
async def extract_currency_info_from_messages(messages):
    """Extract currency information from user messages using LLM, considering conversation context.

    Filters out messages that look like errors or contain HTML/markup, sends
    the surviving conversation (capped at the last 10 messages) to the
    extraction model with CURRENCY_EXTRACTION_PROMPT, and parses its JSON
    reply.

    Returns:
        dict with keys "from_currency" / "to_currency" (3-letter codes,
        defaulting to "USD" / "EUR" on any failure or missing field) and
        "amount" (float or None).
    """
    # Get all messages for context (both user and assistant)
    conversation_context = []
    for msg in messages:
        # Skip error messages and HTML tags. Note this is a coarse filter:
        # any "<" or ">" in the content drops the whole message.
        content = msg.content.strip()
        content_lower = content.lower()
        if any(
            pattern in content_lower
            for pattern in ["<", ">", "error:", "i apologize", "i'm having trouble"]
        ):
            continue
        conversation_context.append({"role": msg.role, "content": content})
    # Get the most recent user message
    user_messages = [msg for msg in messages if msg.role == "user"]
    if not user_messages:
        logger.warning("No user messages found")
        # Fall back to a harmless default pair rather than failing the request.
        return {"from_currency": "USD", "to_currency": "EUR", "amount": None}
    # Get the most recent user message (skip error messages and HTML tags).
    # Used only for logging and for the "no valid user message" guard; the
    # LLM itself sees conversation_context.
    user_content = None
    for msg in reversed(user_messages):
        content = msg.content.strip()
        # Skip messages with error patterns or HTML tags (slightly wider
        # pattern list than the context filter above — includes "assistant:").
        content_lower = content.lower()
        if any(
            pattern in content_lower
            for pattern in [
                "<",
                ">",
                "assistant:",
                "error:",
                "i apologize",
                "i'm having trouble",
            ]
        ):
            continue
        user_content = content
        break
    if not user_content:
        logger.warning("No valid user message found")
        return {"from_currency": "USD", "to_currency": "EUR", "amount": None}
    try:
        logger.info(f"Extracting currency info from user message: {user_content[:200]}")
        logger.info(
            f"Using conversation context with {len(conversation_context)} messages"
        )
        # Extraction prompt first, then up to the last 10 context messages.
        llm_messages = [{"role": "system", "content": CURRENCY_EXTRACTION_PROMPT}]
        context_messages = (
            conversation_context[-10:]
            if len(conversation_context) > 10
            else conversation_context
        )
        for msg in context_messages:
            llm_messages.append({"role": msg["role"], "content": msg["content"]})
        # Low temperature: extraction should be deterministic.
        response = await archgw_client.chat.completions.create(
            model=CURRENCY_EXTRACTION_MODEL,
            messages=llm_messages,
            temperature=0.1,
            max_tokens=200,
        )
        extracted_text = response.choices[0].message.content.strip()
        try:
            # Strip markdown code fences the model may wrap around the JSON.
            if "```json" in extracted_text:
                extracted_text = (
                    extracted_text.split("```json")[1].split("```")[0].strip()
                )
            elif "```" in extracted_text:
                extracted_text = extracted_text.split("```")[1].split("```")[0].strip()
            currency_info = json.loads(extracted_text)
            from_currency = currency_info.get("from_currency")
            to_currency = currency_info.get("to_currency")
            amount = currency_info.get("amount")
            # Null/missing codes fall back to the USD->EUR default pair.
            if not from_currency:
                from_currency = "USD"
            if not to_currency:
                to_currency = "EUR"
            result = {
                "from_currency": from_currency,
                "to_currency": to_currency,
                "amount": amount,
            }
            logger.info(f"LLM extracted currency info: {result}")
            return result
        except json.JSONDecodeError as e:
            logger.warning(
                f"Failed to parse JSON from LLM response: {extracted_text}, error: {e}"
            )
            return {"from_currency": "USD", "to_currency": "EUR", "amount": None}
    except Exception as e:
        logger.error(f"Error extracting currency info with LLM: {e}, using defaults")
        return {"from_currency": "USD", "to_currency": "EUR", "amount": None}
async def get_currency_exchange_rate(
    from_currency: str, to_currency: str
) -> Optional[dict]:
    """Get currency exchange rate between two currencies using Frankfurter API.

    Uses the Frankfurter API (api.frankfurter.dev) which provides free, open-source
    currency data tracking reference exchange rates published by institutional sources.
    No API keys required.

    Args:
        from_currency: Base currency code (e.g., "USD", "EUR")
        to_currency: Target currency code (e.g., "EUR", "GBP")

    Returns:
        Dictionary with exchange rate data or None if error occurs
    """
    try:
        # URL-encode the currency codes: they originate from LLM extraction of
        # user text, so encode them rather than splicing raw strings into the
        # query (the `quote` import was previously unused).
        url = (
            "https://api.frankfurter.dev/v1/latest"
            f"?base={quote(from_currency)}&symbols={quote(to_currency)}"
        )
        response = await http_client.get(url)
        if response.status_code != 200:
            logger.warning(
                f"Currency API returned status {response.status_code} for {from_currency} to {to_currency}"
            )
            return None
        data = response.json()
        # Defensive checks: the API should return {"rates": {TO: rate}, ...}.
        if "rates" not in data:
            logger.warning("Invalid API response structure: missing 'rates' field")
            return None
        if to_currency not in data["rates"]:
            logger.warning(
                f"Currency {to_currency} not found in API response for base {from_currency}"
            )
            return None
        return {
            "from_currency": from_currency,
            "to_currency": to_currency,
            "rate": data["rates"][to_currency],
            "date": data.get("date"),
            "base": data.get("base"),
        }
    except httpx.HTTPError as e:
        logger.error(
            f"HTTP error fetching currency rate from {from_currency} to {to_currency}: {e}"
        )
        return None
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse JSON response from currency API: {e}")
        return None
    except Exception as e:
        logger.error(f"Unexpected error fetching currency rate: {e}")
        return None
# FastAPI app for REST server; exposes /v1/chat/completions and /health.
app = FastAPI(title="Currency Exchange Agent", version="1.0.0")
async def prepare_currency_messages(request_body: ChatCompletionRequest):
    """Prepare messages with currency exchange data.

    Extracts the currency pair/amount from the conversation, fetches the
    live rate, and returns the LLM message list: system prompt, a system
    message carrying the fetched (or error) data as JSON, then the original
    conversation history.
    """
    # Extract currency information from conversation using LLM
    currency_info = await extract_currency_info_from_messages(request_body.messages)
    from_currency = currency_info["from_currency"]
    to_currency = currency_info["to_currency"]
    amount = currency_info.get("amount")
    # Get currency exchange rate
    rate_data = await get_currency_exchange_rate(from_currency, to_currency)
    if rate_data:
        currency_data = {
            "from_currency": rate_data["from_currency"],
            "to_currency": rate_data["to_currency"],
            "rate": rate_data["rate"],
            "date": rate_data.get("date"),
        }
        # If an amount was mentioned, calculate the conversion
        if amount is not None:
            converted_amount = amount * rate_data["rate"]
            currency_data["original_amount"] = amount
            currency_data["converted_amount"] = round(converted_amount, 2)
    else:
        logger.warning(
            f"Could not fetch currency rate for {from_currency} to {to_currency}"
        )
        # Surface the failure to the LLM so it can acknowledge missing data
        # instead of inventing a rate.
        currency_data = {
            "from_currency": from_currency,
            "to_currency": to_currency,
            "rate": None,
            "error": "Could not retrieve exchange rate",
        }
    # Create system message with currency data
    currency_context = f"""
Current currency exchange data:
{json.dumps(currency_data, indent=2)}
Use this data to answer the user's currency exchange query.
"""
    response_messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        # Fix: deliver the data with role "system", not "assistant" — the
        # comment above and SYSTEM_PROMPT itself ("You will receive currency
        # exchange data as JSON in a system message") both specify a system
        # message, which the original code contradicted.
        {"role": "system", "content": currency_context},
    ]
    # Add conversation history
    for msg in request_body.messages:
        response_messages.append({"role": msg.role, "content": msg.content})
    return response_messages
@app.post("/v1/chat/completions")
async def chat_completion_http(request: Request, request_body: ChatCompletionRequest):
"""HTTP endpoint for chat completions with streaming support."""
logger.info(f"Received currency request with {len(request_body.messages)} messages")
traceparent_header = request.headers.get("traceparent")
if traceparent_header:
logger.info(f"Received traceparent header: {traceparent_header}")
return StreamingResponse(
stream_chat_completions(request_body, traceparent_header),
media_type="text/plain",
headers={
"content-type": "text/event-stream",
},
)
async def stream_chat_completions(
    request_body: ChatCompletionRequest, traceparent_header: Optional[str] = None
):
    """Generate streaming chat completions.

    Async generator yielding OpenAI-style SSE lines ("data: {json}\\n\\n"):
    one chunk per content delta, a final chunk with finish_reason "stop"
    whose message content is the JSON-encoded assistant turn, then
    "data: [DONE]". On any error it emits a single apology chunk instead.
    """
    # Prepare messages with currency exchange data
    response_messages = await prepare_currency_messages(request_body)
    try:
        logger.info(
            f"Calling archgw at {LLM_GATEWAY_ENDPOINT} to generate currency response"
        )
        # Prepare extra headers: ask Envoy to retry, forward trace context.
        extra_headers = {"x-envoy-max-retries": "3"}
        if traceparent_header:
            extra_headers["traceparent"] = traceparent_header
        response_stream = await archgw_client.chat.completions.create(
            model=CURRENCY_MODEL,
            messages=response_messages,
            temperature=request_body.temperature or 0.7,
            max_tokens=request_body.max_tokens or 1000,
            stream=True,
            extra_headers=extra_headers,
        )
        completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
        created_time = int(time.time())
        # Accumulate deltas so the full reply can be attached to the final chunk.
        collected_content = []
        async for chunk in response_stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                collected_content.append(content)
                stream_chunk = ChatCompletionStreamResponse(
                    id=completion_id,
                    created=created_time,
                    model=request_body.model,
                    choices=[
                        {
                            "index": 0,
                            "delta": {"content": content},
                            "finish_reason": None,
                        }
                    ],
                )
                yield f"data: {stream_chunk.model_dump_json()}\n\n"
        full_response = "".join(collected_content)
        updated_history = [{"role": "assistant", "content": full_response}]
        # Final chunk carries the whole assistant turn as JSON under "message"
        # — presumably consumed by the gateway/router; TODO confirm consumer.
        final_chunk = ChatCompletionStreamResponse(
            id=completion_id,
            created=created_time,
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                    "message": {
                        "role": "assistant",
                        "content": json.dumps(updated_history),
                    },
                }
            ],
        )
        yield f"data: {final_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
    except Exception as e:
        logger.error(f"Error generating currency response: {e}")
        # Degrade gracefully: stream a single apology chunk, then terminate.
        error_chunk = ChatCompletionStreamResponse(
            id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
            created=int(time.time()),
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {
                        "content": "I apologize, but I'm having trouble generating a currency exchange response right now. Please try again."
                    },
                    "finish_reason": "stop",
                }
            ],
        )
        yield f"data: {error_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
@app.get("/health")
async def health_check():
"""Health check endpoint."""
return {"status": "healthy", "agent": "currency_exchange"}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=10530)
def start_server(host: str = "localhost", port: int = 10530):
"""Start the currency agent server."""
uvicorn.run(
app,
host=host,
port=port,
log_config={
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(asctime)s - [CURRENCY_AGENT] - %(levelname)s - %(message)s",
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
},
},
"root": {
"level": "INFO",
"handlers": ["default"],
},
},
)

View file

@ -0,0 +1,913 @@
import json
import re
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
import os
import logging
import time
import uuid
import uvicorn
from datetime import datetime, timedelta
import httpx
from typing import Optional
from urllib.parse import quote
from .api import (
ChatCompletionRequest,
ChatCompletionStreamResponse,
)
# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - [FLIGHT_AGENT] - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Configuration for archgw LLM gateway
LLM_GATEWAY_ENDPOINT = os.getenv("LLM_GATEWAY_ENDPOINT", "http://localhost:12000/v1")
FLIGHT_MODEL = "openai/gpt-4o"  # model that composes the final user-facing answer
FLIGHT_EXTRACTION_MODEL = "openai/gpt-4o-mini"  # cheaper model for structured extraction

# FlightAware AeroAPI configuration.
# SECURITY: the previous code shipped a real API key as the source-code
# default — a committed credential. The key must come from the environment;
# an empty key makes FlightAware calls fail with an auth error, which is
# preferable to silently using a leaked secret.
AEROAPI_BASE_URL = "https://aeroapi.flightaware.com/aeroapi"
AEROAPI_KEY = os.getenv("AEROAPI_KEY", "")
if not AEROAPI_KEY:
    logger.warning("AEROAPI_KEY is not set; FlightAware requests will be unauthorized")

# Shared async HTTP client for FlightAware calls (30s covers slow queries)
http_client = httpx.AsyncClient(timeout=30.0)

# OpenAI-compatible client pointed at the archgw gateway (no real key needed)
archgw_client = AsyncOpenAI(
    base_url=LLM_GATEWAY_ENDPOINT,
    api_key="EMPTY",
)
# System prompt for flight agent
SYSTEM_PROMPT = """You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather and flights based on the structured data provided to you.
CRITICAL INSTRUCTIONS:
1. DATA STRUCTURE:
WEATHER DATA:
- You will receive weather data as JSON in a system message
- The data contains a "location" field (string) and a "forecast" array
- Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset
- Some fields may be null/None - handle these gracefully
FLIGHT DATA:
- You will receive flight information in a system message
- Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal
- Information may include both scheduled and estimated times
- Some fields may be unavailable - handle these gracefully
2. WEATHER HANDLING:
- For single-day queries: Use temperature_c/temperature_f (current/primary temperature)
- For multi-day forecasts: Use temperature_max_c and temperature_min_c when available
- Always provide temperatures in both Celsius and Fahrenheit when available
- If temperature is null, say "temperature data unavailable" rather than making up numbers
- Use exact condition descriptions provided (e.g., "Clear sky", "Rainy", "Partly Cloudy")
- Add helpful context when appropriate (e.g., "perfect for outdoor activities" for clear skies)
3. FLIGHT HANDLING:
- Present flight information clearly with airline name and flight number
- Include departure and arrival times with time zones when provided
- Mention origin and destination airports with their codes
- Include gate and terminal information when available
- Note aircraft type if relevant to the query
- Highlight any status updates (delays, early arrivals, etc.)
- For multiple flights, list them in chronological order by departure time
- If specific details are missing, acknowledge this rather than inventing information
4. MULTI-PART QUERIES:
- Users may ask about both weather and flights in one message
- Answer ALL parts of the query that you have data for
- Organize your response logically - typically weather first, then flights, or vice versa based on the query
- Provide complete information for each topic without mentioning other agents
- If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data
5. ERROR HANDLING:
- If weather forecast contains an "error" field, acknowledge the issue politely
- If temperature or condition is null/None, mention that specific data is unavailable
- If flight details are incomplete, state which information is unavailable
- Never invent or guess weather or flight data - only use what's provided
- If location couldn't be determined, acknowledge this but still provide available data
6. RESPONSE FORMAT:
For Weather:
- Single-day queries: Provide current conditions, temperature, and condition
- Multi-day forecasts: List each day with date, day name, high/low temps, and condition
- Include sunrise/sunset times when available and relevant
For Flights:
- List flights with clear numbering or bullet points
- Include key details: airline, flight number, departure/arrival times, airports
- Add gate, terminal, and status information when available
- For multiple flights, organize chronologically
General:
- Use natural, conversational language
- Be concise but complete
- Format dates and times clearly
- Use bullet points or numbered lists for clarity
7. LOCATION HANDLING:
- Always mention location names from the data
- For flights, clearly state origin and destination cities/airports
- If locations differ from what the user asked, acknowledge this politely
8. RESPONSE STYLE:
- Be friendly and professional
- Use natural language, not technical jargon
- Provide information in a logical, easy-to-read format
- When answering multi-part queries, create a cohesive response that addresses all aspects
Remember: Only use the data provided. Never fabricate weather or flight information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for."""
FLIGHT_EXTRACTION_PROMPT = """You are a flight information extraction assistant. Your ONLY job is to extract flight-related information from user messages and convert it to structured data.
CRITICAL RULES:
1. Extract origin city/airport and destination city/airport from the message AND conversation context
2. Extract any mentioned dates or time references
3. **CROSS-AGENT REFERENCE HANDLING - CRITICAL**: When extracting flight info, use cities mentioned in weather queries as context
- If a weather query mentions a city (e.g., "weather in Seattle"), use that city to fill missing flight origin/destination
- Example: "What is the weather in Seattle and what flight goes to New York direct?"
Weather mentions "Seattle" Use Seattle as flight origin
Extract origin=Seattle, destination=New York
- Example: "What is the weather in Atlanta and what flight goes from Detroit to Atlanta?"
Extract origin=Detroit, destination=Atlanta (both explicitly mentioned in flight part)
- **ALWAYS check conversation history for cities mentioned in weather queries** - use them to infer missing flight origin/destination
4. **MULTI-PART QUERY HANDLING**: When the user asks about both weather/flights/currency in one query, extract ONLY the flight-related parts
- Look for patterns like "flight from X to Y", "flights from X", "flights to Y", "flight goes from X to Y"
- Example: "What is the weather in Atlanta and what flight goes from Detroit to Atlanta?" Extract origin=Detroit, destination=Atlanta (ignore Atlanta weather part)
- Example: "What's the weather in Seattle, and what is one flight that goes direct to Atlanta?" Extract origin=Seattle (from weather context), destination=Atlanta
- Focus on the flight route, but use weather context to fill missing parts
5. PAY ATTENTION TO CONVERSATION CONTEXT - THIS IS CRITICAL:
- If previous messages mention cities/countries, use that context to resolve pronouns and incomplete queries
- Example 1: Previous: "What's the weather in Istanbul?" Current: "Do they fly out from Seattle?"
"they" refers to Istanbul origin=Istanbul, destination=Seattle
- Example 2: Previous: "What's the weather in London?" Current: "What flights go from there to Seattle?"
"there" = London origin=London, destination=Seattle
- Example 3: Previous: "What's the exchange rate for Turkey?" Current: "Do they have flights to Seattle?"
"they" refers to Turkey/Istanbul origin=Istanbul, destination=Seattle
- Example 4: Previous: "What is the weather in Seattle?" Current: "What flight goes to New York direct?"
Seattle mentioned in weather query Use Seattle as origin origin=Seattle, destination=New York
6. For follow-up questions like "Do they fly out from X?" or "Do they have flights to Y?":
- Look for previously mentioned cities/countries in the conversation
- If a city was mentioned earlier, use it as the missing origin or destination
- If the question mentions a city explicitly, use that city
- Try to infer the complete route from context
7. Extract dates and time references:
- "tomorrow", "today", "next week", specific dates
- Convert relative dates to ISO format (YYYY-MM-DD) when possible
8. Determine the origin and destination based on context:
- "from X to Y" origin=X, destination=Y
- "X to Y" origin=X, destination=Y
- "flight goes from X to Y" origin=X, destination=Y
- "flights from X" origin=X, destination=null (UNLESS conversation context provides a previously mentioned city - use that as destination)
- "flights to Y" origin=null (UNLESS conversation context provides a previously mentioned city - use that as origin), destination=Y
- "What flights go direct from X?" origin=X, destination=from conversation context (if a city was mentioned earlier)
- "Do they fly out from X?" origin=X (or from context), destination=from context (check ALL previous messages for mentioned cities)
- "Do they have flights to Y?" origin=from context (check ALL previous messages), destination=Y
- CRITICAL: When only one part (origin OR destination) is provided, ALWAYS check conversation history for the missing part
8. Return your response as a JSON object with the following structure:
{
"origin": "London" or null,
"destination": "Seattle" or null,
"date": "2025-12-20" or null,
"origin_airport_code": "LHR" or null,
"destination_airport_code": "SEA" or null
}
9. If you cannot determine a value, use null for that field
10. Use city names (not airport codes) in origin/destination fields - airport codes will be resolved separately
11. Ignore error messages, HTML tags, and assistant responses
12. Extract from the most recent user message BUT use conversation context to resolve references
13. For dates: Use ISO format (YYYY-MM-DD). If relative date like "tomorrow", calculate the actual date
14. IMPORTANT: When a follow-up question mentions one city but context has another city, try to infer the complete route
Examples with context:
- "What is the weather in Atlanta and what flight goes from Detroit to Atlanta?" {"origin": "Detroit", "destination": "Atlanta", "date": null, "origin_airport_code": null, "destination_airport_code": null}
- "What is the weather in Seattle and what flight goes to New York direct?" {"origin": "Seattle", "destination": "New York", "date": null, "origin_airport_code": null, "destination_airport_code": null} (Seattle from weather context)
- Conversation: "What's the weather in Istanbul?" Current: "Do they fly out from Seattle?" {"origin": "Istanbul", "destination": "Seattle", "date": null, "origin_airport_code": null, "destination_airport_code": null}
- Conversation: "What's the weather in Istanbul?" Current: "What flights go direct from Seattle?" {"origin": "Seattle", "destination": "Istanbul", "date": null, "origin_airport_code": null, "destination_airport_code": null} (Istanbul from previous context)
- Conversation: "What's the weather in London?" Current: "What flights go from there to Seattle?" {"origin": "London", "destination": "Seattle", "date": null, "origin_airport_code": null, "destination_airport_code": null}
- Conversation: "Tell me about Istanbul" Current: "Do they have flights to Seattle?" {"origin": "Istanbul", "destination": "Seattle", "date": null, "origin_airport_code": null, "destination_airport_code": null}
- "What flights go from London to Seattle?" {"origin": "London", "destination": "Seattle", "date": null, "origin_airport_code": null, "destination_airport_code": null}
- "Show me flights to New York tomorrow" {"origin": null, "destination": "New York", "date": "2025-12-21", "origin_airport_code": null, "destination_airport_code": null}
- "Flights from LAX to JFK" {"origin": "Los Angeles", "destination": "New York", "date": null, "origin_airport_code": "LAX", "destination_airport_code": "JFK"}
Now extract the flight information from this message, considering the conversation context:"""
def _empty_flight_info():
    """Return a fresh all-None extraction result (new dict per call so
    callers may safely mutate it)."""
    return {
        "origin": None,
        "destination": None,
        "date": None,
        "origin_airport_code": None,
        "destination_airport_code": None,
    }


def _city_from_weather_context(conversation_context, exclude_city):
    """Find the most recently mentioned city in weather/forecast user
    messages, skipping `exclude_city` (case-insensitive).

    Scans `conversation_context` newest-first; returns the first matching
    city name, or None when nothing suitable is found.
    """
    patterns = [
        r"(?:weather|forecast).*?(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
        r"weather\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
    ]
    for msg in reversed(conversation_context):
        if msg["role"] != "user":
            continue
        content = msg["content"]
        if "weather" not in content.lower() and "forecast" not in content.lower():
            continue
        for pattern in patterns:
            city_match = re.search(pattern, content, re.IGNORECASE)
            if city_match:
                candidate = city_match.group(1).strip()
                if candidate.lower() != exclude_city.lower():
                    return candidate
    return None


async def extract_flight_info_from_messages(messages):
    """Extract flight route information from a conversation using the LLM.

    Builds a filtered conversation context, asks FLIGHT_EXTRACTION_MODEL to
    return a JSON object (origin, destination, date, airport codes), then
    applies regex fallbacks that infer a missing origin or destination from
    cities mentioned in earlier weather queries.

    Returns a dict with keys: origin, destination, date,
    origin_airport_code, destination_airport_code (values may be None).
    """
    # Drop error echoes / HTML fragments so they don't confuse the model.
    conversation_context = []
    for msg in messages:
        content = msg.content.strip()
        content_lower = content.lower()
        if any(
            pattern in content_lower
            for pattern in ["<", ">", "error:", "i apologize", "i'm having trouble"]
        ):
            continue
        conversation_context.append({"role": msg.role, "content": content})

    user_messages = [msg for msg in messages if msg.role == "user"]
    if not user_messages:
        logger.warning("No user messages found")
        return _empty_flight_info()

    # CRITICAL: Always preserve the FIRST user message (original query) for
    # multi-agent scenarios. When Plano processes multiple agents, it may add
    # assistant responses that get filtered out, but we need the original
    # user query to remain available as a fallback.
    original_user_message = user_messages[0].content.strip()

    # Prefer the most recent user message that is not a JSON-encoded
    # assistant response or an error echo (handles follow-up queries).
    user_content = None
    for msg in reversed(user_messages):
        content = msg.content.strip()
        content_lower = content.lower()
        # Likely a JSON-encoded assistant history, not a real user query.
        # (The original code tested the same prefix twice; one check suffices.)
        if content.startswith("[{"):
            continue
        if any(
            pattern in content_lower
            for pattern in [
                '"role": "assistant"',
                '"role":"assistant"',
                "error:",
            ]
        ):
            continue
        user_content = content
        break

    # Fall back to the original query when every recent message was filtered.
    if not user_content and original_user_message:
        if not original_user_message.startswith("[{"):
            user_content = original_user_message
            logger.info(f"Using original user message: {user_content[:200]}")

    if not user_content:
        logger.warning("No valid user message found")
        return _empty_flight_info()

    try:
        logger.info(f"Extracting flight info from user message: {user_content[:200]}")
        logger.info(
            f"Using conversation context with {len(conversation_context)} messages"
        )
        # Send the extraction prompt plus at most the last 10 context messages.
        llm_messages = [{"role": "system", "content": FLIGHT_EXTRACTION_PROMPT}]
        context_messages = (
            conversation_context[-10:]
            if len(conversation_context) > 10
            else conversation_context
        )
        for msg in context_messages:
            llm_messages.append({"role": msg["role"], "content": msg["content"]})
        response = await archgw_client.chat.completions.create(
            model=FLIGHT_EXTRACTION_MODEL,
            messages=llm_messages,
            temperature=0.1,
            max_tokens=300,
        )
        extracted_text = response.choices[0].message.content.strip()
        try:
            # Strip markdown code fences the model may wrap the JSON in.
            if "```json" in extracted_text:
                extracted_text = (
                    extracted_text.split("```json")[1].split("```")[0].strip()
                )
            elif "```" in extracted_text:
                extracted_text = extracted_text.split("```")[1].split("```")[0].strip()
            flight_info = json.loads(extracted_text)
            # Resolve relative dates the model may have left unresolved.
            date = flight_info.get("date")
            if date:
                today = datetime.now().date()
                if date.lower() == "tomorrow":
                    date = (today + timedelta(days=1)).strftime("%Y-%m-%d")
                elif date.lower() == "today":
                    date = today.strftime("%Y-%m-%d")
                elif "next week" in date.lower():
                    date = (today + timedelta(days=7)).strftime("%Y-%m-%d")
            result = {
                "origin": flight_info.get("origin"),
                "destination": flight_info.get("destination"),
                "date": date,
                "origin_airport_code": flight_info.get("origin_airport_code"),
                "destination_airport_code": flight_info.get("destination_airport_code"),
            }
            # Fallbacks: infer the missing endpoint from cities mentioned in
            # weather queries elsewhere in the conversation.
            if not result["origin"] and result["destination"]:
                inferred = _city_from_weather_context(
                    conversation_context, result["destination"]
                )
                if inferred:
                    logger.info(
                        f"Inferring origin from weather context in extraction: {inferred}"
                    )
                    result["origin"] = inferred
            if result["origin"] and not result["destination"]:
                inferred = _city_from_weather_context(
                    conversation_context, result["origin"]
                )
                if inferred:
                    logger.info(
                        f"Inferring destination from conversation context: {inferred}"
                    )
                    result["destination"] = inferred
            logger.info(f"LLM extracted flight info: {result}")
            return result
        except json.JSONDecodeError as e:
            logger.warning(
                f"Failed to parse JSON from LLM response: {extracted_text}, error: {e}"
            )
            return _empty_flight_info()
    except Exception as e:
        logger.error(f"Error extracting flight info with LLM: {e}, using defaults")
        return _empty_flight_info()
AIRPORT_CODE_RESOLUTION_PROMPT = """You are an airport code resolution assistant. Your ONLY job is to convert city names or locations to their primary airport IATA/ICAO codes.
CRITICAL RULES:
1. Convert city names, locations, or airport names to their primary airport code (prefer IATA 3-letter codes like JFK, LHR, LAX)
2. For cities with multiple airports, choose the PRIMARY/MOST COMMONLY USED airport:
- London LHR (Heathrow, not Gatwick or Stansted)
- New York JFK (not LGA or EWR)
- Paris CDG (not ORY)
- Tokyo NRT (Narita, not HND)
- Beijing PEK (not PKX)
- Shanghai PVG (not SHA)
3. If the input is already an airport code (3-letter IATA or 4-letter ICAO), return it as-is
4. Return ONLY the airport code, nothing else
5. Use standard IATA codes when available, ICAO codes as fallback
6. If you cannot determine the airport code, return "NOT_FOUND"
Examples:
- "London" "LHR"
- "New York" "JFK"
- "Los Angeles" "LAX"
- "Seattle" "SEA"
- "Paris" "CDG"
- "Tokyo" "NRT"
- "JFK" "JFK"
- "LAX" "LAX"
- "LHR" "LHR"
- "Unknown City" "NOT_FOUND"
Now convert this location to an airport code:"""
async def resolve_airport_code(city_name: str) -> Optional[str]:
    """Resolve a city name to an airport code.

    Asks the extraction model for the primary IATA/ICAO code, then tries to
    validate (and possibly canonicalize) it against the FlightAware airports
    endpoint, falling back to the LLM's answer when validation cannot be
    completed. Returns None for empty input or an unresolvable city.
    """
    if not city_name:
        return None
    try:
        logger.info(f"Resolving airport code for: {city_name}")
        completion = await archgw_client.chat.completions.create(
            model=FLIGHT_EXTRACTION_MODEL,
            messages=[
                {"role": "system", "content": AIRPORT_CODE_RESOLUTION_PROMPT},
                {"role": "user", "content": city_name},
            ],
            temperature=0.1,
            max_tokens=50,
        )
        code = completion.choices[0].message.content.strip().upper()
        code = code.strip("\"'`.,!? \n\t")
        if not code or code == "NOT_FOUND":
            logger.warning(f"LLM could not resolve airport code for {city_name}")
            return None
        logger.info(f"LLM resolved {city_name} to airport code: {code}")
        # Validate against FlightAware; prefer the canonical code it reports.
        try:
            validation_response = await http_client.get(
                f"{AEROAPI_BASE_URL}/airports/{code}",
                headers={"x-apikey": AEROAPI_KEY},
            )
            if validation_response.status_code != 200:
                logger.warning(
                    f"API validation failed for {code}, but using LLM result"
                )
                return code
            data = validation_response.json()
            validated_code = data.get("code_icao") or data.get("code_iata")
            if validated_code:
                logger.info(f"Validated airport code {code}{validated_code}")
                return validated_code
            return code
        except Exception as e:
            logger.warning(
                f"API validation error for {code}: {e}, using LLM result"
            )
            return code
    except Exception as e:
        logger.error(f"Error resolving airport code for {city_name} with LLM: {e}")
        return None
async def get_flights_between_airports(
    origin_code: str, dest_code: str, start_date: str = None, end_date: str = None
) -> Optional[dict]:
    """Fetch nonstop flights between two airports from FlightAware AeroAPI.

    Queries the /airports/{origin}/flights/to/{dest} endpoint (optionally
    bounded by start/end timestamps) and flattens the first segment of each
    returned flight into a plain dict. Returns a summary dict with
    origin_code, destination_code, flights, and num_flights — or None on
    any HTTP/parse error.
    """
    try:
        url = f"{AEROAPI_BASE_URL}/airports/{origin_code}/flights/to/{dest_code}"
        headers = {"x-apikey": AEROAPI_KEY}
        params = {}
        if start_date:
            params["start"] = start_date
        if end_date:
            params["end"] = end_date
        params["connection"] = "nonstop"
        params["max_pages"] = 1
        response = await http_client.get(url, headers=headers, params=params)
        if response.status_code != 200:
            logger.warning(
                f"FlightAware API returned status {response.status_code} for {origin_code} to {dest_code}"
            )
            return None
        data = response.json()

        # Field names copied verbatim from each flight's first segment;
        # order matters because it is preserved in the serialized output.
        leading_fields = (
            "ident", "ident_icao", "ident_iata",
            "operator", "operator_iata", "flight_number",
        )
        trailing_fields = (
            "scheduled_out", "estimated_out", "actual_out",
            "scheduled_off", "estimated_off", "actual_off",
            "scheduled_on", "estimated_on", "actual_on",
            "scheduled_in", "estimated_in", "actual_in",
            "status", "aircraft_type",
            "departure_delay", "arrival_delay",
            "gate_origin", "gate_destination",
            "terminal_origin", "terminal_destination",
            "cancelled", "diverted",
        )

        def endpoint_summary(endpoint: dict) -> dict:
            # Keep only the identifying subset of an origin/destination record.
            return {
                "code": endpoint.get("code"),
                "code_iata": endpoint.get("code_iata"),
                "name": endpoint.get("name"),
                "city": endpoint.get("city"),
            }

        flights = []
        for flight_group in data.get("flights", []):
            segments = flight_group.get("segments", [])
            if not segments:
                continue
            segment = segments[0]
            flight_info = {field: segment.get(field) for field in leading_fields}
            flight_info["origin"] = endpoint_summary(segment.get("origin", {}))
            flight_info["destination"] = endpoint_summary(
                segment.get("destination", {})
            )
            for field in trailing_fields:
                flight_info[field] = segment.get(field)
            flights.append(flight_info)

        return {
            "origin_code": origin_code,
            "destination_code": dest_code,
            "flights": flights,
            "num_flights": len(flights),
        }
    except httpx.HTTPError as e:
        logger.error(
            f"HTTP error fetching flights from {origin_code} to {dest_code}: {e}"
        )
        return None
    except json.JSONDecodeError as e:
        logger.error(f"Failed to parse JSON response from FlightAware API: {e}")
        return None
    except Exception as e:
        logger.error(f"Unexpected error fetching flights: {e}")
        return None
app = FastAPI(title="Flight Information Agent", version="1.0.0")
async def prepare_flight_messages(request_body: ChatCompletionRequest):
    """Prepare the LLM message list for a flight query.

    Extracts origin/destination/date from the conversation, infers a missing
    endpoint from cities mentioned in weather queries, resolves airport
    codes, fetches matching flights, and returns a message list consisting
    of the system prompt, an assistant context message describing the flight
    data (or what is missing), and the full conversation history.
    """
    flight_info = await extract_flight_info_from_messages(request_body.messages)
    origin = flight_info.get("origin")
    destination = flight_info.get("destination")
    date = flight_info.get("date")
    origin_code = flight_info.get("origin_airport_code")
    dest_code = flight_info.get("destination_airport_code")

    # Regexes matching "weather in <City>" / "forecast for <City>" mentions.
    weather_patterns = [
        r"(?:weather|forecast).*?(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
        r"weather\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
    ]

    # Enhanced context extraction: when the user asks "weather in X and
    # flight to Y", use X as the missing flight origin.
    if not origin and destination:
        for msg in request_body.messages:
            if msg.role == "user":
                content = msg.content
                for pattern in weather_patterns:
                    matches = re.findall(pattern, content, re.IGNORECASE)
                    for match in matches:
                        city = match.strip()
                        # Only accept a city that differs from the destination
                        # and looks like a plausible city name (<= 3 words).
                        if (
                            city.lower() != destination.lower()
                            and len(city.split()) <= 3
                        ):
                            origin = city
                            logger.info(
                                f"Inferred origin from weather context: {origin} (destination: {destination})"
                            )
                            flight_info["origin"] = origin
                            break
                    if origin:
                        break
            if origin:
                break

    # If destination is missing but origin is present, infer it from cities
    # mentioned earlier. BUGFIX: candidates were previously collected in a
    # set and the "first" element taken — nondeterministic across runs due
    # to string hash randomization. Use an ordered, deduplicated list so the
    # earliest-mentioned city is chosen deterministically.
    if origin and not destination:
        mentioned_cities = []
        for msg in request_body.messages:
            if msg.role == "user":
                content = msg.content
                for pattern in weather_patterns:
                    matches = re.findall(pattern, content, re.IGNORECASE)
                    for match in matches:
                        city = match.strip()
                        if (
                            city.lower() != origin.lower()
                            and len(city.split()) <= 3
                            and city not in mentioned_cities
                        ):
                            mentioned_cities.append(city)
        if mentioned_cities:
            destination = mentioned_cities[0]
            logger.info(
                f"Inferred destination from conversation context: {destination}"
            )
            flight_info["destination"] = destination

    # Resolve city names to airport codes when not already supplied.
    if origin and not origin_code:
        origin_code = await resolve_airport_code(origin)
    if destination and not dest_code:
        dest_code = await resolve_airport_code(destination)

    # Default to tomorrow and search the whole day (UTC bounds).
    if not date:
        date = (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d")
    start_date = f"{date}T00:00:00Z"
    end_date = f"{date}T23:59:59Z"

    flight_data = None
    if origin_code and dest_code:
        flight_data = await get_flights_between_airports(
            origin_code, dest_code, start_date, end_date
        )
    else:
        logger.warning(
            f"Cannot fetch flights: origin_code={origin_code}, dest_code={dest_code}"
        )

    # Build context message based on what we have
    if flight_data and flight_data.get("flights"):
        flight_context = f"""
Flight search results for {origin or origin_code} to {destination or dest_code} on {date}:
{json.dumps(flight_data, indent=2)}
Use this data to answer the user's flight query. Present the flights clearly with all relevant details.
"""
    elif origin_code and not dest_code:
        # We have origin but no destination - this is a follow-up question
        flight_context = f"""
The user is asking about flights from {origin or origin_code}, but no destination was specified.
From the conversation context, it appears the user may be asking about flights from {origin or origin_code} to a previously mentioned location, or they may need to specify a destination.
Check the conversation history to see if a destination was mentioned earlier. If so, you can mention that you'd be happy to search for flights from {origin or origin_code} to that destination. If not, politely ask the user to specify both origin and destination cities.
Example response: "I can help you find flights from {origin or origin_code}! Could you please tell me which city you'd like to fly to? For example, 'flights from {origin or origin_code} to Seattle' or 'flights from {origin or origin_code} to Istanbul'."
"""
    elif dest_code and not origin_code:
        # We have destination but no origin
        flight_context = f"""
The user is asking about flights to {destination or dest_code}, but no origin was specified.
From the conversation context, it appears the user may be asking about flights to {destination or dest_code} from a previously mentioned location, or they may need to specify an origin.
Check the conversation history to see if an origin was mentioned earlier. If so, you can mention that you'd be happy to search for flights from that origin to {destination or dest_code}. If not, politely ask the user to specify both origin and destination cities.
"""
    else:
        # Neither origin nor destination
        flight_context = f"""
Flight search attempted but both origin and destination are missing.
The user's query was incomplete. Check the conversation history to see if origin or destination cities were mentioned earlier. If so, use that context to help the user. If not, politely ask the user to specify both origin and destination cities for their flight search.
Example: "I'd be happy to help you find flights! Could you please tell me both the departure city and destination city? For example, 'flights from Seattle to Istanbul' or 'flights from Istanbul to Seattle'."
"""

    response_messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "assistant", "content": flight_context},
    ]
    # Add conversation history
    for msg in request_body.messages:
        response_messages.append({"role": msg.role, "content": msg.content})
    return response_messages
@app.post("/v1/chat/completions")
async def chat_completion_http(request: Request, request_body: ChatCompletionRequest):
    """HTTP endpoint for chat completions; streams the answer as SSE."""
    logger.info(f"Received flight request with {len(request_body.messages)} messages")
    # Propagate the W3C trace context so downstream spans join the trace.
    traceparent_header = request.headers.get("traceparent")
    if traceparent_header:
        logger.info(f"Received traceparent header: {traceparent_header}")
    sse_headers = {"content-type": "text/event-stream"}
    body_stream = stream_chat_completions(request_body, traceparent_header)
    return StreamingResponse(body_stream, media_type="text/plain", headers=sse_headers)
async def stream_chat_completions(
    request_body: ChatCompletionRequest, traceparent_header: str = None
):
    """Generate streaming chat completions.

    Yields OpenAI-style SSE lines ("data: {...}\\n\\n"): one chunk per content
    delta from the upstream model, then a final chunk whose non-standard
    "message" field carries the complete assistant reply JSON-encoded as a
    one-entry history list (consumed by the orchestrator), then "[DONE]".
    On any failure a single apology chunk is streamed instead.

    Args:
        request_body: Incoming chat completion request (messages, model, etc.).
        traceparent_header: Optional W3C trace context to forward upstream.
    """
    logger.info("Preparing flight messages for LLM")
    # Prepare messages with flight data
    response_messages = await prepare_flight_messages(request_body)
    try:
        logger.info(
            f"Calling archgw at {LLM_GATEWAY_ENDPOINT} to generate flight response"
        )
        # Prepare extra headers
        extra_headers = {"x-envoy-max-retries": "3"}
        if traceparent_header:
            extra_headers["traceparent"] = traceparent_header
        response_stream = await archgw_client.chat.completions.create(
            model=FLIGHT_MODEL,
            messages=response_messages,
            temperature=request_body.temperature or 0.7,
            max_tokens=request_body.max_tokens or 1000,
            stream=True,
            extra_headers=extra_headers,
        )
        completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
        created_time = int(time.time())
        # Accumulate deltas so the full reply can be attached to the final chunk.
        collected_content = []
        async for chunk in response_stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                collected_content.append(content)
                stream_chunk = ChatCompletionStreamResponse(
                    id=completion_id,
                    created=created_time,
                    model=request_body.model,
                    choices=[
                        {
                            "index": 0,
                            "delta": {"content": content},
                            "finish_reason": None,
                        }
                    ],
                )
                yield f"data: {stream_chunk.model_dump_json()}\n\n"
        full_response = "".join(collected_content)
        # One-entry history list; the orchestrator reads it from the final chunk.
        updated_history = [{"role": "assistant", "content": full_response}]
        logger.info(f"Full flight agent response: {full_response}")
        final_chunk = ChatCompletionStreamResponse(
            id=completion_id,
            created=created_time,
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                    "message": {
                        "role": "assistant",
                        "content": json.dumps(updated_history),
                    },
                }
            ],
        )
        yield f"data: {final_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
    except Exception as e:
        # Best-effort degradation: stream a polite apology instead of erroring.
        logger.error(f"Error generating flight response: {e}")
        error_chunk = ChatCompletionStreamResponse(
            id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
            created=int(time.time()),
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {
                        "content": "I apologize, but I'm having trouble retrieving flight information right now. Please try again."
                    },
                    "finish_reason": "stop",
                }
            ],
        )
        yield f"data: {error_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
@app.get("/health")
async def health_check():
    """Liveness probe: report that the flight information agent is up."""
    payload = {"status": "healthy", "agent": "flight_information"}
    return payload
if __name__ == "__main__":
    # Dev convenience: run the flight agent directly on its default port.
    # The packaged entry point is start_server() below.
    uvicorn.run(app, host="0.0.0.0", port=10520)
def start_server(host: str = "localhost", port: int = 10520):
    """Run the flight agent's REST API under uvicorn (blocking call)."""
    # dictConfig-style logging so agent output is tagged with [FLIGHT_AGENT].
    logging_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "%(asctime)s - [FLIGHT_AGENT] - %(levelname)s - %(message)s",
            },
        },
        "handlers": {
            "default": {
                "formatter": "default",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
            },
        },
        "root": {
            "level": "INFO",
            "handlers": ["default"],
        },
    }
    uvicorn.run(app, host=host, port=port, log_config=logging_config)

View file

@ -0,0 +1,674 @@
import json
import re
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI
import os
import logging
import time
import uuid
import uvicorn
from datetime import datetime, timedelta
import httpx
from typing import Optional
from urllib.parse import quote
from .api import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatCompletionStreamResponse,
)
# Set up logging (stdout, tagged so supervisord output is attributable)
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - [WEATHER_AGENT] - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
# Configuration for archgw LLM gateway.
# WEATHER_MODEL generates the user-facing answer; LOCATION_MODEL is a cheaper
# model used only for location extraction.
LLM_GATEWAY_ENDPOINT = os.getenv("LLM_GATEWAY_ENDPOINT", "http://localhost:12000/v1")
WEATHER_MODEL = "openai/gpt-4o"
LOCATION_MODEL = "openai/gpt-4o-mini"
# HTTP client for API calls (Open-Meteo geocoding + forecast); 10s cap per request.
http_client = httpx.AsyncClient(timeout=10.0)
# System prompt for weather agent.
# NOTE(review): despite the name, this prompt covers BOTH weather and flight
# handling — it appears to be shared travel-planner wording; confirm whether the
# flight agent uses an identical copy that should be deduplicated.
SYSTEM_PROMPT = """You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather and flights based on the structured data provided to you.
CRITICAL INSTRUCTIONS:
1. DATA STRUCTURE:
WEATHER DATA:
- You will receive weather data as JSON in a system message
- The data contains a "location" field (string) and a "forecast" array
- Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset
- Some fields may be null/None - handle these gracefully
FLIGHT DATA:
- You will receive flight information in a system message
- Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal
- Information may include both scheduled and estimated times
- Some fields may be unavailable - handle these gracefully
2. WEATHER HANDLING:
- For single-day queries: Use temperature_c/temperature_f (current/primary temperature)
- For multi-day forecasts: Use temperature_max_c and temperature_min_c when available
- Always provide temperatures in both Celsius and Fahrenheit when available
- If temperature is null, say "temperature data unavailable" rather than making up numbers
- Use exact condition descriptions provided (e.g., "Clear sky", "Rainy", "Partly Cloudy")
- Add helpful context when appropriate (e.g., "perfect for outdoor activities" for clear skies)
3. FLIGHT HANDLING:
- Present flight information clearly with airline name and flight number
- Include departure and arrival times with time zones when provided
- Mention origin and destination airports with their codes
- Include gate and terminal information when available
- Note aircraft type if relevant to the query
- Highlight any status updates (delays, early arrivals, etc.)
- For multiple flights, list them in chronological order by departure time
- If specific details are missing, acknowledge this rather than inventing information
4. MULTI-PART QUERIES:
- Users may ask about both weather and flights in one message
- Answer ALL parts of the query that you have data for
- Organize your response logically - typically weather first, then flights, or vice versa based on the query
- Provide complete information for each topic without mentioning other agents
- If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data
5. ERROR HANDLING:
- If weather forecast contains an "error" field, acknowledge the issue politely
- If temperature or condition is null/None, mention that specific data is unavailable
- If flight details are incomplete, state which information is unavailable
- Never invent or guess weather or flight data - only use what's provided
- If location couldn't be determined, acknowledge this but still provide available data
6. RESPONSE FORMAT:
For Weather:
- Single-day queries: Provide current conditions, temperature, and condition
- Multi-day forecasts: List each day with date, day name, high/low temps, and condition
- Include sunrise/sunset times when available and relevant
For Flights:
- List flights with clear numbering or bullet points
- Include key details: airline, flight number, departure/arrival times, airports
- Add gate, terminal, and status information when available
- For multiple flights, organize chronologically
General:
- Use natural, conversational language
- Be concise but complete
- Format dates and times clearly
- Use bullet points or numbered lists for clarity
7. LOCATION HANDLING:
- Always mention location names from the data
- For flights, clearly state origin and destination cities/airports
- If locations differ from what the user asked, acknowledge this politely
8. RESPONSE STYLE:
- Be friendly and professional
- Use natural language, not technical jargon
- Provide information in a logical, easy-to-read format
- When answering multi-part queries, create a cohesive response that addresses all aspects
Remember: Only use the data provided. Never fabricate weather or flight information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for."""
async def geocode_city(city: str) -> Optional[dict]:
    """Resolve a city name to coordinates via the Open-Meteo geocoding API.

    Returns a dict with "latitude", "longitude" and "name", or None when the
    lookup fails for any reason (non-200 status, empty results, exceptions).
    """
    url = (
        f"https://geocoding-api.open-meteo.com/v1/search"
        f"?name={quote(city)}&count=1&language=en&format=json"
    )
    try:
        response = await http_client.get(url)
        if response.status_code != 200:
            logger.warning(
                f"Geocoding API returned status {response.status_code} for city: {city}"
            )
            return None
        payload = response.json()
        results = payload.get("results") or []
        if not results:
            logger.warning(f"No geocoding results found for city: {city}")
            return None
        top_hit = results[0]
        return {
            "latitude": top_hit["latitude"],
            "longitude": top_hit["longitude"],
            "name": top_hit.get("name", city),
        }
    except Exception as e:
        logger.error(f"Error geocoding city {city}: {e}")
        return None
async def get_live_weather(
    latitude: float, longitude: float, days: int = 1
) -> Optional[dict]:
    """Fetch raw forecast JSON from the Open-Meteo API, or None on failure.

    The forecast window is capped at Open-Meteo's 16-day maximum.
    """
    try:
        query = (
            f"latitude={latitude}&"
            f"longitude={longitude}&"
            f"current=temperature_2m&"
            f"hourly=temperature_2m&"
            f"daily=sunrise,sunset,temperature_2m_max,temperature_2m_min,weather_code&"
            f"forecast_days={min(days, 16)}&"
            f"timezone=auto"
        )
        response = await http_client.get(f"https://api.open-meteo.com/v1/forecast?{query}")
        if response.status_code == 200:
            return response.json()
        logger.warning(f"Weather API returned status {response.status_code}")
        return None
    except Exception as e:
        logger.error(f"Error fetching weather data: {e}")
        return None
def weather_code_to_condition(weather_code: int) -> str:
    """Map a WMO weather interpretation code (WW) to a human-readable condition.

    Unknown codes fall back to "Cloudy".
    """
    condition_groups = (
        ({0}, "Clear sky"),
        ({1, 2, 3}, "Partly Cloudy"),
        ({45, 48}, "Foggy"),
        ({51, 53, 55, 56, 57}, "Drizzle"),
        ({61, 63, 65, 66, 67}, "Rainy"),
        ({71, 73, 75, 77}, "Snowy"),
        ({80, 81, 82}, "Rainy"),
        ({85, 86}, "Snowy"),
        ({95, 96, 99}, "Stormy"),
    )
    for codes, condition in condition_groups:
        if weather_code in codes:
            return condition
    return "Cloudy"
async def get_weather_data(location: str, days: int = 1):
    """Get live weather data for a location using Open-Meteo API.

    Geocodes ``location`` (falling back to "New York" when geocoding fails),
    fetches up to ``days`` days of forecast, and normalizes the response into
    ``{"location": str, "forecast": [day_info, ...]}``. On total failure it
    returns a single-entry forecast carrying an "error" field instead of raising.
    """
    geocode_result = await geocode_city(location)
    if not geocode_result:
        logger.warning(f"Could not geocode location: {location}, using fallback")
        geocode_result = await geocode_city("New York")
        if not geocode_result:
            # Even the fallback city failed to geocode: return an error placeholder.
            return {
                "location": location,
                "forecast": [
                    {
                        "date": datetime.now().strftime("%Y-%m-%d"),
                        "day_name": datetime.now().strftime("%A"),
                        "temperature_c": None,
                        "temperature_f": None,
                        "condition": "Unknown",
                        "error": "Could not retrieve weather data",
                    }
                ],
            }
    location_name = geocode_result["name"]
    latitude = geocode_result["latitude"]
    longitude = geocode_result["longitude"]
    weather_data = await get_live_weather(latitude, longitude, days)
    if not weather_data:
        # Geocoding succeeded but the forecast call failed: error placeholder
        # under the resolved location name.
        logger.warning("Could not fetch weather data for requested location")
        return {
            "location": location_name,
            "forecast": [
                {
                    "date": datetime.now().strftime("%Y-%m-%d"),
                    "day_name": datetime.now().strftime("%A"),
                    "temperature_c": None,
                    "temperature_f": None,
                    "condition": "Unknown",
                    "error": "Could not retrieve weather data",
                }
            ],
        }
    current_temp = weather_data.get("current", {}).get("temperature_2m")
    daily_data = weather_data.get("daily", {})
    forecast = []
    # Open-Meteo's "daily" block is a set of parallel arrays indexed by day;
    # every access below is bounds-checked because fields may be omitted.
    for i in range(min(days, len(daily_data.get("time", [])))):
        date_str = daily_data["time"][i]
        date_obj = datetime.fromisoformat(date_str.replace("Z", "+00:00"))
        temp_max = (
            daily_data.get("temperature_2m_max", [None])[i]
            if i < len(daily_data.get("temperature_2m_max", []))
            else None
        )
        temp_min = (
            daily_data.get("temperature_2m_min", [None])[i]
            if i < len(daily_data.get("temperature_2m_min", []))
            else None
        )
        weather_code = (
            daily_data.get("weather_code", [0])[i]
            if i < len(daily_data.get("weather_code", []))
            else 0
        )
        sunrise = (
            daily_data.get("sunrise", [None])[i]
            if i < len(daily_data.get("sunrise", []))
            else None
        )
        sunset = (
            daily_data.get("sunset", [None])[i]
            if i < len(daily_data.get("sunset", []))
            else None
        )
        # Primary temperature: daily max when present, else today's live
        # reading (first day only), else the daily min.
        temp_c = (
            temp_max if temp_max is not None else (current_temp if i == 0 else temp_min)
        )
        day_info = {
            "date": date_str.split("T")[0],
            "day_name": date_obj.strftime("%A"),
            "temperature_c": round(temp_c, 1) if temp_c is not None else None,
            "temperature_f": (
                round(temp_c * 9 / 5 + 32, 1) if temp_c is not None else None
            ),
            "temperature_max_c": round(temp_max, 1) if temp_max is not None else None,
            "temperature_min_c": round(temp_min, 1) if temp_min is not None else None,
            "condition": weather_code_to_condition(weather_code),
            # ISO timestamps like "2025-12-22T07:55" -> keep only the time part.
            "sunrise": sunrise.split("T")[1] if sunrise else None,
            "sunset": sunset.split("T")[1] if sunset else None,
        }
        forecast.append(day_info)
    return {"location": location_name, "forecast": forecast}
# Prompt for the cheap LOCATION_MODEL pass that pulls the weather-related
# location out of a (possibly multi-part) user query before geocoding.
LOCATION_EXTRACTION_PROMPT = """You are a location extraction assistant for WEATHER queries. Your ONLY job is to extract the geographic location (city, state, country, etc.) that the user is asking about for WEATHER information.
CRITICAL RULES:
1. Extract ONLY the location name associated with WEATHER questions - nothing else
2. Return just the location name in plain text (e.g., "London", "New York", "Paris, France")
3. **MULTI-PART QUERY HANDLING**: If the user mentions multiple locations in a multi-part query, extract ONLY the location mentioned in the WEATHER part
- Look for patterns like "weather in [location]", "forecast for [location]", "weather [location]"
- The location that appears WITH "weather" keywords is the weather location
- Example: "What's the weather in Seattle, and what is one flight that goes direct to Atlanta?" Extract "Seattle" (appears with "weather in")
- Example: "What is the weather in Atlanta and what flight goes from Detroit to Atlanta?" Extract "Atlanta" (appears with "weather in", even though Atlanta also appears in flight part)
- Example: "Weather in London and flights to Paris" Extract "London" (weather location)
- Example: "What flight goes from Detroit to Atlanta and what's the weather in Atlanta?" Extract "Atlanta" (appears with "weather in")
4. Look for patterns like "weather in [location]", "forecast for [location]", "weather [location]", "temperature in [location]"
5. Ignore error messages, HTML tags, and assistant responses
6. If no clear weather-related location is found, return exactly: "NOT_FOUND"
7. Clean the location name - remove words like "about", "for", "in", "the weather in", etc.
8. Return the location in a format suitable for geocoding (city name, or "City, State", or "City, Country")
Examples:
- "What's the weather in London?" "London"
- "Tell me about the weather for New York" "New York"
- "Weather forecast for Paris, France" "Paris, France"
- "What's the weather in Seattle, and what is one flight that goes direct to Atlanta?" "Seattle" (appears with "weather in")
- "What is the weather in Atlanta and what flight goes from Detroit to Atlanta?" "Atlanta" (appears with "weather in")
- "Weather in Istanbul and flights to Seattle" "Istanbul" (weather location)
- "What flight goes from Detroit to Atlanta and what's the weather in Atlanta?" "Atlanta" (appears with "weather in")
- "I'm going to Seattle" "Seattle" (if context suggests weather query)
- "What's happening?" "NOT_FOUND"
Now extract the WEATHER location from this message:"""
async def extract_location_from_messages(messages):
    """Extract the weather-related location from a conversation.

    Uses the cheap LOCATION_MODEL (via archgw) to pull the location out of the
    most recent valid user message, with regex fallbacks for both the
    "NOT_FOUND" case and LLM call failures. Defaults to "New York" when
    nothing usable can be extracted.

    Args:
        messages: Sequence of chat messages exposing ``role`` and ``content``
            attributes (ChatCompletionRequest.messages).

    Returns:
        str: A location name suitable for geocoding.
    """
    user_messages = [msg for msg in messages if msg.role == "user"]
    if not user_messages:
        logger.warning("No user messages found, using default: New York")
        return "New York"
    # CRITICAL: Always preserve the FIRST user message (original query) for multi-agent scenarios
    # When Plano processes multiple agents, it may add assistant responses that get filtered out,
    # but we need to always use the original user query
    original_user_message = user_messages[0].content.strip() if user_messages else None
    # Try to find a valid recent user message first (for follow-up queries)
    user_content = None
    for msg in reversed(user_messages):
        content = msg.content.strip()
        content_lower = content.lower()
        # Skip messages that are clearly JSON-encoded assistant responses or errors
        # But be less aggressive - only skip if it's clearly not a user query
        # (was: startswith("[{") or startswith("[{") - duplicated condition removed)
        if content.startswith("[{"):
            # Likely JSON-encoded assistant response
            continue
        if any(
            pattern in content_lower
            for pattern in [
                '"role": "assistant"',
                '"role":"assistant"',
                "error:",
            ]
        ):
            continue
        # Don't skip messages that just happen to contain these words naturally
        user_content = content
        break
    # Fallback to original user message if no valid recent message found
    if not user_content and original_user_message:
        # Check if original message is valid (not JSON-encoded)
        if not original_user_message.startswith("[{"):
            user_content = original_user_message
            logger.info(f"Using original user message: {user_content[:200]}")
    if not user_content:
        logger.warning("No valid user message found, using default: New York")
        return "New York"
    try:
        logger.info(
            f"Extracting weather location from user message: {user_content[:200]}"
        )
        # Build context from conversation history, dropping obvious error/HTML noise
        conversation_context = []
        for msg in messages:
            content = msg.content.strip()
            content_lower = content.lower()
            if any(
                pattern in content_lower
                for pattern in ["<", ">", "error:", "i apologize", "i'm having trouble"]
            ):
                continue
            conversation_context.append({"role": msg.role, "content": content})
        # Use last 5 messages for context
        context_messages = (
            conversation_context[-5:]
            if len(conversation_context) > 5
            else conversation_context
        )
        llm_messages = [{"role": "system", "content": LOCATION_EXTRACTION_PROMPT}]
        for msg in context_messages:
            llm_messages.append({"role": msg["role"], "content": msg["content"]})
        # Low temperature + tiny budget: we want a single location token back.
        response = await archgw_client.chat.completions.create(
            model=LOCATION_MODEL,
            messages=llm_messages,
            temperature=0.1,
            max_tokens=50,
        )
        location = response.choices[0].message.content.strip()
        location = location.strip("\"'`.,!?")
        if not location or location.upper() == "NOT_FOUND":
            # Fallback: Try regex extraction for weather patterns
            weather_patterns = [
                r"weather\s+(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
                r"forecast\s+(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
                r"weather\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
            ]
            for msg in reversed(context_messages):
                if msg["role"] == "user":
                    content = msg["content"]
                    for pattern in weather_patterns:
                        match = re.search(pattern, content, re.IGNORECASE)
                        if match:
                            potential_location = match.group(1).strip()
                            logger.info(
                                f"Fallback regex extracted weather location: {potential_location}"
                            )
                            return potential_location
            logger.warning(
                "LLM could not extract location from message, using default: New York"
            )
            return "New York"
        logger.info(f"LLM extracted weather location: {location}")
        return location
    except Exception as e:
        logger.error(f"Error extracting location with LLM: {e}, trying fallback regex")
        # Fallback regex extraction over the raw message objects
        try:
            for msg in reversed(messages):
                if msg.role == "user":
                    content = msg.content
                    weather_patterns = [
                        r"weather\s+(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
                        r"forecast\s+(?:in|for)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)?)",
                    ]
                    for pattern in weather_patterns:
                        match = re.search(pattern, content, re.IGNORECASE)
                        if match:
                            potential_location = match.group(1).strip()
                            logger.info(
                                f"Fallback regex extracted weather location: {potential_location}"
                            )
                            return potential_location
        except Exception:
            # Best-effort fallback only; fall through to the default below.
            pass
        logger.error("All extraction methods failed, using default: New York")
        return "New York"
# Initialize OpenAI client for archgw.
# api_key is "EMPTY" — presumably authentication is handled by the gateway
# itself rather than this client; confirm against archgw deployment config.
archgw_client = AsyncOpenAI(
    base_url=LLM_GATEWAY_ENDPOINT,
    api_key="EMPTY",
)
# FastAPI app for REST server
app = FastAPI(title="Weather Forecast Agent", version="1.0.0")
async def prepare_weather_messages(request_body: ChatCompletionRequest):
    """Prepare messages with weather data.

    Resolves the query's location, fetches a live forecast, and returns the
    system prompt plus a weather-context turn, followed by the original
    conversation history.
    """
    # Extract location from conversation using LLM
    location = await extract_location_from_messages(request_body.messages)
    # Determine if they want forecast (multi-day)
    last_user_msg = ""
    for msg in reversed(request_body.messages):
        if msg.role == "user":
            last_user_msg = msg.content.lower()
            break
    # 5-day forecast for "forecast"/"week" queries, otherwise a single day.
    days = 5 if "forecast" in last_user_msg or "week" in last_user_msg else 1
    # Get live weather data
    weather_data = await get_weather_data(location, days)
    # Create system message with weather data
    weather_context = f"""
Current weather data for {weather_data['location']}:
{json.dumps(weather_data, indent=2)}
Use this data to answer the user's weather query.
"""
    # Weather context is injected as an assistant turn ahead of the history.
    response_messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "assistant", "content": weather_context},
    ]
    # Add conversation history
    for msg in request_body.messages:
        response_messages.append({"role": msg.role, "content": msg.content})
    return response_messages
@app.post("/v1/chat/completions")
async def chat_completion_http(request: Request, request_body: ChatCompletionRequest):
    """HTTP endpoint for chat completions with streaming support."""
    logger.info(f"Received weather request with {len(request_body.messages)} messages")
    # NOTE(review): this logs the entire conversation payload at INFO level;
    # consider DEBUG if messages may contain sensitive user data.
    logger.info(
        f"messages detail json dumps: {json.dumps([msg.model_dump() for msg in request_body.messages], indent=2)}"
    )
    # Propagate the caller's W3C trace context downstream when present.
    traceparent_header = request.headers.get("traceparent")
    if traceparent_header:
        logger.info(f"Received traceparent header: {traceparent_header}")
    # Always respond as a server-sent-event stream.
    return StreamingResponse(
        stream_chat_completions(request_body, traceparent_header),
        media_type="text/plain",
        headers={
            "content-type": "text/event-stream",
        },
    )
async def stream_chat_completions(
    request_body: ChatCompletionRequest, traceparent_header: str = None
):
    """Generate streaming chat completions for the weather agent.

    Builds the weather-aware message list, requests a streaming completion
    from the archgw gateway, and yields OpenAI-style SSE frames
    ("data: {json}" blocks), terminated by a "data: [DONE]" frame.

    Args:
        request_body: Incoming chat completion request.
        traceparent_header: Optional W3C traceparent value to forward.

    Yields:
        str: SSE-formatted chunks of the completion stream.
    """
    response_messages = await prepare_weather_messages(request_body)
    try:
        logger.info(
            f"Calling archgw at {LLM_GATEWAY_ENDPOINT} to generate weather response"
        )
        # Envoy retry budget + optional trace context propagation.
        extra_headers = {"x-envoy-max-retries": "3"}
        if traceparent_header:
            extra_headers["traceparent"] = traceparent_header
        response_stream = await archgw_client.chat.completions.create(
            model=WEATHER_MODEL,
            messages=response_messages,
            temperature=request_body.temperature or 0.7,
            max_tokens=request_body.max_tokens or 1000,
            stream=True,
            extra_headers=extra_headers,
        )
        completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
        created_time = int(time.time())
        collected_content = []
        # Relay each upstream delta to the client while accumulating the full text.
        async for chunk in response_stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                collected_content.append(content)
                stream_chunk = ChatCompletionStreamResponse(
                    id=completion_id,
                    created=created_time,
                    model=request_body.model,
                    choices=[
                        {
                            "index": 0,
                            "delta": {"content": content},
                            "finish_reason": None,
                        }
                    ],
                )
                yield f"data: {stream_chunk.model_dump_json()}\n\n"
        full_response = "".join(collected_content)
        # The final frame carries the complete assistant turn, JSON-encoded, in
        # "message" so the caller can append it to conversation history.
        updated_history = [{"role": "assistant", "content": full_response}]
        final_chunk = ChatCompletionStreamResponse(
            id=completion_id,
            created=created_time,
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {},
                    "finish_reason": "stop",
                    "message": {
                        "role": "assistant",
                        "content": json.dumps(updated_history),
                    },
                }
            ],
        )
        yield f"data: {final_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
    except Exception as e:
        logger.error(f"Error generating weather response: {e}")
        # Emit a friendly, stream-shaped error so SSE clients terminate cleanly.
        error_chunk = ChatCompletionStreamResponse(
            id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
            created=int(time.time()),
            model=request_body.model,
            choices=[
                {
                    "index": 0,
                    "delta": {
                        "content": "I apologize, but I'm having trouble retrieving weather information right now. Please try again."
                    },
                    "finish_reason": "stop",
                }
            ],
        )
        yield f"data: {error_chunk.model_dump_json()}\n\n"
        yield "data: [DONE]\n\n"
@app.get("/health")
async def health_check():
    """Liveness probe: report that the weather forecast agent is up."""
    payload = {"status": "healthy", "agent": "weather_forecast"}
    return payload
def start_server(host: str = "localhost", port: int = 10510):
    """Run the weather agent's REST API under uvicorn (blocking call)."""
    # dictConfig-style logging so agent output is tagged with [WEATHER_AGENT].
    logging_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "%(asctime)s - [WEATHER_AGENT] - %(levelname)s - %(message)s",
            },
        },
        "handlers": {
            "default": {
                "formatter": "default",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
            },
        },
        "root": {
            "level": "INFO",
            "handlers": ["default"],
        },
    }
    uvicorn.run(app, host=host, port=port, log_config=logging_config)

View file

@ -0,0 +1,45 @@
#!/bin/bash
set -e

WAIT_FOR_PIDS=()

# Timestamped logger matching the Python agents' log format (ms precision).
log() {
    timestamp=$(python3 -c 'from datetime import datetime; print(datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")[:23])')
    message="$*"
    echo "$timestamp - $message"
}

# Best-effort: terminate every agent we spawned. Deliberately does NOT force
# an exit status, so a clean run still exits 0 and a signalled/failed run
# keeps its own status.
cleanup() {
    log "Caught signal, terminating all agent processes ..."
    for PID in "${WAIT_FOR_PIDS[@]}"; do
        if kill "$PID" 2> /dev/null; then
            log "killed process: $PID"
        fi
    done
}

# Bug fix: the previous version trapped only EXIT and called `exit 1` inside
# cleanup, so the script exited non-zero even after a fully successful run.
# Signals now explicitly exit 1 after cleanup; normal EXIT just cleans up.
trap 'cleanup; exit 1' INT TERM
trap cleanup EXIT

log "Starting weather agent on port 10510..."
uv run python -m travel_agents --host 0.0.0.0 --port 10510 --agent weather &
WAIT_FOR_PIDS+=($!)

log "Starting flight agent on port 10520..."
uv run python -m travel_agents --host 0.0.0.0 --port 10520 --agent flight &
WAIT_FOR_PIDS+=($!)

log "Starting currency agent on port 10530..."
uv run python -m travel_agents --host 0.0.0.0 --port 10530 --agent currency &
WAIT_FOR_PIDS+=($!)

log "All agents started successfully!"
log " - Weather Agent: http://localhost:10510"
log " - Flight Agent: http://localhost:10520"
log " - Currency Agent: http://localhost:10530"
log ""
log "Waiting for agents to run..."
for PID in "${WAIT_FOR_PIDS[@]}"; do
    wait "$PID"
done

View file

@ -0,0 +1,55 @@
@llm_endpoint = http://localhost:12000
### Travel Agent Chat Completion Request
POST {{llm_endpoint}}/v1/chat/completions HTTP/1.1
Content-Type: application/json
{
"model": "gpt-4o",
"messages": [
{
"role": "system",
"content": "You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather and flights based on the structured data provided to you.\n\nCRITICAL INSTRUCTIONS:\n\n1. DATA STRUCTURE:\n \n WEATHER DATA:\n - You will receive weather data as JSON in a system message\n - The data contains a \"location\" field (string) and a \"forecast\" array\n - Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset\n - Some fields may be null/None - handle these gracefully\n \n FLIGHT DATA:\n - You will receive flight information in a system message\n - Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal\n - Information may include both scheduled and estimated times\n - Some fields may be unavailable - handle these gracefully\n\n2. WEATHER HANDLING:\n - For single-day queries: Use temperature_c/temperature_f (current/primary temperature)\n - For multi-day forecasts: Use temperature_max_c and temperature_min_c when available\n - Always provide temperatures in both Celsius and Fahrenheit when available\n - If temperature is null, say \"temperature data unavailable\" rather than making up numbers\n - Use exact condition descriptions provided (e.g., \"Clear sky\", \"Rainy\", \"Partly Cloudy\")\n - Add helpful context when appropriate (e.g., \"perfect for outdoor activities\" for clear skies)\n\n3. 
FLIGHT HANDLING:\n - Present flight information clearly with airline name and flight number\n - Include departure and arrival times with time zones when provided\n - Mention origin and destination airports with their codes\n - Include gate and terminal information when available\n - Note aircraft type if relevant to the query\n - Highlight any status updates (delays, early arrivals, etc.)\n - For multiple flights, list them in chronological order by departure time\n - If specific details are missing, acknowledge this rather than inventing information\n\n4. MULTI-PART QUERIES:\n - Users may ask about both weather and flights in one message\n - Answer ALL parts of the query that you have data for\n - Organize your response logically - typically weather first, then flights, or vice versa based on the query\n - Provide complete information for each topic without mentioning other agents\n - If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data\n\n5. ERROR HANDLING:\n - If weather forecast contains an \"error\" field, acknowledge the issue politely\n - If temperature or condition is null/None, mention that specific data is unavailable\n - If flight details are incomplete, state which information is unavailable\n - Never invent or guess weather or flight data - only use what's provided\n - If location couldn't be determined, acknowledge this but still provide available data\n\n6. 
RESPONSE FORMAT:\n \n For Weather:\n - Single-day queries: Provide current conditions, temperature, and condition\n - Multi-day forecasts: List each day with date, day name, high/low temps, and condition\n - Include sunrise/sunset times when available and relevant\n \n For Flights:\n - List flights with clear numbering or bullet points\n - Include key details: airline, flight number, departure/arrival times, airports\n - Add gate, terminal, and status information when available\n - For multiple flights, organize chronologically\n \n General:\n - Use natural, conversational language\n - Be concise but complete\n - Format dates and times clearly\n - Use bullet points or numbered lists for clarity\n\n7. LOCATION HANDLING:\n - Always mention location names from the data\n - For flights, clearly state origin and destination cities/airports\n - If locations differ from what the user asked, acknowledge this politely\n\n8. RESPONSE STYLE:\n - Be friendly and professional\n - Use natural language, not technical jargon\n - Provide information in a logical, easy-to-read format\n - When answering multi-part queries, create a cohesive response that addresses all aspects\n\nRemember: Only use the data provided. Never fabricate weather or flight information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for."
},
{
"role": "system",
"content": "Current weather data for Seattle:\n\n{\n \"location\": \"Seattle\",\n \"forecast\": [\n {\n \"date\": \"2025-12-22\",\n \"day_name\": \"Monday\",\n \"temperature_c\": 8.3,\n \"temperature_f\": 46.9,\n \"temperature_max_c\": 8.3,\n \"temperature_min_c\": 2.8,\n \"condition\": \"Rainy\",\n \"sunrise\": \"07:55\",\n \"sunset\": \"16:20\"\n }\n ]\n}\n\nUse this data to answer the user's weather query."
},
{
"role": "system",
"content": "Here are some direct flights from Seattle to Atlanta on December 23, 2025:\n\n1. **Delta Airlines Flight DL552**\n - **Departure:** Scheduled at 3:47 PM (Seattle Time), from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 8:31 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Terminal at Atlanta:** S\n - **Estimated arrival slightly early**: 8:26 PM\n\n2. **Delta Airlines Flight DL542**\n - **Departure:** Scheduled at 12:00 PM (Seattle Time), Gate A4, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 4:49 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** E10, Terminal: S\n - **Estimated early arrival**: 4:44 PM\n\n3. **Delta Airlines Flight DL554**\n - **Departure:** Scheduled at 10:15 AM (Seattle Time), Gate A10, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 4:05 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** B19, Terminal: S\n - **Estimated late arrival**: 4:06 PM\n\n4. **Alaska Airlines Flight AS334**\n - **Departure:** Scheduled at 9:16 AM (Seattle Time), Gate C20, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 5:08 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** C5, Terminal: N\n\nThese are just a few of the direct flights available. Please let me know if you need more details on any other specific flight."
},
{
"role": "user",
"content": "What's the weather in Seattle?"
},
{
"role": "assistant",
"content": "The weather in Seattle is sunny with a temperature of 60 degrees Fahrenheit."
},
{
"role": "user",
"content": "What is one Alaska flight that goes direct to Atlanta from Seattle?"
}
],
"max_tokens": 1000,
"stream": false,
"temperature": 1.0
}
### test 8001
### test upstream llm
POST http://localhost:8001/v1/chat/completions HTTP/1.1
Content-Type: application/json
{
"messages": [
{
"role": "system",
"content": "\nCurrent weather data for Seattle:\n\n{\n \"location\": \"Seattle\",\n \"forecast\": [\n {\n \"date\": \"2025-12-22\",\n \"day_name\": \"Monday\",\n \"temperature_c\": 8.3,\n \"temperature_f\": 46.9,\n \"temperature_max_c\": 8.3,\n \"temperature_min_c\": 2.8,\n \"condition\": \"Rainy\",\n \"sunrise\": \"07:55\",\n \"sunset\": \"16:20\"\n }\n ]\n}\n\nUse this data to answer the user's weather query."
}
],
"model": "gpt-4o"
}

View file

@ -0,0 +1,30 @@
@llm_endpoint = http://localhost:12000
### Travel Agent Chat Completion - Full Conversation
POST {{llm_endpoint}}/v1/chat/completions HTTP/1.1
Content-Type: application/json
{
"model": "gpt-4o",
"messages": [
{
"role": "system",
"content": "You are a professional travel planner assistant. Your role is to provide accurate, clear, and helpful information about weather and flights based on the structured data provided to you.\n\nCRITICAL INSTRUCTIONS:\n\n1. DATA STRUCTURE:\n \n WEATHER DATA:\n - You will receive weather data as JSON in a system message\n - The data contains a \"location\" field (string) and a \"forecast\" array\n - Each forecast entry has: date, day_name, temperature_c, temperature_f, temperature_max_c, temperature_min_c, condition, sunrise, sunset\n - Some fields may be null/None - handle these gracefully\n \n FLIGHT DATA:\n - You will receive flight information in a system message\n - Flight data includes: airline, flight number, departure time, arrival time, origin airport, destination airport, aircraft type, status, gate, terminal\n - Information may include both scheduled and estimated times\n - Some fields may be unavailable - handle these gracefully\n\n2. WEATHER HANDLING:\n - For single-day queries: Use temperature_c/temperature_f (current/primary temperature)\n - For multi-day forecasts: Use temperature_max_c and temperature_min_c when available\n - Always provide temperatures in both Celsius and Fahrenheit when available\n - If temperature is null, say \"temperature data unavailable\" rather than making up numbers\n - Use exact condition descriptions provided (e.g., \"Clear sky\", \"Rainy\", \"Partly Cloudy\")\n - Add helpful context when appropriate (e.g., \"perfect for outdoor activities\" for clear skies)\n\n3. 
FLIGHT HANDLING:\n - Present flight information clearly with airline name and flight number\n - Include departure and arrival times with time zones when provided\n - Mention origin and destination airports with their codes\n - Include gate and terminal information when available\n - Note aircraft type if relevant to the query\n - Highlight any status updates (delays, early arrivals, etc.)\n - For multiple flights, list them in chronological order by departure time\n - If specific details are missing, acknowledge this rather than inventing information\n\n4. MULTI-PART QUERIES:\n - Users may ask about both weather and flights in one message\n - Answer ALL parts of the query that you have data for\n - Organize your response logically - typically weather first, then flights, or vice versa based on the query\n - Provide complete information for each topic without mentioning other agents\n - If you receive data for only one topic but the user asked about multiple, answer what you can with the provided data\n\n5. ERROR HANDLING:\n - If weather forecast contains an \"error\" field, acknowledge the issue politely\n - If temperature or condition is null/None, mention that specific data is unavailable\n - If flight details are incomplete, state which information is unavailable\n - Never invent or guess weather or flight data - only use what's provided\n - If location couldn't be determined, acknowledge this but still provide available data\n\n6. 
RESPONSE FORMAT:\n \n For Weather:\n - Single-day queries: Provide current conditions, temperature, and condition\n - Multi-day forecasts: List each day with date, day name, high/low temps, and condition\n - Include sunrise/sunset times when available and relevant\n \n For Flights:\n - List flights with clear numbering or bullet points\n - Include key details: airline, flight number, departure/arrival times, airports\n - Add gate, terminal, and status information when available\n - For multiple flights, organize chronologically\n \n General:\n - Use natural, conversational language\n - Be concise but complete\n - Format dates and times clearly\n - Use bullet points or numbered lists for clarity\n\n7. LOCATION HANDLING:\n - Always mention location names from the data\n - For flights, clearly state origin and destination cities/airports\n - If locations differ from what the user asked, acknowledge this politely\n\n8. RESPONSE STYLE:\n - Be friendly and professional\n - Use natural language, not technical jargon\n - Provide information in a logical, easy-to-read format\n - When answering multi-part queries, create a cohesive response that addresses all aspects\n\nRemember: Only use the data provided. Never fabricate weather or flight information. If data is missing, clearly state what's unavailable. Answer all parts of the user's query that you have data for."
},
{
"role": "assistant",
"content": "Current weather data for Seattle:\n\n{\n \"location\": \"Seattle\",\n \"forecast\": [\n {\n \"date\": \"2025-12-22\",\n \"day_name\": \"Monday\",\n \"temperature_c\": 8.3,\n \"temperature_f\": 46.9,\n \"temperature_max_c\": 8.3,\n \"temperature_min_c\": 2.8,\n \"condition\": \"Rainy\",\n \"sunrise\": \"07:55\",\n \"sunset\": \"16:20\"\n }\n ]\n}\n\nUse this data to answer the user's weather query."
},
{
"role": "assistant",
"content": "Here are some direct flights from Seattle to Atlanta on December 23, 2025:\n\n1. **Delta Airlines Flight DL552**\n - **Departure:** Scheduled at 3:47 PM (Seattle Time), from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 8:31 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Terminal at Atlanta:** S\n - **Estimated arrival slightly early**: 8:26 PM\n\n2. **Delta Airlines Flight DL542**\n - **Departure:** Scheduled at 12:00 PM (Seattle Time), Gate A4, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 4:49 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** E10, Terminal: S\n - **Estimated early arrival**: 4:44 PM\n\n3. **Delta Airlines Flight DL554**\n - **Departure:** Scheduled at 10:15 AM (Seattle Time), Gate A10, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 4:05 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** B19, Terminal: S\n - **Estimated late arrival**: 4:06 PM\n\n4. **Alaska Airlines Flight AS334**\n - **Departure:** Scheduled at 9:16 AM (Seattle Time), Gate C20, from Seattle-Tacoma Intl (SEA)\n - **Arrival:** Scheduled at 5:08 PM (Atlanta Time), at Hartsfield-Jackson Intl (ATL)\n - **Aircraft:** Boeing 737-900 (B739)\n - **Status:** Scheduled\n - **Gate at Atlanta:** C5, Terminal: N\n\nThese are just a few of the direct flights available. Please let me know if you need more details on any other specific flight."
},
{
"role": "user",
"content": "What is the weather in Atlanta and what flight goes from Seattle to Atlanta?"
}
],
"max_tokens": 1000,
"stream": false,
"temperature": 1.0
}

488
demos/use_cases/travel_agents/uv.lock generated Normal file
View file

@ -0,0 +1,488 @@
version = 1
revision = 3
requires-python = ">=3.10"
[[package]]
name = "annotated-doc"
version = "0.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
]
[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
]
[[package]]
name = "anyio"
version = "4.12.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "exceptiongroup", marker = "python_full_version < '3.11'" },
{ name = "idna" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" },
]
[[package]]
name = "certifi"
version = "2025.11.12"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" },
]
[[package]]
name = "click"
version = "8.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
[[package]]
name = "distro"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
]
[[package]]
name = "exceptiongroup"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" },
]
[[package]]
name = "fastapi"
version = "0.125.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-doc" },
{ name = "pydantic" },
{ name = "starlette" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/17/71/2df15009fb4bdd522a069d2fbca6007c6c5487fce5cb965be00fc335f1d1/fastapi-0.125.0.tar.gz", hash = "sha256:16b532691a33e2c5dee1dac32feb31dc6eb41a3dd4ff29a95f9487cb21c054c0", size = 370550, upload-time = "2025-12-17T21:41:44.15Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/2f/ff2fcc98f500713368d8b650e1bbc4a0b3ebcdd3e050dcdaad5f5a13fd7e/fastapi-0.125.0-py3-none-any.whl", hash = "sha256:2570ec4f3aecf5cca8f0428aed2398b774fcdfee6c2116f86e80513f2f86a7a1", size = 112888, upload-time = "2025-12-17T21:41:41.286Z" },
]
[[package]]
name = "h11"
version = "0.16.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
]
[[package]]
name = "httpcore"
version = "1.0.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
]
[[package]]
name = "httpx"
version = "0.28.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "certifi" },
{ name = "httpcore" },
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]
[[package]]
name = "idna"
version = "3.11"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
]
[[package]]
name = "jiter"
version = "0.12.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/45/9d/e0660989c1370e25848bb4c52d061c71837239738ad937e83edca174c273/jiter-0.12.0.tar.gz", hash = "sha256:64dfcd7d5c168b38d3f9f8bba7fc639edb3418abcc74f22fdbe6b8938293f30b", size = 168294, upload-time = "2025-11-09T20:49:23.302Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/91/13cb9505f7be74a933f37da3af22e029f6ba64f5669416cb8b2774bc9682/jiter-0.12.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e7acbaba9703d5de82a2c98ae6a0f59ab9770ab5af5fa35e43a303aee962cf65", size = 316652, upload-time = "2025-11-09T20:46:41.021Z" },
{ url = "https://files.pythonhosted.org/packages/4e/76/4e9185e5d9bb4e482cf6dec6410d5f78dfeb374cfcecbbe9888d07c52daa/jiter-0.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:364f1a7294c91281260364222f535bc427f56d4de1d8ffd718162d21fbbd602e", size = 319829, upload-time = "2025-11-09T20:46:43.281Z" },
{ url = "https://files.pythonhosted.org/packages/86/af/727de50995d3a153138139f259baae2379d8cb0522c0c00419957bc478a6/jiter-0.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85ee4d25805d4fb23f0a5167a962ef8e002dbfb29c0989378488e32cf2744b62", size = 350568, upload-time = "2025-11-09T20:46:45.075Z" },
{ url = "https://files.pythonhosted.org/packages/6a/c1/d6e9f4b7a3d5ac63bcbdfddeb50b2dcfbdc512c86cffc008584fdc350233/jiter-0.12.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:796f466b7942107eb889c08433b6e31b9a7ed31daceaecf8af1be26fb26c0ca8", size = 369052, upload-time = "2025-11-09T20:46:46.818Z" },
{ url = "https://files.pythonhosted.org/packages/eb/be/00824cd530f30ed73fa8a4f9f3890a705519e31ccb9e929f1e22062e7c76/jiter-0.12.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:35506cb71f47dba416694e67af996bbdefb8e3608f1f78799c2e1f9058b01ceb", size = 481585, upload-time = "2025-11-09T20:46:48.319Z" },
{ url = "https://files.pythonhosted.org/packages/74/b6/2ad7990dff9504d4b5052eef64aa9574bd03d722dc7edced97aad0d47be7/jiter-0.12.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:726c764a90c9218ec9e4f99a33d6bf5ec169163f2ca0fc21b654e88c2abc0abc", size = 380541, upload-time = "2025-11-09T20:46:49.643Z" },
{ url = "https://files.pythonhosted.org/packages/b5/c7/f3c26ecbc1adbf1db0d6bba99192143d8fe8504729d9594542ecc4445784/jiter-0.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa47810c5565274810b726b0dc86d18dce5fd17b190ebdc3890851d7b2a0e74", size = 364423, upload-time = "2025-11-09T20:46:51.731Z" },
{ url = "https://files.pythonhosted.org/packages/18/51/eac547bf3a2d7f7e556927278e14c56a0604b8cddae75815d5739f65f81d/jiter-0.12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ec0259d3f26c62aed4d73b198c53e316ae11f0f69c8fbe6682c6dcfa0fcce2", size = 389958, upload-time = "2025-11-09T20:46:53.432Z" },
{ url = "https://files.pythonhosted.org/packages/2c/1f/9ca592e67175f2db156cff035e0d817d6004e293ee0c1d73692d38fcb596/jiter-0.12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:79307d74ea83465b0152fa23e5e297149506435535282f979f18b9033c0bb025", size = 522084, upload-time = "2025-11-09T20:46:54.848Z" },
{ url = "https://files.pythonhosted.org/packages/83/ff/597d9cdc3028f28224f53e1a9d063628e28b7a5601433e3196edda578cdd/jiter-0.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cf6e6dd18927121fec86739f1a8906944703941d000f0639f3eb6281cc601dca", size = 513054, upload-time = "2025-11-09T20:46:56.487Z" },
{ url = "https://files.pythonhosted.org/packages/24/6d/1970bce1351bd02e3afcc5f49e4f7ef3dabd7fb688f42be7e8091a5b809a/jiter-0.12.0-cp310-cp310-win32.whl", hash = "sha256:b6ae2aec8217327d872cbfb2c1694489057b9433afce447955763e6ab015b4c4", size = 206368, upload-time = "2025-11-09T20:46:58.638Z" },
{ url = "https://files.pythonhosted.org/packages/e3/6b/eb1eb505b2d86709b59ec06681a2b14a94d0941db091f044b9f0e16badc0/jiter-0.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:c7f49ce90a71e44f7e1aa9e7ec415b9686bbc6a5961e57eab511015e6759bc11", size = 204847, upload-time = "2025-11-09T20:47:00.295Z" },
{ url = "https://files.pythonhosted.org/packages/32/f9/eaca4633486b527ebe7e681c431f529b63fe2709e7c5242fc0f43f77ce63/jiter-0.12.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8f8a7e317190b2c2d60eb2e8aa835270b008139562d70fe732e1c0020ec53c9", size = 316435, upload-time = "2025-11-09T20:47:02.087Z" },
{ url = "https://files.pythonhosted.org/packages/10/c1/40c9f7c22f5e6ff715f28113ebaba27ab85f9af2660ad6e1dd6425d14c19/jiter-0.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2218228a077e784c6c8f1a8e5d6b8cb1dea62ce25811c356364848554b2056cd", size = 320548, upload-time = "2025-11-09T20:47:03.409Z" },
{ url = "https://files.pythonhosted.org/packages/6b/1b/efbb68fe87e7711b00d2cfd1f26bb4bfc25a10539aefeaa7727329ffb9cb/jiter-0.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9354ccaa2982bf2188fd5f57f79f800ef622ec67beb8329903abf6b10da7d423", size = 351915, upload-time = "2025-11-09T20:47:05.171Z" },
{ url = "https://files.pythonhosted.org/packages/15/2d/c06e659888c128ad1e838123d0638f0efad90cc30860cb5f74dd3f2fc0b3/jiter-0.12.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8f2607185ea89b4af9a604d4c7ec40e45d3ad03ee66998b031134bc510232bb7", size = 368966, upload-time = "2025-11-09T20:47:06.508Z" },
{ url = "https://files.pythonhosted.org/packages/6b/20/058db4ae5fb07cf6a4ab2e9b9294416f606d8e467fb74c2184b2a1eeacba/jiter-0.12.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a585a5e42d25f2e71db5f10b171f5e5ea641d3aa44f7df745aa965606111cc2", size = 482047, upload-time = "2025-11-09T20:47:08.382Z" },
{ url = "https://files.pythonhosted.org/packages/49/bb/dc2b1c122275e1de2eb12905015d61e8316b2f888bdaac34221c301495d6/jiter-0.12.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd9e21d34edff5a663c631f850edcb786719c960ce887a5661e9c828a53a95d9", size = 380835, upload-time = "2025-11-09T20:47:09.81Z" },
{ url = "https://files.pythonhosted.org/packages/23/7d/38f9cd337575349de16da575ee57ddb2d5a64d425c9367f5ef9e4612e32e/jiter-0.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a612534770470686cd5431478dc5a1b660eceb410abade6b1b74e320ca98de6", size = 364587, upload-time = "2025-11-09T20:47:11.529Z" },
{ url = "https://files.pythonhosted.org/packages/f0/a3/b13e8e61e70f0bb06085099c4e2462647f53cc2ca97614f7fedcaa2bb9f3/jiter-0.12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3985aea37d40a908f887b34d05111e0aae822943796ebf8338877fee2ab67725", size = 390492, upload-time = "2025-11-09T20:47:12.993Z" },
{ url = "https://files.pythonhosted.org/packages/07/71/e0d11422ed027e21422f7bc1883c61deba2d9752b720538430c1deadfbca/jiter-0.12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b1207af186495f48f72529f8d86671903c8c10127cac6381b11dddc4aaa52df6", size = 522046, upload-time = "2025-11-09T20:47:14.6Z" },
{ url = "https://files.pythonhosted.org/packages/9f/59/b968a9aa7102a8375dbbdfbd2aeebe563c7e5dddf0f47c9ef1588a97e224/jiter-0.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef2fb241de583934c9915a33120ecc06d94aa3381a134570f59eed784e87001e", size = 513392, upload-time = "2025-11-09T20:47:16.011Z" },
{ url = "https://files.pythonhosted.org/packages/ca/e4/7df62002499080dbd61b505c5cb351aa09e9959d176cac2aa8da6f93b13b/jiter-0.12.0-cp311-cp311-win32.whl", hash = "sha256:453b6035672fecce8007465896a25b28a6b59cfe8fbc974b2563a92f5a92a67c", size = 206096, upload-time = "2025-11-09T20:47:17.344Z" },
{ url = "https://files.pythonhosted.org/packages/bb/60/1032b30ae0572196b0de0e87dce3b6c26a1eff71aad5fe43dee3082d32e0/jiter-0.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:ca264b9603973c2ad9435c71a8ec8b49f8f715ab5ba421c85a51cde9887e421f", size = 204899, upload-time = "2025-11-09T20:47:19.365Z" },
{ url = "https://files.pythonhosted.org/packages/49/d5/c145e526fccdb834063fb45c071df78b0cc426bbaf6de38b0781f45d956f/jiter-0.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:cb00ef392e7d684f2754598c02c409f376ddcef857aae796d559e6cacc2d78a5", size = 188070, upload-time = "2025-11-09T20:47:20.75Z" },
{ url = "https://files.pythonhosted.org/packages/92/c9/5b9f7b4983f1b542c64e84165075335e8a236fa9e2ea03a0c79780062be8/jiter-0.12.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:305e061fa82f4680607a775b2e8e0bcb071cd2205ac38e6ef48c8dd5ebe1cf37", size = 314449, upload-time = "2025-11-09T20:47:22.999Z" },
{ url = "https://files.pythonhosted.org/packages/98/6e/e8efa0e78de00db0aee82c0cf9e8b3f2027efd7f8a71f859d8f4be8e98ef/jiter-0.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5c1860627048e302a528333c9307c818c547f214d8659b0705d2195e1a94b274", size = 319855, upload-time = "2025-11-09T20:47:24.779Z" },
{ url = "https://files.pythonhosted.org/packages/20/26/894cd88e60b5d58af53bec5c6759d1292bd0b37a8b5f60f07abf7a63ae5f/jiter-0.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df37577a4f8408f7e0ec3205d2a8f87672af8f17008358063a4d6425b6081ce3", size = 350171, upload-time = "2025-11-09T20:47:26.469Z" },
{ url = "https://files.pythonhosted.org/packages/f5/27/a7b818b9979ac31b3763d25f3653ec3a954044d5e9f5d87f2f247d679fd1/jiter-0.12.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:75fdd787356c1c13a4f40b43c2156276ef7a71eb487d98472476476d803fb2cf", size = 365590, upload-time = "2025-11-09T20:47:27.918Z" },
{ url = "https://files.pythonhosted.org/packages/ba/7e/e46195801a97673a83746170b17984aa8ac4a455746354516d02ca5541b4/jiter-0.12.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1eb5db8d9c65b112aacf14fcd0faae9913d07a8afea5ed06ccdd12b724e966a1", size = 479462, upload-time = "2025-11-09T20:47:29.654Z" },
{ url = "https://files.pythonhosted.org/packages/ca/75/f833bfb009ab4bd11b1c9406d333e3b4357709ed0570bb48c7c06d78c7dd/jiter-0.12.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:73c568cc27c473f82480abc15d1301adf333a7ea4f2e813d6a2c7d8b6ba8d0df", size = 378983, upload-time = "2025-11-09T20:47:31.026Z" },
{ url = "https://files.pythonhosted.org/packages/71/b3/7a69d77943cc837d30165643db753471aff5df39692d598da880a6e51c24/jiter-0.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4321e8a3d868919bcb1abb1db550d41f2b5b326f72df29e53b2df8b006eb9403", size = 361328, upload-time = "2025-11-09T20:47:33.286Z" },
{ url = "https://files.pythonhosted.org/packages/b0/ac/a78f90caf48d65ba70d8c6efc6f23150bc39dc3389d65bbec2a95c7bc628/jiter-0.12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a51bad79f8cc9cac2b4b705039f814049142e0050f30d91695a2d9a6611f126", size = 386740, upload-time = "2025-11-09T20:47:34.703Z" },
{ url = "https://files.pythonhosted.org/packages/39/b6/5d31c2cc8e1b6a6bcf3c5721e4ca0a3633d1ab4754b09bc7084f6c4f5327/jiter-0.12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:2a67b678f6a5f1dd6c36d642d7db83e456bc8b104788262aaefc11a22339f5a9", size = 520875, upload-time = "2025-11-09T20:47:36.058Z" },
{ url = "https://files.pythonhosted.org/packages/30/b5/4df540fae4e9f68c54b8dab004bd8c943a752f0b00efd6e7d64aa3850339/jiter-0.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efe1a211fe1fd14762adea941e3cfd6c611a136e28da6c39272dbb7a1bbe6a86", size = 511457, upload-time = "2025-11-09T20:47:37.932Z" },
{ url = "https://files.pythonhosted.org/packages/07/65/86b74010e450a1a77b2c1aabb91d4a91dd3cd5afce99f34d75fd1ac64b19/jiter-0.12.0-cp312-cp312-win32.whl", hash = "sha256:d779d97c834b4278276ec703dc3fc1735fca50af63eb7262f05bdb4e62203d44", size = 204546, upload-time = "2025-11-09T20:47:40.47Z" },
{ url = "https://files.pythonhosted.org/packages/1c/c7/6659f537f9562d963488e3e55573498a442503ced01f7e169e96a6110383/jiter-0.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e8269062060212b373316fe69236096aaf4c49022d267c6736eebd66bbbc60bb", size = 205196, upload-time = "2025-11-09T20:47:41.794Z" },
{ url = "https://files.pythonhosted.org/packages/21/f4/935304f5169edadfec7f9c01eacbce4c90bb9a82035ac1de1f3bd2d40be6/jiter-0.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:06cb970936c65de926d648af0ed3d21857f026b1cf5525cb2947aa5e01e05789", size = 186100, upload-time = "2025-11-09T20:47:43.007Z" },
{ url = "https://files.pythonhosted.org/packages/3d/a6/97209693b177716e22576ee1161674d1d58029eb178e01866a0422b69224/jiter-0.12.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6cc49d5130a14b732e0612bc76ae8db3b49898732223ef8b7599aa8d9810683e", size = 313658, upload-time = "2025-11-09T20:47:44.424Z" },
{ url = "https://files.pythonhosted.org/packages/06/4d/125c5c1537c7d8ee73ad3d530a442d6c619714b95027143f1b61c0b4dfe0/jiter-0.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:37f27a32ce36364d2fa4f7fdc507279db604d27d239ea2e044c8f148410defe1", size = 318605, upload-time = "2025-11-09T20:47:45.973Z" },
{ url = "https://files.pythonhosted.org/packages/99/bf/a840b89847885064c41a5f52de6e312e91fa84a520848ee56c97e4fa0205/jiter-0.12.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbc0944aa3d4b4773e348cda635252824a78f4ba44328e042ef1ff3f6080d1cf", size = 349803, upload-time = "2025-11-09T20:47:47.535Z" },
{ url = "https://files.pythonhosted.org/packages/8a/88/e63441c28e0db50e305ae23e19c1d8fae012d78ed55365da392c1f34b09c/jiter-0.12.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da25c62d4ee1ffbacb97fac6dfe4dcd6759ebdc9015991e92a6eae5816287f44", size = 365120, upload-time = "2025-11-09T20:47:49.284Z" },
{ url = "https://files.pythonhosted.org/packages/0a/7c/49b02714af4343970eb8aca63396bc1c82fa01197dbb1e9b0d274b550d4e/jiter-0.12.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:048485c654b838140b007390b8182ba9774621103bd4d77c9c3f6f117474ba45", size = 479918, upload-time = "2025-11-09T20:47:50.807Z" },
{ url = "https://files.pythonhosted.org/packages/69/ba/0a809817fdd5a1db80490b9150645f3aae16afad166960bcd562be194f3b/jiter-0.12.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:635e737fbb7315bef0037c19b88b799143d2d7d3507e61a76751025226b3ac87", size = 379008, upload-time = "2025-11-09T20:47:52.211Z" },
{ url = "https://files.pythonhosted.org/packages/5f/c3/c9fc0232e736c8877d9e6d83d6eeb0ba4e90c6c073835cc2e8f73fdeef51/jiter-0.12.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e017c417b1ebda911bd13b1e40612704b1f5420e30695112efdbed8a4b389ed", size = 361785, upload-time = "2025-11-09T20:47:53.512Z" },
{ url = "https://files.pythonhosted.org/packages/96/61/61f69b7e442e97ca6cd53086ddc1cf59fb830549bc72c0a293713a60c525/jiter-0.12.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:89b0bfb8b2bf2351fba36bb211ef8bfceba73ef58e7f0c68fb67b5a2795ca2f9", size = 386108, upload-time = "2025-11-09T20:47:54.893Z" },
{ url = "https://files.pythonhosted.org/packages/e9/2e/76bb3332f28550c8f1eba3bf6e5efe211efda0ddbbaf24976bc7078d42a5/jiter-0.12.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:f5aa5427a629a824a543672778c9ce0c5e556550d1569bb6ea28a85015287626", size = 519937, upload-time = "2025-11-09T20:47:56.253Z" },
{ url = "https://files.pythonhosted.org/packages/84/d6/fa96efa87dc8bff2094fb947f51f66368fa56d8d4fc9e77b25d7fbb23375/jiter-0.12.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed53b3d6acbcb0fd0b90f20c7cb3b24c357fe82a3518934d4edfa8c6898e498c", size = 510853, upload-time = "2025-11-09T20:47:58.32Z" },
{ url = "https://files.pythonhosted.org/packages/8a/28/93f67fdb4d5904a708119a6ab58a8f1ec226ff10a94a282e0215402a8462/jiter-0.12.0-cp313-cp313-win32.whl", hash = "sha256:4747de73d6b8c78f2e253a2787930f4fffc68da7fa319739f57437f95963c4de", size = 204699, upload-time = "2025-11-09T20:47:59.686Z" },
{ url = "https://files.pythonhosted.org/packages/c4/1f/30b0eb087045a0abe2a5c9c0c0c8da110875a1d3be83afd4a9a4e548be3c/jiter-0.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:e25012eb0c456fcc13354255d0338cd5397cce26c77b2832b3c4e2e255ea5d9a", size = 204258, upload-time = "2025-11-09T20:48:01.01Z" },
{ url = "https://files.pythonhosted.org/packages/2c/f4/2b4daf99b96bce6fc47971890b14b2a36aef88d7beb9f057fafa032c6141/jiter-0.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:c97b92c54fe6110138c872add030a1f99aea2401ddcdaa21edf74705a646dd60", size = 185503, upload-time = "2025-11-09T20:48:02.35Z" },
{ url = "https://files.pythonhosted.org/packages/39/ca/67bb15a7061d6fe20b9b2a2fd783e296a1e0f93468252c093481a2f00efa/jiter-0.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:53839b35a38f56b8be26a7851a48b89bc47e5d88e900929df10ed93b95fea3d6", size = 317965, upload-time = "2025-11-09T20:48:03.783Z" },
{ url = "https://files.pythonhosted.org/packages/18/af/1788031cd22e29c3b14bc6ca80b16a39a0b10e611367ffd480c06a259831/jiter-0.12.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94f669548e55c91ab47fef8bddd9c954dab1938644e715ea49d7e117015110a4", size = 345831, upload-time = "2025-11-09T20:48:05.55Z" },
{ url = "https://files.pythonhosted.org/packages/05/17/710bf8472d1dff0d3caf4ced6031060091c1320f84ee7d5dcbed1f352417/jiter-0.12.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:351d54f2b09a41600ffea43d081522d792e81dcfb915f6d2d242744c1cc48beb", size = 361272, upload-time = "2025-11-09T20:48:06.951Z" },
{ url = "https://files.pythonhosted.org/packages/fb/f1/1dcc4618b59761fef92d10bcbb0b038b5160be653b003651566a185f1a5c/jiter-0.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2a5e90604620f94bf62264e7c2c038704d38217b7465b863896c6d7c902b06c7", size = 204604, upload-time = "2025-11-09T20:48:08.328Z" },
{ url = "https://files.pythonhosted.org/packages/d9/32/63cb1d9f1c5c6632a783c0052cde9ef7ba82688f7065e2f0d5f10a7e3edb/jiter-0.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:88ef757017e78d2860f96250f9393b7b577b06a956ad102c29c8237554380db3", size = 185628, upload-time = "2025-11-09T20:48:09.572Z" },
{ url = "https://files.pythonhosted.org/packages/a8/99/45c9f0dbe4a1416b2b9a8a6d1236459540f43d7fb8883cff769a8db0612d/jiter-0.12.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:c46d927acd09c67a9fb1416df45c5a04c27e83aae969267e98fba35b74e99525", size = 312478, upload-time = "2025-11-09T20:48:10.898Z" },
{ url = "https://files.pythonhosted.org/packages/4c/a7/54ae75613ba9e0f55fcb0bc5d1f807823b5167cc944e9333ff322e9f07dd/jiter-0.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:774ff60b27a84a85b27b88cd5583899c59940bcc126caca97eb2a9df6aa00c49", size = 318706, upload-time = "2025-11-09T20:48:12.266Z" },
{ url = "https://files.pythonhosted.org/packages/59/31/2aa241ad2c10774baf6c37f8b8e1f39c07db358f1329f4eb40eba179c2a2/jiter-0.12.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5433fab222fb072237df3f637d01b81f040a07dcac1cb4a5c75c7aa9ed0bef1", size = 351894, upload-time = "2025-11-09T20:48:13.673Z" },
{ url = "https://files.pythonhosted.org/packages/54/4f/0f2759522719133a9042781b18cc94e335b6d290f5e2d3e6899d6af933e3/jiter-0.12.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f8c593c6e71c07866ec6bfb790e202a833eeec885022296aff6b9e0b92d6a70e", size = 365714, upload-time = "2025-11-09T20:48:15.083Z" },
{ url = "https://files.pythonhosted.org/packages/dc/6f/806b895f476582c62a2f52c453151edd8a0fde5411b0497baaa41018e878/jiter-0.12.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:90d32894d4c6877a87ae00c6b915b609406819dce8bc0d4e962e4de2784e567e", size = 478989, upload-time = "2025-11-09T20:48:16.706Z" },
{ url = "https://files.pythonhosted.org/packages/86/6c/012d894dc6e1033acd8db2b8346add33e413ec1c7c002598915278a37f79/jiter-0.12.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:798e46eed9eb10c3adbbacbd3bdb5ecd4cf7064e453d00dbef08802dae6937ff", size = 378615, upload-time = "2025-11-09T20:48:18.614Z" },
{ url = "https://files.pythonhosted.org/packages/87/30/d718d599f6700163e28e2c71c0bbaf6dace692e7df2592fd793ac9276717/jiter-0.12.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3f1368f0a6719ea80013a4eb90ba72e75d7ea67cfc7846db2ca504f3df0169a", size = 364745, upload-time = "2025-11-09T20:48:20.117Z" },
{ url = "https://files.pythonhosted.org/packages/8f/85/315b45ce4b6ddc7d7fceca24068543b02bdc8782942f4ee49d652e2cc89f/jiter-0.12.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65f04a9d0b4406f7e51279710b27484af411896246200e461d80d3ba0caa901a", size = 386502, upload-time = "2025-11-09T20:48:21.543Z" },
{ url = "https://files.pythonhosted.org/packages/74/0b/ce0434fb40c5b24b368fe81b17074d2840748b4952256bab451b72290a49/jiter-0.12.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:fd990541982a24281d12b67a335e44f117e4c6cbad3c3b75c7dea68bf4ce3a67", size = 519845, upload-time = "2025-11-09T20:48:22.964Z" },
{ url = "https://files.pythonhosted.org/packages/e8/a3/7a7a4488ba052767846b9c916d208b3ed114e3eb670ee984e4c565b9cf0d/jiter-0.12.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:b111b0e9152fa7df870ecaebb0bd30240d9f7fff1f2003bcb4ed0f519941820b", size = 510701, upload-time = "2025-11-09T20:48:24.483Z" },
{ url = "https://files.pythonhosted.org/packages/c3/16/052ffbf9d0467b70af24e30f91e0579e13ded0c17bb4a8eb2aed3cb60131/jiter-0.12.0-cp314-cp314-win32.whl", hash = "sha256:a78befb9cc0a45b5a5a0d537b06f8544c2ebb60d19d02c41ff15da28a9e22d42", size = 205029, upload-time = "2025-11-09T20:48:25.749Z" },
{ url = "https://files.pythonhosted.org/packages/e4/18/3cf1f3f0ccc789f76b9a754bdb7a6977e5d1d671ee97a9e14f7eb728d80e/jiter-0.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:e1fe01c082f6aafbe5c8faf0ff074f38dfb911d53f07ec333ca03f8f6226debf", size = 204960, upload-time = "2025-11-09T20:48:27.415Z" },
{ url = "https://files.pythonhosted.org/packages/02/68/736821e52ecfdeeb0f024b8ab01b5a229f6b9293bbdb444c27efade50b0f/jiter-0.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:d72f3b5a432a4c546ea4bedc84cce0c3404874f1d1676260b9c7f048a9855451", size = 185529, upload-time = "2025-11-09T20:48:29.125Z" },
{ url = "https://files.pythonhosted.org/packages/30/61/12ed8ee7a643cce29ac97c2281f9ce3956eb76b037e88d290f4ed0d41480/jiter-0.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e6ded41aeba3603f9728ed2b6196e4df875348ab97b28fc8afff115ed42ba7a7", size = 318974, upload-time = "2025-11-09T20:48:30.87Z" },
{ url = "https://files.pythonhosted.org/packages/2d/c6/f3041ede6d0ed5e0e79ff0de4c8f14f401bbf196f2ef3971cdbe5fd08d1d/jiter-0.12.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a947920902420a6ada6ad51892082521978e9dd44a802663b001436e4b771684", size = 345932, upload-time = "2025-11-09T20:48:32.658Z" },
{ url = "https://files.pythonhosted.org/packages/d5/5d/4d94835889edd01ad0e2dbfc05f7bdfaed46292e7b504a6ac7839aa00edb/jiter-0.12.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:add5e227e0554d3a52cf390a7635edaffdf4f8fce4fdbcef3cc2055bb396a30c", size = 367243, upload-time = "2025-11-09T20:48:34.093Z" },
{ url = "https://files.pythonhosted.org/packages/fd/76/0051b0ac2816253a99d27baf3dda198663aff882fa6ea7deeb94046da24e/jiter-0.12.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f9b1cda8fcb736250d7e8711d4580ebf004a46771432be0ae4796944b5dfa5d", size = 479315, upload-time = "2025-11-09T20:48:35.507Z" },
{ url = "https://files.pythonhosted.org/packages/70/ae/83f793acd68e5cb24e483f44f482a1a15601848b9b6f199dacb970098f77/jiter-0.12.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb12a2223fe0135c7ff1356a143d57f95bbf1f4a66584f1fc74df21d86b993", size = 380714, upload-time = "2025-11-09T20:48:40.014Z" },
{ url = "https://files.pythonhosted.org/packages/b1/5e/4808a88338ad2c228b1126b93fcd8ba145e919e886fe910d578230dabe3b/jiter-0.12.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c596cc0f4cb574877550ce4ecd51f8037469146addd676d7c1a30ebe6391923f", size = 365168, upload-time = "2025-11-09T20:48:41.462Z" },
{ url = "https://files.pythonhosted.org/packages/0c/d4/04619a9e8095b42aef436b5aeb4c0282b4ff1b27d1db1508df9f5dc82750/jiter-0.12.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ab4c823b216a4aeab3fdbf579c5843165756bd9ad87cc6b1c65919c4715f783", size = 387893, upload-time = "2025-11-09T20:48:42.921Z" },
{ url = "https://files.pythonhosted.org/packages/17/ea/d3c7e62e4546fdc39197fa4a4315a563a89b95b6d54c0d25373842a59cbe/jiter-0.12.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e427eee51149edf962203ff8db75a7514ab89be5cb623fb9cea1f20b54f1107b", size = 520828, upload-time = "2025-11-09T20:48:44.278Z" },
{ url = "https://files.pythonhosted.org/packages/cc/0b/c6d3562a03fd767e31cb119d9041ea7958c3c80cb3d753eafb19b3b18349/jiter-0.12.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:edb868841f84c111255ba5e80339d386d937ec1fdce419518ce1bd9370fac5b6", size = 511009, upload-time = "2025-11-09T20:48:45.726Z" },
{ url = "https://files.pythonhosted.org/packages/aa/51/2cb4468b3448a8385ebcd15059d325c9ce67df4e2758d133ab9442b19834/jiter-0.12.0-cp314-cp314t-win32.whl", hash = "sha256:8bbcfe2791dfdb7c5e48baf646d37a6a3dcb5a97a032017741dea9f817dca183", size = 205110, upload-time = "2025-11-09T20:48:47.033Z" },
{ url = "https://files.pythonhosted.org/packages/b2/c5/ae5ec83dec9c2d1af805fd5fe8f74ebded9c8670c5210ec7820ce0dbeb1e/jiter-0.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2fa940963bf02e1d8226027ef461e36af472dea85d36054ff835aeed944dd873", size = 205223, upload-time = "2025-11-09T20:48:49.076Z" },
{ url = "https://files.pythonhosted.org/packages/97/9a/3c5391907277f0e55195550cf3fa8e293ae9ee0c00fb402fec1e38c0c82f/jiter-0.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:506c9708dd29b27288f9f8f1140c3cb0e3d8ddb045956d7757b1fa0e0f39a473", size = 185564, upload-time = "2025-11-09T20:48:50.376Z" },
{ url = "https://files.pythonhosted.org/packages/fe/54/5339ef1ecaa881c6948669956567a64d2670941925f245c434f494ffb0e5/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:4739a4657179ebf08f85914ce50332495811004cc1747852e8b2041ed2aab9b8", size = 311144, upload-time = "2025-11-09T20:49:10.503Z" },
{ url = "https://files.pythonhosted.org/packages/27/74/3446c652bffbd5e81ab354e388b1b5fc1d20daac34ee0ed11ff096b1b01a/jiter-0.12.0-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:41da8def934bf7bec16cb24bd33c0ca62126d2d45d81d17b864bd5ad721393c3", size = 305877, upload-time = "2025-11-09T20:49:12.269Z" },
{ url = "https://files.pythonhosted.org/packages/a1/f4/ed76ef9043450f57aac2d4fbeb27175aa0eb9c38f833be6ef6379b3b9a86/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c44ee814f499c082e69872d426b624987dbc5943ab06e9bbaa4f81989fdb79e", size = 340419, upload-time = "2025-11-09T20:49:13.803Z" },
{ url = "https://files.pythonhosted.org/packages/21/01/857d4608f5edb0664aa791a3d45702e1a5bcfff9934da74035e7b9803846/jiter-0.12.0-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd2097de91cf03eaa27b3cbdb969addf83f0179c6afc41bbc4513705e013c65d", size = 347212, upload-time = "2025-11-09T20:49:15.643Z" },
{ url = "https://files.pythonhosted.org/packages/cb/f5/12efb8ada5f5c9edc1d4555fe383c1fb2eac05ac5859258a72d61981d999/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:e8547883d7b96ef2e5fe22b88f8a4c8725a56e7f4abafff20fd5272d634c7ecb", size = 309974, upload-time = "2025-11-09T20:49:17.187Z" },
{ url = "https://files.pythonhosted.org/packages/85/15/d6eb3b770f6a0d332675141ab3962fd4a7c270ede3515d9f3583e1d28276/jiter-0.12.0-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:89163163c0934854a668ed783a2546a0617f71706a2551a4a0666d91ab365d6b", size = 304233, upload-time = "2025-11-09T20:49:18.734Z" },
{ url = "https://files.pythonhosted.org/packages/8c/3e/e7e06743294eea2cf02ced6aa0ff2ad237367394e37a0e2b4a1108c67a36/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d96b264ab7d34bbb2312dedc47ce07cd53f06835eacbc16dde3761f47c3a9e7f", size = 338537, upload-time = "2025-11-09T20:49:20.317Z" },
{ url = "https://files.pythonhosted.org/packages/2f/9c/6753e6522b8d0ef07d3a3d239426669e984fb0eba15a315cdbc1253904e4/jiter-0.12.0-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c24e864cb30ab82311c6425655b0cdab0a98c5d973b065c66a3f020740c2324c", size = 346110, upload-time = "2025-11-09T20:49:21.817Z" },
]
[[package]]
name = "openai"
version = "2.13.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/0f/39/8e347e9fda125324d253084bb1b82407e5e3c7777a03dc398f79b2d95626/openai-2.13.0.tar.gz", hash = "sha256:9ff633b07a19469ec476b1e2b5b26c5ef700886524a7a72f65e6f0b5203142d5", size = 626583, upload-time = "2025-12-16T18:19:44.387Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bb/d5/eb52edff49d3d5ea116e225538c118699ddeb7c29fa17ec28af14bc10033/openai-2.13.0-py3-none-any.whl", hash = "sha256:746521065fed68df2f9c2d85613bb50844343ea81f60009b60e6a600c9352c79", size = 1066837, upload-time = "2025-12-16T18:19:43.124Z" },
]
[[package]]
name = "pydantic"
version = "2.12.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
{ name = "pydantic-core" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" },
]
[[package]]
name = "pydantic-core"
version = "2.41.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" },
{ url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" },
{ url = "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" },
{ url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" },
{ url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" },
{ url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" },
{ url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" },
{ url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" },
{ url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" },
{ url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" },
{ url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" },
{ url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" },
{ url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" },
{ url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" },
{ url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" },
{ url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" },
{ url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" },
{ url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" },
{ url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" },
{ url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" },
{ url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" },
{ url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" },
{ url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" },
{ url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" },
{ url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" },
{ url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" },
{ url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" },
{ url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" },
{ url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" },
{ url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" },
{ url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" },
{ url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" },
{ url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" },
{ url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" },
{ url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" },
{ url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" },
{ url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" },
{ url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" },
{ url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" },
{ url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" },
{ url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" },
{ url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" },
{ url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" },
{ url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" },
{ url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" },
{ url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" },
{ url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" },
{ url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" },
{ url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" },
{ url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" },
{ url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" },
{ url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" },
{ url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" },
{ url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" },
{ url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" },
{ url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" },
{ url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" },
{ url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" },
{ url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" },
{ url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" },
{ url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" },
{ url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" },
{ url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" },
{ url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" },
{ url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" },
{ url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" },
{ url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" },
{ url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" },
{ url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" },
{ url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" },
{ url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" },
{ url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" },
{ url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" },
{ url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" },
{ url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" },
{ url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" },
{ url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" },
{ url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" },
{ url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" },
{ url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" },
{ url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
{ url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
{ url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" },
{ url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" },
{ url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" },
{ url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" },
{ url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" },
{ url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" },
{ url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" },
{ url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" },
{ url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" },
{ url = "https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" },
{ url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" },
{ url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" },
{ url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" },
{ url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" },
{ url = "https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" },
{ url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" },
{ url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" },
{ url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" },
{ url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" },
{ url = "https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" },
{ url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" },
{ url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" },
{ url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" },
{ url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" },
]
[[package]]
name = "sniffio"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
]
[[package]]
name = "starlette"
version = "0.50.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" },
]
[[package]]
name = "tqdm"
version = "4.67.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
]
[[package]]
name = "travel-agents"
version = "0.1.0"
source = { editable = "." }
dependencies = [
{ name = "click" },
{ name = "fastapi" },
{ name = "httpx" },
{ name = "openai" },
{ name = "pydantic" },
{ name = "uvicorn" },
]
[package.metadata]
requires-dist = [
{ name = "click", specifier = ">=8.2.1" },
{ name = "fastapi", specifier = ">=0.104.1" },
{ name = "httpx", specifier = ">=0.24.0" },
{ name = "openai", specifier = ">=2.13.0" },
{ name = "pydantic", specifier = ">=2.11.7" },
{ name = "uvicorn", specifier = ">=0.24.0" },
]
[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]]
name = "typing-inspection"
version = "0.4.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
]
[[package]]
name = "uvicorn"
version = "0.38.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "h11" },
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/f06b84e2697fef4688ca63bdb2fdf113ca0a3be33f94488f2cadb690b0cf/uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d", size = 80605, upload-time = "2025-10-18T13:46:44.63Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" },
]

View file

@ -64,6 +64,9 @@ model_providers:
- model: Arch-Function
name: arch-function
provider_interface: arch
- model: Plano-Orchestrator
name: plano-orchestrator
provider_interface: arch
overrides:
prompt_target_intent_matching_threshold: 0.6
prompt_guards: