diff --git a/arch/Dockerfile b/arch/Dockerfile index ef36ac73..8282d788 100644 --- a/arch/Dockerfile +++ b/arch/Dockerfile @@ -32,7 +32,7 @@ COPY --from=envoy /usr/local/bin/envoy /usr/local/bin/envoy WORKDIR /app COPY arch/requirements.txt . RUN pip install -r requirements.txt -COPY arch/tools/cli/config_generator.py . +COPY arch/tools . COPY arch/envoy.template.yaml . COPY arch/arch_config_schema.yaml . COPY arch/supervisord.conf /etc/supervisor/conf.d/supervisord.conf diff --git a/arch/arch_config_schema.yaml b/arch/arch_config_schema.yaml index 1186e9c1..f481b389 100644 --- a/arch/arch_config_schema.yaml +++ b/arch/arch_config_schema.yaml @@ -8,46 +8,76 @@ properties: - v0.1.0 - 0.1-beta - 0.2.0 + - v0.3.0 + + agents: + type: array + items: + type: object listeners: - type: object - additionalProperties: false - properties: - ingress_traffic: - type: object - properties: - address: - type: string - port: - type: integer - message_format: - type: string - enum: - - openai - timeout: - type: string + oneOf: + - type: array additionalProperties: false - egress_traffic: - type: object - properties: - address: - type: string - port: - type: integer - message_format: - type: string - enum: - - openai - timeout: - type: string + items: + type: object + properties: + name: + type: string + port: + type: integer + address: + type: string + timeout: + type: string + type: + type: string + enum: + - model + - prompt + - agent + required: + - type + - name + - type: object # deprecated legacy format, use list format instead additionalProperties: false + properties: + ingress_traffic: + type: object + properties: + address: + type: string + port: + type: integer + message_format: + type: string + enum: + - openai + timeout: + type: string + additionalProperties: false + egress_traffic: + type: object + properties: + address: + type: string + port: + type: integer + message_format: + type: string + enum: + - openai + timeout: + type: string + additionalProperties: false 
endpoints: type: object patternProperties: - "^.*$": + "^[a-zA-Z][a-zA-Z0-9_]*$": type: object properties: endpoint: type: string + pattern: "^.*$" connect_timeout: type: string protocol: @@ -60,7 +90,52 @@ properties: additionalProperties: false required: - endpoint - llm_providers: + + model_providers: + type: array + items: + type: object + properties: + name: + type: string + access_key: + type: string + model: + type: string + default: + type: boolean + base_url: + type: string + http_host: + type: string + provider_interface: + type: string + enum: + - arch + - claude + - deepseek + - groq + - mistral + - openai + - gemini + routing_preferences: + type: array + items: + type: object + properties: + name: + type: string + description: + type: string + additionalProperties: false + required: + - name + - description + additionalProperties: false + required: + - model + + llm_providers: # deprecated for legacy support, use model_providers instead type: array items: type: object @@ -282,4 +357,4 @@ properties: additionalProperties: false required: - version - - llm_providers + - listeners diff --git a/arch/envoy.template.yaml b/arch/envoy.template.yaml index 69cac717..1e60bfd5 100644 --- a/arch/envoy.template.yaml +++ b/arch/envoy.template.yaml @@ -128,7 +128,7 @@ static_resources: domains: - "*" routes: - {% for provider in arch_llm_providers %} + {% for provider in arch_model_providers %} # if endpoint is set then use custom cluster for upstream llm {% if provider.endpoint %} {% set llm_cluster_name = provider.cluster_name %} @@ -218,7 +218,7 @@ static_resources: typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router - - name: egress_api_traffic + - name: outbound_api_traffic address: socket_address: address: 0.0.0.0 @@ -240,11 +240,11 @@ static_resources: envoy_grpc: cluster_name: opentelemetry_collector timeout: 0.250s - service_name: egress_api_traffic + service_name: outbound_api_traffic random_sampling: value: {{ 
arch_tracing.random_sampling }} {% endif %} - stat_prefix: egress_api_traffic + stat_prefix: outbound_api_traffic codec_type: AUTO scheme_header_transformation: scheme_to_overwrite: https @@ -292,6 +292,108 @@ static_resources: typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + {% for listener in listeners %} + + {% if listener.agents %} + + - name: {{ listener.name | replace(" ", "_") }} + address: + socket_address: + address: 0.0.0.0 + port_value: {{ listener.port }} + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + {% if "random_sampling" in arch_tracing and arch_tracing["random_sampling"] > 0 %} + generate_request_id: true + tracing: + provider: + name: envoy.tracers.opentelemetry + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v3.OpenTelemetryConfig + grpc_service: + envoy_grpc: + cluster_name: opentelemetry_collector + timeout: 0.250s + service_name: arch_gateway + random_sampling: + value: {{ arch_tracing.random_sampling }} + {% endif %} + stat_prefix: {{ listener.name | replace(" ", "_") }}_traffic + codec_type: AUTO + scheme_header_transformation: + scheme_to_overwrite: https + access_log: + - name: envoy.access_loggers.file + typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + path: "/var/log/access_llm.log" + route_config: + name: local_routes + request_headers_to_add: + - header: + key: "x-arch-agent-listener-name" + value: "{{ listener.name }}" + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/healthz" + direct_response: + status: 200 + - match: + prefix: "/" + route: + auto_host_rewrite: true + prefix_rewrite: "/agents/" + cluster: bright_staff + timeout: {{ llm_gateway_listener.timeout }} + http_filters: + - name: 
envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + compressor_library: + name: envoy.compression.brotli.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.brotli.compressor.v3.Brotli + - name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + compressor_library: + name: compress + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: decompress + typed_config: + "@type": "type.googleapis.com/envoy.extensions.compression.gzip.decompressor.v3.Gzip" + window_bits: 9 + chunk_size: 8192 + # If this ratio is set too low, then body data will not be decompressed completely. 
+ max_inflate_ratio: 1000 + - name: envoy.filters.http.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.decompressor.v3.Decompressor + decompressor_library: + name: envoy.compression.brotli.decompressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.brotli.decompressor.v3.Brotli + - name: envoy.filters.http.router + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + + {% endif %} + {% endfor %} + - name: egress_traffic address: socket_address: @@ -428,7 +530,7 @@ static_resources: domains: - "*" routes: - {% for provider in arch_llm_providers %} + {% for provider in arch_model_providers %} # if endpoint is set then use custom cluster for upstream llm {% if provider.endpoint %} {% set llm_cluster_name = provider.cluster_name %} @@ -796,7 +898,7 @@ static_resources: address: socket_address: address: host.docker.internal - port_value: $MODEL_SERVER_PORT + port_value: 51000 hostname: {{ internal_cluster }} {% endfor %} - name: mistral_7b_instruct diff --git a/arch/supervisord.conf b/arch/supervisord.conf index d4d99494..0c2e8318 100644 --- a/arch/supervisord.conf +++ b/arch/supervisord.conf @@ -9,7 +9,7 @@ stdout_logfile_maxbytes=0 stderr_logfile_maxbytes=0 [program:envoy] -command=/bin/sh -c "python /app/config_generator.py && envsubst < /etc/envoy/envoy.yaml > /etc/envoy.env_sub.yaml && envoy -c /etc/envoy.env_sub.yaml --component-log-level wasm:info --log-format '[%%Y-%%m-%%d %%T.%%e][%%l] %%v' 2>&1 | tee /var/log/envoy.log | while IFS= read -r line; do echo '[envoy_logs] ' \"$line\"; done" +command=/bin/sh -c "python -m cli.config_generator && envsubst < /etc/envoy/envoy.yaml > /etc/envoy.env_sub.yaml && envoy -c /etc/envoy.env_sub.yaml --component-log-level wasm:info --log-format '[%%Y-%%m-%%d %%T.%%e][%%l] %%v' 2>&1 | tee /var/log/envoy.log | while IFS= read -r line; do echo '[envoy_logs] ' \"$line\"; done" stdout_logfile=/dev/stdout 
redirect_stderr=true stdout_logfile_maxbytes=0 diff --git a/arch/tools/cli/config_generator.py b/arch/tools/cli/config_generator.py index 3e8e02ea..c8479278 100644 --- a/arch/tools/cli/config_generator.py +++ b/arch/tools/cli/config_generator.py @@ -1,9 +1,11 @@ import json import os +from cli.utils import convert_legacy_listeners from jinja2 import Environment, FileSystemLoader import yaml from jsonschema import validate from urllib.parse import urlparse +from copy import deepcopy SUPPORTED_PROVIDERS = [ @@ -72,17 +74,58 @@ def validate_and_render_schema(): _ = yaml.safe_load(arch_config_schema) inferred_clusters = {} + # Convert legacy llm_providers to model_providers + if "llm_providers" in config_yaml: + if "model_providers" in config_yaml: + raise Exception( + "Please provide either llm_providers or model_providers, not both. llm_providers is deprecated, please use model_providers instead" + ) + config_yaml["model_providers"] = config_yaml["llm_providers"] + del config_yaml["llm_providers"] + + listeners, llm_gateway, prompt_gateway = convert_legacy_listeners( + config_yaml.get("listeners"), config_yaml.get("model_providers") + ) + + config_yaml["listeners"] = listeners + endpoints = config_yaml.get("endpoints", {}) + # Process agents section and convert to endpoints + agents = config_yaml.get("agents", []) + for agent in agents: + agent_id = agent.get("id") + agent_endpoint = agent.get("url") + + if agent_id and agent_endpoint: + urlparse_result = urlparse(agent_endpoint) + if urlparse_result.scheme and urlparse_result.hostname: + protocol = urlparse_result.scheme + + port = urlparse_result.port + if port is None: + if protocol == "http": + port = 80 + else: + port = 443 + + endpoints[agent_id] = { + "endpoint": urlparse_result.hostname, + "port": port, + "protocol": protocol, + } + # override the inferred clusters with the ones defined in the config for name, endpoint_details in endpoints.items(): inferred_clusters[name] = endpoint_details - endpoint = 
inferred_clusters[name]["endpoint"] - protocol = inferred_clusters[name].get("protocol", "http") - ( - inferred_clusters[name]["endpoint"], - inferred_clusters[name]["port"], - ) = get_endpoint_and_port(endpoint, protocol) + # Only call get_endpoint_and_port for manually defined endpoints, not agent-derived ones + if "port" not in endpoint_details: + endpoint = inferred_clusters[name]["endpoint"] + protocol = inferred_clusters[name].get("protocol", "http") + ( + inferred_clusters[name]["endpoint"], + inferred_clusters[name]["port"], + ) = get_endpoint_and_port(endpoint, protocol) print("defined clusters from arch_config.yaml: ", json.dumps(inferred_clusters)) @@ -99,114 +142,148 @@ def validate_and_render_schema(): arch_tracing = config_yaml.get("tracing", {}) llms_with_endpoint = [] - updated_llm_providers = [] - - llm_provider_name_set = set() + updated_model_providers = [] + model_provider_name_set = set() + llms_with_usage = [] model_name_keys = set() model_usage_name_keys = set() - for llm_provider in config_yaml["llm_providers"]: - if llm_provider.get("name") in llm_provider_name_set: - raise Exception( - f"Duplicate llm_provider name {llm_provider.get('name')}, please provide unique name for each llm_provider" - ) - model_name = llm_provider.get("model") - if model_name in model_name_keys: - raise Exception( - f"Duplicate model name {model_name}, please provide unique model name for each llm_provider" - ) + print("listeners: ", listeners) - model_name_keys.add(model_name) - if llm_provider.get("name") is None: - llm_provider["name"] = model_name - - llm_provider_name_set.add(llm_provider.get("name")) - - model_name_tokens = model_name.split("/") - if len(model_name_tokens) < 2: - raise Exception( - f"Invalid model name {model_name}. Please provide model name in the format /." 
- ) - provider = model_name_tokens[0] - # Validate azure_openai and ollama provider requires base_url + for listener in listeners: if ( - provider == "azure_openai" or provider == "ollama" or provider == "qwen" - ) and llm_provider.get("base_url") is None: - raise Exception( - f"Provider '{provider}' requires 'base_url' to be set for model {model_name}" - ) + listener.get("model_providers") is None + or listener.get("model_providers") == [] + ): + continue + print("Processing listener with model_providers: ", listener) + name = listener.get("name", None) - model_id = "/".join(model_name_tokens[1:]) - if provider not in SUPPORTED_PROVIDERS: + for model_provider in listener.get("model_providers", []): + if model_provider.get("usage", None): + llms_with_usage.append(model_provider["name"]) + if model_provider.get("name") in model_provider_name_set: + raise Exception( + f"Duplicate model_provider name {model_provider.get('name')}, please provide unique name for each model_provider" + ) + + model_name = model_provider.get("model") + print("Processing model_provider: ", model_provider) + if model_name in model_name_keys: + raise Exception( + f"Duplicate model name {model_name}, please provide unique model name for each model_provider" + ) + model_name_keys.add(model_name) + if model_provider.get("name") is None: + model_provider["name"] = model_name + + model_provider_name_set.add(model_provider.get("name")) + + model_name_tokens = model_name.split("/") + if len(model_name_tokens) < 2: + raise Exception( + f"Invalid model name {model_name}. Please provide model name in the format /." 
+ ) + provider = model_name_tokens[0] + + # Validate azure_openai and ollama provider requires base_url if ( - llm_provider.get("base_url", None) is None - or llm_provider.get("provider_interface", None) is None + provider == "azure_openai" or provider == "ollama" or provider == "qwen" + ) and model_provider.get("base_url") is None: + raise Exception( + f"Provider '{provider}' requires 'base_url' to be set for model {model_name}" + ) + + model_id = "/".join(model_name_tokens[1:]) + if provider not in SUPPORTED_PROVIDERS: + if ( + model_provider.get("base_url", None) is None + or model_provider.get("provider_interface", None) is None + ): + raise Exception( + f"Must provide base_url and provider_interface for unsupported provider {provider} for model {model_name}. Supported providers are: {', '.join(SUPPORTED_PROVIDERS)}" + ) + provider = model_provider.get("provider_interface", None) + elif model_provider.get("provider_interface", None) is not None: + raise Exception( + f"Please provide provider interface as part of model name {model_name} using the format /. 
For example, use 'openai/gpt-3.5-turbo' instead of 'gpt-3.5-turbo' " + ) + + if model_id in model_name_keys: + raise Exception( + f"Duplicate model_id {model_id}, please provide unique model_id for each model_provider" + ) + model_name_keys.add(model_id) + + for routing_preference in model_provider.get("routing_preferences", []): + if routing_preference.get("name") in model_usage_name_keys: + raise Exception( + f"Duplicate routing preference name \"{routing_preference.get('name')}\", please provide unique name for each routing preference" + ) + model_usage_name_keys.add(routing_preference.get("name")) + + model_provider["model"] = model_id + model_provider["provider_interface"] = provider + model_provider_name_set.add(model_provider.get("name")) + if model_provider.get("provider") and model_provider.get( + "provider_interface" ): raise Exception( - f"Must provide base_url and provider_interface for unsupported provider {provider} for model {model_name}. Supported providers are: {', '.join(SUPPORTED_PROVIDERS)}" + "Please provide either provider or provider_interface, not both" ) - provider = llm_provider.get("provider_interface", None) - elif llm_provider.get("provider_interface", None) is not None: - raise Exception( - f"Please provide provider interface as part of model name {model_name} using the format /. 
For example, use 'openai/gpt-3.5-turbo' instead of 'gpt-3.5-turbo' " - ) + if model_provider.get("provider"): + provider = model_provider["provider"] + model_provider["provider_interface"] = provider + del model_provider["provider"] + updated_model_providers.append(model_provider) - if model_id in model_name_keys: - raise Exception( - f"Duplicate model_id {model_id}, please provide unique model_id for each llm_provider" - ) - model_name_keys.add(model_id) - - for routing_preference in llm_provider.get("routing_preferences", []): - if routing_preference.get("name") in model_usage_name_keys: - raise Exception( - f"Duplicate routing preference name \"{routing_preference.get('name')}\", please provide unique name for each routing preference" - ) - model_usage_name_keys.add(routing_preference.get("name")) - - llm_provider["model"] = model_id - llm_provider["provider_interface"] = provider - updated_llm_providers.append(llm_provider) - - if llm_provider.get("base_url", None): - base_url = llm_provider["base_url"] - urlparse_result = urlparse(base_url) - url_path = urlparse_result.path - if url_path and url_path != "/": - raise Exception( - f"Please provide base_url without path, got {base_url}. Use base_url like 'http://example.com' instead of 'http://example.com/path'." 
- ) - if urlparse_result.scheme == "" or urlparse_result.scheme not in [ - "http", - "https", - ]: - raise Exception( - "Please provide a valid URL with scheme (http/https) in base_url" - ) - protocol = urlparse_result.scheme - port = urlparse_result.port - if port is None: - if protocol == "http": - port = 80 - else: - port = 443 - endpoint = urlparse_result.hostname - llm_provider["endpoint"] = endpoint - llm_provider["port"] = port - llm_provider["protocol"] = protocol - llm_provider["cluster_name"] = ( - provider + "_" + endpoint - ) # make name unique by appending endpoint - llms_with_endpoint.append(llm_provider) + if model_provider.get("base_url", None): + base_url = model_provider["base_url"] + urlparse_result = urlparse(base_url) + url_path = urlparse_result.path + if url_path and url_path != "/": + raise Exception( + f"Please provide base_url without path, got {base_url}. Use base_url like 'http://example.com' instead of 'http://example.com/path'." + ) + if urlparse_result.scheme == "" or urlparse_result.scheme not in [ + "http", + "https", + ]: + raise Exception( + "Please provide a valid URL with scheme (http/https) in base_url" + ) + protocol = urlparse_result.scheme + port = urlparse_result.port + if port is None: + if protocol == "http": + port = 80 + else: + port = 443 + endpoint = urlparse_result.hostname + model_provider["endpoint"] = endpoint + model_provider["port"] = port + model_provider["protocol"] = protocol + model_provider["cluster_name"] = ( + provider + "_" + endpoint + ) # make name unique by appending endpoint + llms_with_endpoint.append(model_provider) if len(model_usage_name_keys) > 0: - routing_llm_provider = config_yaml.get("routing", {}).get("llm_provider", None) - if routing_llm_provider and routing_llm_provider not in llm_provider_name_set: + routing_model_provider = config_yaml.get("routing", {}).get( + "model_provider", None + ) + if ( + routing_model_provider + and routing_model_provider not in model_provider_name_set + ): 
raise Exception( - f"Routing llm_provider {routing_llm_provider} is not defined in llm_providers" + f"Routing model_provider {routing_model_provider} is not defined in model_providers" ) - if routing_llm_provider is None and "arch-router" not in llm_provider_name_set: - updated_llm_providers.append( + if ( + routing_model_provider is None + and "arch-router" not in model_provider_name_set + ): + updated_model_providers.append( { "name": "arch-router", "provider_interface": "arch", @@ -214,7 +291,19 @@ def validate_and_render_schema(): } ) - config_yaml["llm_providers"] = updated_llm_providers + updated_model_providers = [] + for listener in listeners: + print("Processing listener: ", listener) + model_providers = listener.get("model_providers", None) + if model_providers is not None and model_providers != []: + print("processing egress traffic listener") + print("updated_model_providers: ", updated_model_providers) + if updated_model_providers is not None and updated_model_providers != []: + raise Exception( + "Please provide model_providers either under listeners or at root level, not both. Currently we don't support multiple listeners with model_providers" + ) + updated_model_providers = deepcopy(model_providers) + config_yaml["model_providers"] = updated_model_providers # Validate model aliases if present if "model_aliases" in config_yaml: @@ -223,30 +312,12 @@ def validate_and_render_schema(): target = alias_config.get("target") if target not in model_name_keys: raise Exception( - f"Model alias '{alias_name}' targets '{target}' which is not defined as a model. Available models: {', '.join(sorted(model_name_keys))}" + f"Model alias '{alias_name}' targets '{target}' which is not defined as a model. 
Available models: {', '.join(sorted(model_name_keys))}" ) arch_config_string = yaml.dump(config_yaml) arch_llm_config_string = yaml.dump(config_yaml) - prompt_gateway_listener = config_yaml.get("listeners", {}).get( - "ingress_traffic", {} - ) - if prompt_gateway_listener.get("port") == None: - prompt_gateway_listener["port"] = 10000 # default port for prompt gateway - if prompt_gateway_listener.get("address") == None: - prompt_gateway_listener["address"] = "127.0.0.1" - if prompt_gateway_listener.get("timeout") == None: - prompt_gateway_listener["timeout"] = "10s" - - llm_gateway_listener = config_yaml.get("listeners", {}).get("egress_traffic", {}) - if llm_gateway_listener.get("port") == None: - llm_gateway_listener["port"] = 12000 # default port for llm gateway - if llm_gateway_listener.get("address") == None: - llm_gateway_listener["address"] = "127.0.0.1" - if llm_gateway_listener.get("timeout") == None: - llm_gateway_listener["timeout"] = "300s" - use_agent_orchestrator = config_yaml.get("overrides", {}).get( "use_agent_orchestrator", False ) @@ -269,15 +340,16 @@ def validate_and_render_schema(): print("agent_orchestrator: ", agent_orchestrator) data = { - "prompt_gateway_listener": prompt_gateway_listener, - "llm_gateway_listener": llm_gateway_listener, + "prompt_gateway_listener": prompt_gateway, + "llm_gateway_listener": llm_gateway, "arch_config": arch_config_string, "arch_llm_config": arch_llm_config_string, "arch_clusters": inferred_clusters, - "arch_llm_providers": config_yaml["llm_providers"], + "arch_model_providers": updated_model_providers, "arch_tracing": arch_tracing, "local_llms": llms_with_endpoint, "agent_orchestrator": agent_orchestrator, + "listeners": listeners, } rendered = template.render(data) diff --git a/arch/tools/cli/core.py b/arch/tools/cli/core.py index 0846a51a..6cd028e7 100644 --- a/arch/tools/cli/core.py +++ b/arch/tools/cli/core.py @@ -5,7 +5,7 @@ import time import sys import yaml -from cli.utils import getLogger +from 
cli.utils import convert_legacy_listeners, getLogger from cli.consts import ( ARCHGW_DOCKER_IMAGE, ARCHGW_DOCKER_NAME, @@ -26,7 +26,7 @@ from cli.docker_cli import ( log = getLogger(__name__) -def _get_gateway_ports(arch_config_file: str) -> tuple: +def _get_gateway_ports(arch_config_file: str) -> list[int]: PROMPT_GATEWAY_DEFAULT_PORT = 10000 LLM_GATEWAY_DEFAULT_PORT = 12000 @@ -35,18 +35,15 @@ def _get_gateway_ports(arch_config_file: str) -> tuple: with open(arch_config_file) as f: arch_config_dict = yaml.safe_load(f) - prompt_gateway_port = ( - arch_config_dict.get("listeners", {}) - .get("ingress_traffic", {}) - .get("port", PROMPT_GATEWAY_DEFAULT_PORT) - ) - llm_gateway_port = ( - arch_config_dict.get("listeners", {}) - .get("egress_traffic", {}) - .get("port", LLM_GATEWAY_DEFAULT_PORT) + print("arch config dict json string: ", json.dumps(arch_config_dict)) + + listeners, _, _ = convert_legacy_listeners( + arch_config_dict.get("listeners"), arch_config_dict.get("llm_providers") ) - return prompt_gateway_port, llm_gateway_port + all_ports = [listener.get("port") for listener in listeners] + + return all_ports def start_arch(arch_config_file, env, log_timeout=120, foreground=False): @@ -68,14 +65,13 @@ def start_arch(arch_config_file, env, log_timeout=120, foreground=False): docker_stop_container(ARCHGW_DOCKER_NAME) docker_remove_container(ARCHGW_DOCKER_NAME) - prompt_gateway_port, llm_gateway_port = _get_gateway_ports(arch_config_file) + gateway_ports = _get_gateway_ports(arch_config_file) return_code, _, archgw_stderr = docker_start_archgw_detached( arch_config_file, os.path.expanduser("~/archgw_logs"), env, - prompt_gateway_port, - llm_gateway_port, + gateway_ports, ) if return_code != 0: log.info("Failed to start arch gateway: " + str(return_code)) @@ -84,13 +80,17 @@ def start_arch(arch_config_file, env, log_timeout=120, foreground=False): start_time = time.time() while True: - prompt_gateway_health_check_status = health_check_endpoint( - 
f"http://localhost:{prompt_gateway_port}/healthz" - ) - - llm_gateway_health_check_status = health_check_endpoint( - f"http://localhost:{llm_gateway_port}/healthz" - ) + all_listeners_healthy = True + for port in gateway_ports: + log.info(f"Checking health endpoint on port {port}") + health_check_status = health_check_endpoint( + f"http://localhost:{port}/healthz" + ) + if health_check_status: + log.info(f"Gateway on port {port} is healthy!") + else: + all_listeners_healthy = False + log.info(f"Gateway on port {port} is not healthy yet.") archgw_status = docker_container_status(ARCHGW_DOCKER_NAME) current_time = time.time() @@ -107,7 +107,7 @@ def start_arch(arch_config_file, env, log_timeout=120, foreground=False): stream_gateway_logs(follow=False) sys.exit(1) - if prompt_gateway_health_check_status or llm_gateway_health_check_status: + if all_listeners_healthy: log.info("archgw is running and is healthy!") break else: diff --git a/arch/tools/cli/docker_cli.py b/arch/tools/cli/docker_cli.py index e8a12a13..2d7bac28 100644 --- a/arch/tools/cli/docker_cli.py +++ b/arch/tools/cli/docker_cli.py @@ -44,17 +44,18 @@ def docker_start_archgw_detached( arch_config_file: str, logs_path_abs: str, env: dict, - prompt_gateway_port, - llm_gateway_port, + gateway_ports: list[int], ) -> str: env_args = [item for key, value in env.items() for item in ["-e", f"{key}={value}"]] port_mappings = [ - f"{prompt_gateway_port}:{prompt_gateway_port}", - f"{llm_gateway_port}:{llm_gateway_port}", - f"{llm_gateway_port+1}:{llm_gateway_port+1}", + f"{12001}:{12001}", "19901:9901", ] + + for port in gateway_ports: + port_mappings.append(f"{port}:{port}") + port_mappings_args = [item for port in port_mappings for item in ("-p", port)] volume_mappings = [ @@ -126,7 +127,8 @@ def docker_validate_archgw_schema(arch_config_file): "--entrypoint", "python", ARCHGW_DOCKER_IMAGE, - "config_generator.py", + "-m", + "cli.config_generator", ], capture_output=True, text=True, diff --git 
a/arch/tools/cli/utils.py b/arch/tools/cli/utils.py index c7d39d66..2f29b16e 100644 --- a/arch/tools/cli/utils.py +++ b/arch/tools/cli/utils.py @@ -37,16 +37,98 @@ def has_ingress_listener(arch_config_file): return False +def convert_legacy_listeners( + listeners: dict | list, model_providers: list | None +) -> tuple[list, dict | None, dict | None]: + llm_gateway_listener = { + "name": "egress_traffic", + "type": "model_listener", + "port": 12000, + "address": "0.0.0.0", + "timeout": "30s", + "model_providers": model_providers or [], + } + + prompt_gateway_listener = { + "name": "ingress_traffic", + "type": "prompt_listener", + "port": 10000, + "address": "0.0.0.0", + "timeout": "30s", + } + + if isinstance(listeners, dict): + # legacy listeners + # check if type is array or object + # if its dict its legacy format let's convert it to array + updated_listeners = [] + ingress_traffic = listeners.get("ingress_traffic", {}) + egress_traffic = listeners.get("egress_traffic", {}) + + llm_gateway_listener["port"] = egress_traffic.get( + "port", llm_gateway_listener["port"] + ) + llm_gateway_listener["address"] = egress_traffic.get( + "address", llm_gateway_listener["address"] + ) + llm_gateway_listener["timeout"] = egress_traffic.get( + "timeout", llm_gateway_listener["timeout"] + ) + if model_providers is None or model_providers == []: + raise ValueError("model_providers cannot be empty when using legacy format") + + llm_gateway_listener["model_providers"] = model_providers + updated_listeners.append(llm_gateway_listener) + + if ingress_traffic and ingress_traffic != {}: + prompt_gateway_listener["port"] = ingress_traffic.get( + "port", prompt_gateway_listener["port"] + ) + prompt_gateway_listener["address"] = ingress_traffic.get( + "address", prompt_gateway_listener["address"] + ) + prompt_gateway_listener["timeout"] = ingress_traffic.get( + "timeout", prompt_gateway_listener["timeout"] + ) + updated_listeners.append(prompt_gateway_listener) + + return 
updated_listeners, llm_gateway_listener, prompt_gateway_listener + + model_provider_set = False + for listener in listeners: + if listener.get("type") == "model_listener": + if model_provider_set: + raise ValueError( + "Currently only one listener can have model_providers set" + ) + listener["model_providers"] = model_providers or [] + model_provider_set = True + llm_gateway_listener = listener + if not model_provider_set: + listeners.append(llm_gateway_listener) + + return listeners, llm_gateway_listener, prompt_gateway_listener + + def get_llm_provider_access_keys(arch_config_file): with open(arch_config_file, "r") as file: arch_config = file.read() arch_config_yaml = yaml.safe_load(arch_config) access_key_list = [] - for llm_provider in arch_config_yaml.get("llm_providers", []): - acess_key = llm_provider.get("access_key") - if acess_key is not None: - access_key_list.append(acess_key) + + # Convert legacy llm_providers to model_providers + if "llm_providers" in arch_config_yaml: + if "model_providers" in arch_config_yaml: + raise Exception( + "Please provide either llm_providers or model_providers, not both. 
llm_providers is deprecated, please use model_providers instead" + ) + arch_config_yaml["model_providers"] = arch_config_yaml["llm_providers"] + del arch_config_yaml["llm_providers"] + + listeners, _, _ = convert_legacy_listeners( + arch_config_yaml.get("listeners"), arch_config_yaml.get("model_providers") + ) for prompt_target in arch_config_yaml.get("prompt_targets", []): for k, v in prompt_target.get("endpoint", {}).get("http_headers", {}).items(): @@ -60,6 +142,12 @@ def get_llm_provider_access_keys(arch_config_file): else: access_key_list.append(v) + for listener in listeners: + for llm_provider in listener.get("model_providers", []): + access_key = llm_provider.get("access_key") + if access_key is not None: + access_key_list.append(access_key) + return access_key_list diff --git a/arch/tools/test/test_config_generator.py b/arch/tools/test/test_config_generator.py index 0e4f8446..0d8f69b9 100644 --- a/arch/tools/test/test_config_generator.py +++ b/arch/tools/test/test_config_generator.py @@ -1,3 +1,4 @@ +import json import pytest from unittest import mock import sys @@ -81,10 +82,88 @@ tracing: validate_and_render_schema() +def test_validate_and_render_happy_path_agent_config(monkeypatch): + monkeypatch.setenv("ARCH_CONFIG_FILE", "fake_arch_config.yaml") + monkeypatch.setenv("ARCH_CONFIG_SCHEMA_FILE", "fake_arch_config_schema.yaml") + monkeypatch.setenv("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml") + monkeypatch.setenv("ARCH_CONFIG_FILE_RENDERED", "fake_arch_config_rendered.yaml") + monkeypatch.setenv("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml") + monkeypatch.setenv("TEMPLATE_ROOT", "../") + + arch_config = """ +version: v0.3.0 + +agents: + - name: query_rewriter + kind: openai + endpoint: http://localhost:10500 + - name: context_builder + kind: openai + endpoint: http://localhost:10501 + - name: response_generator + kind: openai + endpoint: http://localhost:10502 + - name: research_agent + kind: openai + endpoint: http://localhost:10500 + - name: 
input_guard_rails + kind: openai + endpoint: http://localhost:10503 + +listeners: + - name: tmobile + type: agent + router: arch_agent_v2 + agents: + - name: simple_tmobile_rag_agent + description: t-mobile virtual assistant for device contracts. + filter_chain: + - query_rewriter + - context_builder + - response_generator + - name: research_agent + description: agent to research and gather information from various sources. + filter_chain: + - research_agent + - response_generator + port: 8000 + + - name: llm_provider + type: model + description: llm provider configuration + port: 12000 + llm_providers: + - access_key: ${OPENAI_API_KEY} + model: openai/gpt-4o +""" + arch_config_schema = "" + with open("../arch_config_schema.yaml", "r") as file: + arch_config_schema = file.read() + + m_open = mock.mock_open() + # Provide enough file handles for all open() calls in validate_and_render_schema + m_open.side_effect = [ + mock.mock_open(read_data="").return_value, + mock.mock_open(read_data=arch_config).return_value, # ARCH_CONFIG_FILE + mock.mock_open( + read_data=arch_config_schema + ).return_value, # ARCH_CONFIG_SCHEMA_FILE + mock.mock_open(read_data=arch_config).return_value, # ARCH_CONFIG_FILE + mock.mock_open( + read_data=arch_config_schema + ).return_value, # ARCH_CONFIG_SCHEMA_FILE + mock.mock_open().return_value, # ENVOY_CONFIG_FILE_RENDERED (write) + mock.mock_open().return_value, # ARCH_CONFIG_FILE_RENDERED (write) + ] + with mock.patch("builtins.open", m_open): + with mock.patch("config_generator.Environment"): + validate_and_render_schema() + + arch_config_test_cases = [ { "id": "duplicate_provider_name", - "expected_error": "Duplicate llm_provider name", + "expected_error": "Duplicate model_provider name", "arch_config": """ version: v0.1.0 @@ -270,3 +349,126 @@ def test_validate_and_render_schema_tests(monkeypatch, arch_config_test_case): with pytest.raises(Exception) as excinfo: validate_and_render_schema() assert expected_error in str(excinfo.value) + + 
+def test_convert_legacy_llm_providers(): + from cli.utils import convert_legacy_listeners + + listeners = { + "ingress_traffic": { + "address": "0.0.0.0", + "port": 10000, + "timeout": "30s", + }, + "egress_traffic": { + "address": "0.0.0.0", + "port": 12000, + "timeout": "30s", + }, + } + llm_providers = [ + { + "model": "openai/gpt-4o", + "access_key": "test_key", + } + ] + + updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners( + listeners, llm_providers + ) + assert isinstance(updated_providers, list) + assert llm_gateway is not None + assert prompt_gateway is not None + print(json.dumps(updated_providers)) + assert updated_providers == [ + { + "name": "egress_traffic", + "type": "model_listener", + "port": 12000, + "address": "0.0.0.0", + "timeout": "30s", + "model_providers": [{"model": "openai/gpt-4o", "access_key": "test_key"}], + }, + { + "name": "ingress_traffic", + "type": "prompt_listener", + "port": 10000, + "address": "0.0.0.0", + "timeout": "30s", + }, + ] + + assert llm_gateway == { + "address": "0.0.0.0", + "model_providers": [ + { + "access_key": "test_key", + "model": "openai/gpt-4o", + }, + ], + "name": "egress_traffic", + "type": "model_listener", + "port": 12000, + "timeout": "30s", + } + + assert prompt_gateway == { + "address": "0.0.0.0", + "name": "ingress_traffic", + "port": 10000, + "timeout": "30s", + "type": "prompt_listener", + } + + +def test_convert_legacy_llm_providers_no_prompt_gateway(): + from cli.utils import convert_legacy_listeners + + listeners = { + "egress_traffic": { + "address": "0.0.0.0", + "port": 12000, + "timeout": "30s", + } + } + llm_providers = [ + { + "model": "openai/gpt-4o", + "access_key": "test_key", + } + ] + + updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners( + listeners, llm_providers + ) + assert isinstance(updated_providers, list) + assert llm_gateway is not None + assert prompt_gateway is not None + assert updated_providers == [ + { + "address": "0.0.0.0", + 
"model_providers": [ + { + "access_key": "test_key", + "model": "openai/gpt-4o", + }, + ], + "name": "egress_traffic", + "port": 12000, + "timeout": "30s", + "type": "model_listener", + } + ] + assert llm_gateway == { + "address": "0.0.0.0", + "model_providers": [ + { + "access_key": "test_key", + "model": "openai/gpt-4o", + }, + ], + "name": "egress_traffic", + "type": "model_listener", + "port": 12000, + "timeout": "30s", + } diff --git a/arch/tools/uv.lock b/arch/tools/uv.lock new file mode 100644 index 00000000..5b88a369 --- /dev/null +++ b/arch/tools/uv.lock @@ -0,0 +1,631 @@ +version = 1 +requires-python = ">=3.10" + +[[package]] +name = "archgw" +version = "0.3.10" +source = { editable = "." } +dependencies = [ + { name = "click" }, + { name = "huggingface-hub" }, + { name = "jinja2" }, + { name = "jsonschema" }, + { name = "pytest" }, + { name = "pyyaml" }, + { name = "requests" }, +] + +[package.metadata] +requires-dist = [ + { name = "click" }, + { name = "huggingface-hub", specifier = ">=0.34.4" }, + { name = "jinja2" }, + { name = "jsonschema" }, + { name = "pytest", specifier = ">=8.4.2" }, + { name = "pyyaml" }, + { name = "requests", specifier = ">=2.32.5" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815 }, +] + +[[package]] +name = "certifi" +version = "2025.8.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/98/f3b8013223728a99b908c9344da3aa04ee6e3fa235f19409033eda92fb78/charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72", size = 207695 }, + { url = "https://files.pythonhosted.org/packages/21/40/5188be1e3118c82dcb7c2a5ba101b783822cfb413a0268ed3be0468532de/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe", size = 147153 }, + { url = "https://files.pythonhosted.org/packages/37/60/5d0d74bc1e1380f0b72c327948d9c2aca14b46a9efd87604e724260f384c/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601", size = 160428 }, + { url = 
"https://files.pythonhosted.org/packages/85/9a/d891f63722d9158688de58d050c59dc3da560ea7f04f4c53e769de5140f5/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c", size = 157627 }, + { url = "https://files.pythonhosted.org/packages/65/1a/7425c952944a6521a9cfa7e675343f83fd82085b8af2b1373a2409c683dc/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2", size = 152388 }, + { url = "https://files.pythonhosted.org/packages/f0/c9/a2c9c2a355a8594ce2446085e2ec97fd44d323c684ff32042e2a6b718e1d/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0", size = 150077 }, + { url = "https://files.pythonhosted.org/packages/3b/38/20a1f44e4851aa1c9105d6e7110c9d020e093dfa5836d712a5f074a12bf7/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0", size = 161631 }, + { url = "https://files.pythonhosted.org/packages/a4/fa/384d2c0f57edad03d7bec3ebefb462090d8905b4ff5a2d2525f3bb711fac/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0", size = 159210 }, + { url = "https://files.pythonhosted.org/packages/33/9e/eca49d35867ca2db336b6ca27617deed4653b97ebf45dfc21311ce473c37/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a", size = 153739 }, + { url = "https://files.pythonhosted.org/packages/2a/91/26c3036e62dfe8de8061182d33be5025e2424002125c9500faff74a6735e/charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = 
"sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f", size = 99825 }, + { url = "https://files.pythonhosted.org/packages/e2/c6/f05db471f81af1fa01839d44ae2a8bfeec8d2a8b4590f16c4e7393afd323/charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669", size = 107452 }, + { url = "https://files.pythonhosted.org/packages/7f/b5/991245018615474a60965a7c9cd2b4efbaabd16d582a5547c47ee1c7730b/charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b", size = 204483 }, + { url = "https://files.pythonhosted.org/packages/c7/2a/ae245c41c06299ec18262825c1569c5d3298fc920e4ddf56ab011b417efd/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64", size = 145520 }, + { url = "https://files.pythonhosted.org/packages/3a/a4/b3b6c76e7a635748c4421d2b92c7b8f90a432f98bda5082049af37ffc8e3/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91", size = 158876 }, + { url = "https://files.pythonhosted.org/packages/e2/e6/63bb0e10f90a8243c5def74b5b105b3bbbfb3e7bb753915fe333fb0c11ea/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f", size = 156083 }, + { url = "https://files.pythonhosted.org/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07", size = 150295 }, + { url = 
"https://files.pythonhosted.org/packages/61/f1/190d9977e0084d3f1dc169acd060d479bbbc71b90bf3e7bf7b9927dec3eb/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30", size = 148379 }, + { url = "https://files.pythonhosted.org/packages/4c/92/27dbe365d34c68cfe0ca76f1edd70e8705d82b378cb54ebbaeabc2e3029d/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14", size = 160018 }, + { url = "https://files.pythonhosted.org/packages/99/04/baae2a1ea1893a01635d475b9261c889a18fd48393634b6270827869fa34/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c", size = 157430 }, + { url = "https://files.pythonhosted.org/packages/2f/36/77da9c6a328c54d17b960c89eccacfab8271fdaaa228305330915b88afa9/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae", size = 151600 }, + { url = "https://files.pythonhosted.org/packages/64/d4/9eb4ff2c167edbbf08cdd28e19078bf195762e9bd63371689cab5ecd3d0d/charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849", size = 99616 }, + { url = "https://files.pythonhosted.org/packages/f4/9c/996a4a028222e7761a96634d1820de8a744ff4327a00ada9c8942033089b/charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c", size = 107108 }, + { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655 }, + { url = 
"https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223 }, + { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366 }, + { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104 }, + { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830 }, + { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854 }, + { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670 }, + { url = 
"https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501 }, + { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173 }, + { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822 }, + { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543 }, + { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326 }, + { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008 }, + { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196 }, + { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819 }, + { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350 }, + { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644 }, + { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468 }, + { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187 }, + { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699 }, + { url = 
"https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580 }, + { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366 }, + { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342 }, + { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995 }, + { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640 }, + { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636 }, + { url = 
"https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939 }, + { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580 }, + { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870 }, + { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797 }, + { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224 }, + { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086 }, + { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400 }, + { url = 
"https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175 }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = 
"sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674 }, +] + +[[package]] +name = "filelock" +version = "3.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988 }, +] + +[[package]] +name = "fsspec" +version = "2025.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/e0/bab50af11c2d75c9c4a2a26a5254573c0bd97cea152254401510950486fa/fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19", size = 304847 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289 }, +] + +[[package]] +name = "hf-xet" +version = "1.1.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/23/0f/5b60fc28ee7f8cc17a5114a584fd6b86e11c3e0a6e142a7f97a161e9640a/hf_xet-1.1.9.tar.gz", hash = "sha256:c99073ce404462e909f1d5839b2d14a3827b8fe75ed8aed551ba6609c026c803", size = 484242 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/de/12/56e1abb9a44cdef59a411fe8a8673313195711b5ecce27880eb9c8fa90bd/hf_xet-1.1.9-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:a3b6215f88638dd7a6ff82cb4e738dcbf3d863bf667997c093a3c990337d1160", size = 2762553 }, + { url = "https://files.pythonhosted.org/packages/3a/e6/2d0d16890c5f21b862f5df3146519c182e7f0ae49b4b4bf2bd8a40d0b05e/hf_xet-1.1.9-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:9b486de7a64a66f9a172f4b3e0dfe79c9f0a93257c501296a2521a13495a698a", size = 2623216 }, + { url = "https://files.pythonhosted.org/packages/81/42/7e6955cf0621e87491a1fb8cad755d5c2517803cea174229b0ec00ff0166/hf_xet-1.1.9-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4c5a840c2c4e6ec875ed13703a60e3523bc7f48031dfd750923b2a4d1a5fc3c", size = 3186789 }, + { url = "https://files.pythonhosted.org/packages/df/8b/759233bce05457f5f7ec062d63bbfd2d0c740b816279eaaa54be92aa452a/hf_xet-1.1.9-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:96a6139c9e44dad1c52c52520db0fffe948f6bce487cfb9d69c125f254bb3790", size = 3088747 }, + { url = "https://files.pythonhosted.org/packages/6c/3c/28cc4db153a7601a996985bcb564f7b8f5b9e1a706c7537aad4b4809f358/hf_xet-1.1.9-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ad1022e9a998e784c97b2173965d07fe33ee26e4594770b7785a8cc8f922cd95", size = 3251429 }, + { url = "https://files.pythonhosted.org/packages/84/17/7caf27a1d101bfcb05be85850d4aa0a265b2e1acc2d4d52a48026ef1d299/hf_xet-1.1.9-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:86754c2d6d5afb11b0a435e6e18911a4199262fe77553f8c50d75e21242193ea", size = 3354643 }, + { url = "https://files.pythonhosted.org/packages/cd/50/0c39c9eed3411deadcc98749a6699d871b822473f55fe472fad7c01ec588/hf_xet-1.1.9-cp37-abi3-win_amd64.whl", hash = "sha256:5aad3933de6b725d61d51034e04174ed1dce7a57c63d530df0014dea15a40127", size = 2804797 }, +] + +[[package]] +name = "huggingface-hub" +version = "0.34.4" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "packaging" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/45/c9/bdbe19339f76d12985bc03572f330a01a93c04dffecaaea3061bdd7fb892/huggingface_hub-0.34.4.tar.gz", hash = "sha256:a4228daa6fb001be3f4f4bdaf9a0db00e1739235702848df00885c9b5742c85c", size = 459768 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/7b/bb06b061991107cd8783f300adff3e7b7f284e330fd82f507f2a1417b11d/huggingface_hub-0.34.4-py3-none-any.whl", hash = "sha256:9b365d781739c93ff90c359844221beef048403f1bc1f1c123c191257c3c890a", size = 561452 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050 }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899 }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040 }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437 }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357 }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393 }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732 }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866 }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964 }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977 }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366 }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091 }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065 }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514 }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353 }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392 }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984 }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120 }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032 }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057 }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359 }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306 }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094 }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521 }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348 }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149 }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118 }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993 }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178 }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319 }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352 }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097 }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601 }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274 }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352 }, + { url = 
"https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122 }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085 }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978 }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208 }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344 }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101 }, + { url = 
"https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603 }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510 }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486 }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480 }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914 }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796 }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473 }, + { url = 
"https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114 }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098 }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208 }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739 }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469 }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = 
"sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538 }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199 }, + { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758 }, + { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463 }, + { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280 }, + { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239 }, + { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802 }, + { url = 
"https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527 }, + { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052 }, + { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774 }, + { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612 }, + { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040 }, + { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829 }, + { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167 }, + { url = 
"https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952 }, + { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301 }, + { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638 }, + { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850 }, + { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980 }, + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = 
"https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = 
"https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = 
"https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775 }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", 
size = 64738 }, +] + +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/ed/3aef893e2dd30e77e35d20d4ddb45ca459db59cead748cad9796ad479411/rpds_py-0.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:68afeec26d42ab3b47e541b272166a0b4400313946871cba3ed3a4fc0cab1cef", size = 371606 }, + { url = "https://files.pythonhosted.org/packages/6d/82/9818b443e5d3eb4c83c3994561387f116aae9833b35c484474769c4a8faf/rpds_py-0.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74e5b2f7bb6fa38b1b10546d27acbacf2a022a8b5543efb06cfebc72a59c85be", size = 353452 }, + { url = "https://files.pythonhosted.org/packages/99/c7/d2a110ffaaa397fc6793a83c7bd3545d9ab22658b7cdff05a24a4535cc45/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9024de74731df54546fab0bfbcdb49fae19159ecaecfc8f37c18d2c7e2c0bd61", size = 381519 }, + { url = "https://files.pythonhosted.org/packages/5a/bc/e89581d1f9d1be7d0247eaef602566869fdc0d084008ba139e27e775366c/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31d3ebadefcd73b73928ed0b2fd696f7fefda8629229f81929ac9c1854d0cffb", size = 394424 }, + { url = "https://files.pythonhosted.org/packages/ac/2e/36a6861f797530e74bb6ed53495f8741f1ef95939eed01d761e73d559067/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2e7f8f169d775dd9092a1743768d771f1d1300453ddfe6325ae3ab5332b4657", size = 523467 }, + { url = "https://files.pythonhosted.org/packages/c4/59/c1bc2be32564fa499f988f0a5c6505c2f4746ef96e58e4d7de5cf923d77e/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash 
= "sha256:3d905d16f77eb6ab2e324e09bfa277b4c8e5e6b8a78a3e7ff8f3cdf773b4c013", size = 402660 }, + { url = "https://files.pythonhosted.org/packages/0a/ec/ef8bf895f0628dd0a59e54d81caed6891663cb9c54a0f4bb7da918cb88cf/rpds_py-0.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50c946f048209e6362e22576baea09193809f87687a95a8db24e5fbdb307b93a", size = 384062 }, + { url = "https://files.pythonhosted.org/packages/69/f7/f47ff154be8d9a5e691c083a920bba89cef88d5247c241c10b9898f595a1/rpds_py-0.27.1-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:3deab27804d65cd8289eb814c2c0e807c4b9d9916c9225e363cb0cf875eb67c1", size = 401289 }, + { url = "https://files.pythonhosted.org/packages/3b/d9/ca410363efd0615814ae579f6829cafb39225cd63e5ea5ed1404cb345293/rpds_py-0.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b61097f7488de4be8244c89915da8ed212832ccf1e7c7753a25a394bf9b1f10", size = 417718 }, + { url = "https://files.pythonhosted.org/packages/e3/a0/8cb5c2ff38340f221cc067cc093d1270e10658ba4e8d263df923daa18e86/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8a3f29aba6e2d7d90528d3c792555a93497fe6538aa65eb675b44505be747808", size = 558333 }, + { url = "https://files.pythonhosted.org/packages/6f/8c/1b0de79177c5d5103843774ce12b84caa7164dfc6cd66378768d37db11bf/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd6cd0485b7d347304067153a6dc1d73f7d4fd995a396ef32a24d24b8ac63ac8", size = 589127 }, + { url = "https://files.pythonhosted.org/packages/c8/5e/26abb098d5e01266b0f3a2488d299d19ccc26849735d9d2b95c39397e945/rpds_py-0.27.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f4461bf931108c9fa226ffb0e257c1b18dc2d44cd72b125bec50ee0ab1248a9", size = 554899 }, + { url = "https://files.pythonhosted.org/packages/de/41/905cc90ced13550db017f8f20c6d8e8470066c5738ba480d7ba63e3d136b/rpds_py-0.27.1-cp310-cp310-win32.whl", hash = "sha256:ee5422d7fb21f6a00c1901bf6559c49fee13a5159d0288320737bbf6585bd3e4", 
size = 217450 }, + { url = "https://files.pythonhosted.org/packages/75/3d/6bef47b0e253616ccdf67c283e25f2d16e18ccddd38f92af81d5a3420206/rpds_py-0.27.1-cp310-cp310-win_amd64.whl", hash = "sha256:3e039aabf6d5f83c745d5f9a0a381d031e9ed871967c0a5c38d201aca41f3ba1", size = 228447 }, + { url = "https://files.pythonhosted.org/packages/b5/c1/7907329fbef97cbd49db6f7303893bd1dd5a4a3eae415839ffdfb0762cae/rpds_py-0.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:be898f271f851f68b318872ce6ebebbc62f303b654e43bf72683dbdc25b7c881", size = 371063 }, + { url = "https://files.pythonhosted.org/packages/11/94/2aab4bc86228bcf7c48760990273653a4900de89c7537ffe1b0d6097ed39/rpds_py-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62ac3d4e3e07b58ee0ddecd71d6ce3b1637de2d373501412df395a0ec5f9beb5", size = 353210 }, + { url = "https://files.pythonhosted.org/packages/3a/57/f5eb3ecf434342f4f1a46009530e93fd201a0b5b83379034ebdb1d7c1a58/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4708c5c0ceb2d034f9991623631d3d23cb16e65c83736ea020cdbe28d57c0a0e", size = 381636 }, + { url = "https://files.pythonhosted.org/packages/ae/f4/ef95c5945e2ceb5119571b184dd5a1cc4b8541bbdf67461998cfeac9cb1e/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:abfa1171a9952d2e0002aba2ad3780820b00cc3d9c98c6630f2e93271501f66c", size = 394341 }, + { url = "https://files.pythonhosted.org/packages/5a/7e/4bd610754bf492d398b61725eb9598ddd5eb86b07d7d9483dbcd810e20bc/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b507d19f817ebaca79574b16eb2ae412e5c0835542c93fe9983f1e432aca195", size = 523428 }, + { url = "https://files.pythonhosted.org/packages/9f/e5/059b9f65a8c9149361a8b75094864ab83b94718344db511fd6117936ed2a/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168b025f8fd8d8d10957405f3fdcef3dc20f5982d398f90851f4abc58c566c52", size = 402923 }, + { url = 
"https://files.pythonhosted.org/packages/f5/48/64cabb7daced2968dd08e8a1b7988bf358d7bd5bcd5dc89a652f4668543c/rpds_py-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb56c6210ef77caa58e16e8c17d35c63fe3f5b60fd9ba9d424470c3400bcf9ed", size = 384094 }, + { url = "https://files.pythonhosted.org/packages/ae/e1/dc9094d6ff566bff87add8a510c89b9e158ad2ecd97ee26e677da29a9e1b/rpds_py-0.27.1-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:d252f2d8ca0195faa707f8eb9368955760880b2b42a8ee16d382bf5dd807f89a", size = 401093 }, + { url = "https://files.pythonhosted.org/packages/37/8e/ac8577e3ecdd5593e283d46907d7011618994e1d7ab992711ae0f78b9937/rpds_py-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e5e54da1e74b91dbc7996b56640f79b195d5925c2b78efaa8c5d53e1d88edde", size = 417969 }, + { url = "https://files.pythonhosted.org/packages/66/6d/87507430a8f74a93556fe55c6485ba9c259949a853ce407b1e23fea5ba31/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ffce0481cc6e95e5b3f0a47ee17ffbd234399e6d532f394c8dce320c3b089c21", size = 558302 }, + { url = "https://files.pythonhosted.org/packages/3a/bb/1db4781ce1dda3eecc735e3152659a27b90a02ca62bfeea17aee45cc0fbc/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a205fdfe55c90c2cd8e540ca9ceba65cbe6629b443bc05db1f590a3db8189ff9", size = 589259 }, + { url = "https://files.pythonhosted.org/packages/7b/0e/ae1c8943d11a814d01b482e1f8da903f88047a962dff9bbdadf3bd6e6fd1/rpds_py-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:689fb5200a749db0415b092972e8eba85847c23885c8543a8b0f5c009b1a5948", size = 554983 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/0b2a55415931db4f112bdab072443ff76131b5ac4f4dc98d10d2d357eb03/rpds_py-0.27.1-cp311-cp311-win32.whl", hash = "sha256:3182af66048c00a075010bc7f4860f33913528a4b6fc09094a6e7598e462fe39", size = 217154 }, + { url = 
"https://files.pythonhosted.org/packages/24/75/3b7ffe0d50dc86a6a964af0d1cc3a4a2cdf437cb7b099a4747bbb96d1819/rpds_py-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4938466c6b257b2f5c4ff98acd8128ec36b5059e5c8f8372d79316b1c36bb15", size = 228627 }, + { url = "https://files.pythonhosted.org/packages/8d/3f/4fd04c32abc02c710f09a72a30c9a55ea3cc154ef8099078fd50a0596f8e/rpds_py-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:2f57af9b4d0793e53266ee4325535a31ba48e2f875da81a9177c9926dfa60746", size = 220998 }, + { url = "https://files.pythonhosted.org/packages/bd/fe/38de28dee5df58b8198c743fe2bea0c785c6d40941b9950bac4cdb71a014/rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90", size = 361887 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/4b6c7eedc7dd90986bf0fab6ea2a091ec11c01b15f8ba0a14d3f80450468/rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5", size = 345795 }, + { url = "https://files.pythonhosted.org/packages/6f/0e/e650e1b81922847a09cca820237b0edee69416a01268b7754d506ade11ad/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e", size = 385121 }, + { url = "https://files.pythonhosted.org/packages/1b/ea/b306067a712988e2bff00dcc7c8f31d26c29b6d5931b461aa4b60a013e33/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881", size = 398976 }, + { url = "https://files.pythonhosted.org/packages/2c/0a/26dc43c8840cb8fe239fe12dbc8d8de40f2365e838f3d395835dde72f0e5/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec", size = 525953 }, + { url = 
"https://files.pythonhosted.org/packages/22/14/c85e8127b573aaf3a0cbd7fbb8c9c99e735a4a02180c84da2a463b766e9e/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb", size = 407915 }, + { url = "https://files.pythonhosted.org/packages/ed/7b/8f4fee9ba1fb5ec856eb22d725a4efa3deb47f769597c809e03578b0f9d9/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5", size = 386883 }, + { url = "https://files.pythonhosted.org/packages/86/47/28fa6d60f8b74fcdceba81b272f8d9836ac0340570f68f5df6b41838547b/rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a", size = 405699 }, + { url = "https://files.pythonhosted.org/packages/d0/fd/c5987b5e054548df56953a21fe2ebed51fc1ec7c8f24fd41c067b68c4a0a/rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444", size = 423713 }, + { url = "https://files.pythonhosted.org/packages/ac/ba/3c4978b54a73ed19a7d74531be37a8bcc542d917c770e14d372b8daea186/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a", size = 562324 }, + { url = "https://files.pythonhosted.org/packages/b5/6c/6943a91768fec16db09a42b08644b960cff540c66aab89b74be6d4a144ba/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1", size = 593646 }, + { url = "https://files.pythonhosted.org/packages/11/73/9d7a8f4be5f4396f011a6bb7a19fe26303a0dac9064462f5651ced2f572f/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998", size = 558137 }, + { url = 
"https://files.pythonhosted.org/packages/6e/96/6772cbfa0e2485bcceef8071de7821f81aeac8bb45fbfd5542a3e8108165/rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39", size = 221343 }, + { url = "https://files.pythonhosted.org/packages/67/b6/c82f0faa9af1c6a64669f73a17ee0eeef25aff30bb9a1c318509efe45d84/rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594", size = 232497 }, + { url = "https://files.pythonhosted.org/packages/e1/96/2817b44bd2ed11aebacc9251da03689d56109b9aba5e311297b6902136e2/rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502", size = 222790 }, + { url = "https://files.pythonhosted.org/packages/cc/77/610aeee8d41e39080c7e14afa5387138e3c9fa9756ab893d09d99e7d8e98/rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b", size = 361741 }, + { url = "https://files.pythonhosted.org/packages/3a/fc/c43765f201c6a1c60be2043cbdb664013def52460a4c7adace89d6682bf4/rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf", size = 345574 }, + { url = "https://files.pythonhosted.org/packages/20/42/ee2b2ca114294cd9847d0ef9c26d2b0851b2e7e00bf14cc4c0b581df0fc3/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83", size = 385051 }, + { url = "https://files.pythonhosted.org/packages/fd/e8/1e430fe311e4799e02e2d1af7c765f024e95e17d651612425b226705f910/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf", size = 398395 }, + { url = 
"https://files.pythonhosted.org/packages/82/95/9dc227d441ff2670651c27a739acb2535ccaf8b351a88d78c088965e5996/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2", size = 524334 }, + { url = "https://files.pythonhosted.org/packages/87/01/a670c232f401d9ad461d9a332aa4080cd3cb1d1df18213dbd0d2a6a7ab51/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0", size = 407691 }, + { url = "https://files.pythonhosted.org/packages/03/36/0a14aebbaa26fe7fab4780c76f2239e76cc95a0090bdb25e31d95c492fcd/rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418", size = 386868 }, + { url = "https://files.pythonhosted.org/packages/3b/03/8c897fb8b5347ff6c1cc31239b9611c5bf79d78c984430887a353e1409a1/rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d", size = 405469 }, + { url = "https://files.pythonhosted.org/packages/da/07/88c60edc2df74850d496d78a1fdcdc7b54360a7f610a4d50008309d41b94/rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274", size = 422125 }, + { url = "https://files.pythonhosted.org/packages/6b/86/5f4c707603e41b05f191a749984f390dabcbc467cf833769b47bf14ba04f/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd", size = 562341 }, + { url = "https://files.pythonhosted.org/packages/b2/92/3c0cb2492094e3cd9baf9e49bbb7befeceb584ea0c1a8b5939dca4da12e5/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2", size = 592511 }, + { url = 
"https://files.pythonhosted.org/packages/10/bb/82e64fbb0047c46a168faa28d0d45a7851cd0582f850b966811d30f67ad8/rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002", size = 557736 }, + { url = "https://files.pythonhosted.org/packages/00/95/3c863973d409210da7fb41958172c6b7dbe7fc34e04d3cc1f10bb85e979f/rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3", size = 221462 }, + { url = "https://files.pythonhosted.org/packages/ce/2c/5867b14a81dc217b56d95a9f2a40fdbc56a1ab0181b80132beeecbd4b2d6/rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83", size = 232034 }, + { url = "https://files.pythonhosted.org/packages/c7/78/3958f3f018c01923823f1e47f1cc338e398814b92d83cd278364446fac66/rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d", size = 222392 }, + { url = "https://files.pythonhosted.org/packages/01/76/1cdf1f91aed5c3a7bf2eba1f1c4e4d6f57832d73003919a20118870ea659/rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228", size = 358355 }, + { url = "https://files.pythonhosted.org/packages/c3/6f/bf142541229374287604caf3bb2a4ae17f0a580798fd72d3b009b532db4e/rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92", size = 342138 }, + { url = "https://files.pythonhosted.org/packages/1a/77/355b1c041d6be40886c44ff5e798b4e2769e497b790f0f7fd1e78d17e9a8/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2", size = 380247 }, + { url = 
"https://files.pythonhosted.org/packages/d6/a4/d9cef5c3946ea271ce2243c51481971cd6e34f21925af2783dd17b26e815/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723", size = 390699 }, + { url = "https://files.pythonhosted.org/packages/3a/06/005106a7b8c6c1a7e91b73169e49870f4af5256119d34a361ae5240a0c1d/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802", size = 521852 }, + { url = "https://files.pythonhosted.org/packages/e5/3e/50fb1dac0948e17a02eb05c24510a8fe12d5ce8561c6b7b7d1339ab7ab9c/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f", size = 402582 }, + { url = "https://files.pythonhosted.org/packages/cb/b0/f4e224090dc5b0ec15f31a02d746ab24101dd430847c4d99123798661bfc/rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2", size = 384126 }, + { url = "https://files.pythonhosted.org/packages/54/77/ac339d5f82b6afff1df8f0fe0d2145cc827992cb5f8eeb90fc9f31ef7a63/rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21", size = 399486 }, + { url = "https://files.pythonhosted.org/packages/d6/29/3e1c255eee6ac358c056a57d6d6869baa00a62fa32eea5ee0632039c50a3/rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef", size = 414832 }, + { url = "https://files.pythonhosted.org/packages/3f/db/6d498b844342deb3fa1d030598db93937a9964fcf5cb4da4feb5f17be34b/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081", size = 557249 }, 
+ { url = "https://files.pythonhosted.org/packages/60/f3/690dd38e2310b6f68858a331399b4d6dbb9132c3e8ef8b4333b96caf403d/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd", size = 587356 }, + { url = "https://files.pythonhosted.org/packages/86/e3/84507781cccd0145f35b1dc32c72675200c5ce8d5b30f813e49424ef68fc/rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7", size = 555300 }, + { url = "https://files.pythonhosted.org/packages/e5/ee/375469849e6b429b3516206b4580a79e9ef3eb12920ddbd4492b56eaacbe/rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688", size = 216714 }, + { url = "https://files.pythonhosted.org/packages/21/87/3fc94e47c9bd0742660e84706c311a860dcae4374cf4a03c477e23ce605a/rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797", size = 228943 }, + { url = "https://files.pythonhosted.org/packages/70/36/b6e6066520a07cf029d385de869729a895917b411e777ab1cde878100a1d/rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334", size = 362472 }, + { url = "https://files.pythonhosted.org/packages/af/07/b4646032e0dcec0df9c73a3bd52f63bc6c5f9cda992f06bd0e73fe3fbebd/rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33", size = 345676 }, + { url = "https://files.pythonhosted.org/packages/b0/16/2f1003ee5d0af4bcb13c0cf894957984c32a6751ed7206db2aee7379a55e/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a", size = 385313 }, + { url = 
"https://files.pythonhosted.org/packages/05/cd/7eb6dd7b232e7f2654d03fa07f1414d7dfc980e82ba71e40a7c46fd95484/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b", size = 399080 }, + { url = "https://files.pythonhosted.org/packages/20/51/5829afd5000ec1cb60f304711f02572d619040aa3ec033d8226817d1e571/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7", size = 523868 }, + { url = "https://files.pythonhosted.org/packages/05/2c/30eebca20d5db95720ab4d2faec1b5e4c1025c473f703738c371241476a2/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136", size = 408750 }, + { url = "https://files.pythonhosted.org/packages/90/1a/cdb5083f043597c4d4276eae4e4c70c55ab5accec078da8611f24575a367/rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff", size = 387688 }, + { url = "https://files.pythonhosted.org/packages/7c/92/cf786a15320e173f945d205ab31585cc43969743bb1a48b6888f7a2b0a2d/rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9", size = 407225 }, + { url = "https://files.pythonhosted.org/packages/33/5c/85ee16df5b65063ef26017bef33096557a4c83fbe56218ac7cd8c235f16d/rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60", size = 423361 }, + { url = "https://files.pythonhosted.org/packages/4b/8e/1c2741307fcabd1a334ecf008e92c4f47bb6f848712cf15c923becfe82bb/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e", size = 562493 }, + { url 
= "https://files.pythonhosted.org/packages/04/03/5159321baae9b2222442a70c1f988cbbd66b9be0675dd3936461269be360/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212", size = 592623 }, + { url = "https://files.pythonhosted.org/packages/ff/39/c09fd1ad28b85bc1d4554a8710233c9f4cefd03d7717a1b8fbfd171d1167/rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675", size = 558800 }, + { url = "https://files.pythonhosted.org/packages/c5/d6/99228e6bbcf4baa764b18258f519a9035131d91b538d4e0e294313462a98/rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3", size = 221943 }, + { url = "https://files.pythonhosted.org/packages/be/07/c802bc6b8e95be83b79bdf23d1aa61d68324cb1006e245d6c58e959e314d/rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456", size = 233739 }, + { url = "https://files.pythonhosted.org/packages/c8/89/3e1b1c16d4c2d547c5717377a8df99aee8099ff050f87c45cb4d5fa70891/rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3", size = 223120 }, + { url = "https://files.pythonhosted.org/packages/62/7e/dc7931dc2fa4a6e46b2a4fa744a9fe5c548efd70e0ba74f40b39fa4a8c10/rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2", size = 358944 }, + { url = "https://files.pythonhosted.org/packages/e6/22/4af76ac4e9f336bfb1a5f240d18a33c6b2fcaadb7472ac7680576512b49a/rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4", size = 342283 }, + { url = 
"https://files.pythonhosted.org/packages/1c/15/2a7c619b3c2272ea9feb9ade67a45c40b3eeb500d503ad4c28c395dc51b4/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e", size = 380320 }, + { url = "https://files.pythonhosted.org/packages/a2/7d/4c6d243ba4a3057e994bb5bedd01b5c963c12fe38dde707a52acdb3849e7/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817", size = 391760 }, + { url = "https://files.pythonhosted.org/packages/b4/71/b19401a909b83bcd67f90221330bc1ef11bc486fe4e04c24388d28a618ae/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec", size = 522476 }, + { url = "https://files.pythonhosted.org/packages/e4/44/1a3b9715c0455d2e2f0f6df5ee6d6f5afdc423d0773a8a682ed2b43c566c/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a", size = 403418 }, + { url = "https://files.pythonhosted.org/packages/1c/4b/fb6c4f14984eb56673bc868a66536f53417ddb13ed44b391998100a06a96/rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8", size = 384771 }, + { url = "https://files.pythonhosted.org/packages/c0/56/d5265d2d28b7420d7b4d4d85cad8ef891760f5135102e60d5c970b976e41/rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48", size = 400022 }, + { url = "https://files.pythonhosted.org/packages/8f/e9/9f5fc70164a569bdd6ed9046486c3568d6926e3a49bdefeeccfb18655875/rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb", size = 416787 }, + { url = "https://files.pythonhosted.org/packages/d4/64/56dd03430ba491db943a81dcdef115a985aac5f44f565cd39a00c766d45c/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734", size = 557538 }, + { url = "https://files.pythonhosted.org/packages/3f/36/92cc885a3129993b1d963a2a42ecf64e6a8e129d2c7cc980dbeba84e55fb/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb", size = 588512 }, + { url = "https://files.pythonhosted.org/packages/dd/10/6b283707780a81919f71625351182b4f98932ac89a09023cb61865136244/rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0", size = 555813 }, + { url = "https://files.pythonhosted.org/packages/04/2e/30b5ea18c01379da6272a92825dd7e53dc9d15c88a19e97932d35d430ef7/rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a", size = 217385 }, + { url = "https://files.pythonhosted.org/packages/32/7d/97119da51cb1dd3f2f3c0805f155a3aa4a95fa44fe7d78ae15e69edf4f34/rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772", size = 230097 }, + { url = "https://files.pythonhosted.org/packages/d5/63/b7cc415c345625d5e62f694ea356c58fb964861409008118f1245f8c3347/rpds_py-0.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7ba22cb9693df986033b91ae1d7a979bc399237d45fccf875b76f62bb9e52ddf", size = 371360 }, + { url = "https://files.pythonhosted.org/packages/e5/8c/12e1b24b560cf378b8ffbdb9dc73abd529e1adcfcf82727dfd29c4a7b88d/rpds_py-0.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b640501be9288c77738b5492b3fd3abc4ba95c50c2e41273c8a1459f08298d3", size = 353933 }, + { url = 
"https://files.pythonhosted.org/packages/9b/85/1bb2210c1f7a1b99e91fea486b9f0f894aa5da3a5ec7097cbad7dec6d40f/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb08b65b93e0c6dd70aac7f7890a9c0938d5ec71d5cb32d45cf844fb8ae47636", size = 382962 }, + { url = "https://files.pythonhosted.org/packages/cc/c9/a839b9f219cf80ed65f27a7f5ddbb2809c1b85c966020ae2dff490e0b18e/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7ff07d696a7a38152ebdb8212ca9e5baab56656749f3d6004b34ab726b550b8", size = 394412 }, + { url = "https://files.pythonhosted.org/packages/02/2d/b1d7f928b0b1f4fc2e0133e8051d199b01d7384875adc63b6ddadf3de7e5/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb7c72262deae25366e3b6c0c0ba46007967aea15d1eea746e44ddba8ec58dcc", size = 523972 }, + { url = "https://files.pythonhosted.org/packages/a9/af/2cbf56edd2d07716df1aec8a726b3159deb47cb5c27e1e42b71d705a7c2f/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b002cab05d6339716b03a4a3a2ce26737f6231d7b523f339fa061d53368c9d8", size = 403273 }, + { url = "https://files.pythonhosted.org/packages/c0/93/425e32200158d44ff01da5d9612c3b6711fe69f606f06e3895511f17473b/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23f6b69d1c26c4704fec01311963a41d7de3ee0570a84ebde4d544e5a1859ffc", size = 385278 }, + { url = "https://files.pythonhosted.org/packages/eb/1a/1a04a915ecd0551bfa9e77b7672d1937b4b72a0fc204a17deef76001cfb2/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:530064db9146b247351f2a0250b8f00b289accea4596a033e94be2389977de71", size = 402084 }, + { url = "https://files.pythonhosted.org/packages/51/f7/66585c0fe5714368b62951d2513b684e5215beaceab2c6629549ddb15036/rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:7b90b0496570bd6b0321724a330d8b545827c4df2034b6ddfc5f5275f55da2ad", size = 419041 }, + { url = "https://files.pythonhosted.org/packages/8e/7e/83a508f6b8e219bba2d4af077c35ba0e0cdd35a751a3be6a7cba5a55ad71/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879b0e14a2da6a1102a3fc8af580fc1ead37e6d6692a781bd8c83da37429b5ab", size = 560084 }, + { url = "https://files.pythonhosted.org/packages/66/66/bb945683b958a1b19eb0fe715594630d0f36396ebdef4d9b89c2fa09aa56/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:0d807710df3b5faa66c731afa162ea29717ab3be17bdc15f90f2d9f183da4059", size = 590115 }, + { url = "https://files.pythonhosted.org/packages/12/00/ccfaafaf7db7e7adace915e5c2f2c2410e16402561801e9c7f96683002d3/rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3adc388fc3afb6540aec081fa59e6e0d3908722771aa1e37ffe22b220a436f0b", size = 556561 }, + { url = "https://files.pythonhosted.org/packages/e1/b7/92b6ed9aad103bfe1c45df98453dfae40969eef2cb6c6239c58d7e96f1b3/rpds_py-0.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c796c0c1cc68cb08b0284db4229f5af76168172670c74908fdbd4b7d7f515819", size = 229125 }, + { url = "https://files.pythonhosted.org/packages/0c/ed/e1fba02de17f4f76318b834425257c8ea297e415e12c68b4361f63e8ae92/rpds_py-0.27.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdfe4bb2f9fe7458b7453ad3c33e726d6d1c7c0a72960bcc23800d77384e42df", size = 371402 }, + { url = "https://files.pythonhosted.org/packages/af/7c/e16b959b316048b55585a697e94add55a4ae0d984434d279ea83442e460d/rpds_py-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8fabb8fd848a5f75a2324e4a84501ee3a5e3c78d8603f83475441866e60b94a3", size = 354084 }, + { url = "https://files.pythonhosted.org/packages/de/c1/ade645f55de76799fdd08682d51ae6724cb46f318573f18be49b1e040428/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:eda8719d598f2f7f3e0f885cba8646644b55a187762bec091fa14a2b819746a9", size = 383090 }, + { url = "https://files.pythonhosted.org/packages/1f/27/89070ca9b856e52960da1472efcb6c20ba27cfe902f4f23ed095b9cfc61d/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c64d07e95606ec402a0a1c511fe003873fa6af630bda59bac77fac8b4318ebc", size = 394519 }, + { url = "https://files.pythonhosted.org/packages/b3/28/be120586874ef906aa5aeeae95ae8df4184bc757e5b6bd1c729ccff45ed5/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93a2ed40de81bcff59aabebb626562d48332f3d028ca2036f1d23cbb52750be4", size = 523817 }, + { url = "https://files.pythonhosted.org/packages/a8/ef/70cc197bc11cfcde02a86f36ac1eed15c56667c2ebddbdb76a47e90306da/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:387ce8c44ae94e0ec50532d9cb0edce17311024c9794eb196b90e1058aadeb66", size = 403240 }, + { url = "https://files.pythonhosted.org/packages/cf/35/46936cca449f7f518f2f4996e0e8344db4b57e2081e752441154089d2a5f/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf94f812c95b5e60ebaf8bfb1898a7d7cb9c1af5744d4a67fa47796e0465d4e", size = 385194 }, + { url = "https://files.pythonhosted.org/packages/e1/62/29c0d3e5125c3270b51415af7cbff1ec587379c84f55a5761cc9efa8cd06/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4848ca84d6ded9b58e474dfdbad4b8bfb450344c0551ddc8d958bf4b36aa837c", size = 402086 }, + { url = "https://files.pythonhosted.org/packages/8f/66/03e1087679227785474466fdd04157fb793b3b76e3fcf01cbf4c693c1949/rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2bde09cbcf2248b73c7c323be49b280180ff39fadcfe04e7b6f54a678d02a7cf", size = 419272 }, + { url = 
"https://files.pythonhosted.org/packages/6a/24/e3e72d265121e00b063aef3e3501e5b2473cf1b23511d56e529531acf01e/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:94c44ee01fd21c9058f124d2d4f0c9dc7634bec93cd4b38eefc385dabe71acbf", size = 560003 }, + { url = "https://files.pythonhosted.org/packages/26/ca/f5a344c534214cc2d41118c0699fffbdc2c1bc7046f2a2b9609765ab9c92/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:df8b74962e35c9249425d90144e721eed198e6555a0e22a563d29fe4486b51f6", size = 590482 }, + { url = "https://files.pythonhosted.org/packages/ce/08/4349bdd5c64d9d193c360aa9db89adeee6f6682ab8825dca0a3f535f434f/rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dc23e6820e3b40847e2f4a7726462ba0cf53089512abe9ee16318c366494c17a", size = 556523 }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077 }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429 }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 
226067 }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030 }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898 }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894 }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319 }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273 }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310 }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309 }, + { url = 
"https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762 }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453 }, + { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486 }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349 }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159 }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243 }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645 }, + { url = 
"https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584 }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875 }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418 }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708 }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582 }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543 }, + { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691 }, + { url = 
"https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170 }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530 }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666 }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954 }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724 }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383 }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257 }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "colorama", marker = "platform_system == 'Windows'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540 }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614 }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795 }, +] diff --git a/arch/validate_arch_config.sh b/arch/validate_arch_config.sh index 493d1b2f..f253bf3a 100644 --- a/arch/validate_arch_config.sh +++ 
b/arch/validate_arch_config.sh @@ -5,7 +5,7 @@ failed_files=() for file in $(find . -name arch_config.yaml -o -name arch_config_full_reference.yaml); do echo "Validating ${file}..." touch $(pwd)/${file}_rendered - if ! docker run --rm -v "$(pwd)/${file}:/app/arch_config.yaml:ro" -v "$(pwd)/${file}_rendered:/app/arch_config_rendered.yaml:rw" --entrypoint /bin/sh katanemo/archgw:latest -c "python config_generator.py" 2>&1 > /dev/null ; then + if ! docker run --rm -v "$(pwd)/${file}:/app/arch_config.yaml:ro" -v "$(pwd)/${file}_rendered:/app/arch_config_rendered.yaml:rw" --entrypoint /bin/sh katanemo/archgw:latest -c "python -m cli.config_generator" 2>&1 > /dev/null ; then echo "Validation failed for $file" failed_files+=("$file") fi diff --git a/crates/Cargo.lock b/crates/Cargo.lock index db033142..d313aa5b 100644 --- a/crates/Cargo.lock +++ b/crates/Cargo.lock @@ -68,6 +68,16 @@ version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -163,6 +173,7 @@ dependencies = [ "http-body-util", "hyper 1.6.0", "hyper-util", + "mockito", "opentelemetry", "opentelemetry-http", "opentelemetry-otlp", @@ -234,6 +245,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "colored" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "common" version = "0.1.0" @@ -1257,6 +1277,30 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "mockito" +version = "1.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7760e0e418d9b7e5777c0374009ca4c93861b9066f18cb334a20ce50ab63aa48" +dependencies = [ + "assert-json-diff", + "bytes", + "colored", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.6.0", + "hyper-util", + "log", + "rand 0.9.1", + "regex", + "serde_json", + "serde_urlencoded", + "similar", + "tokio", +] + [[package]] name = "native-tls" version = "0.2.14" @@ -2193,6 +2237,12 @@ dependencies = [ "libc", ] +[[package]] +name = "similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" + [[package]] name = "slab" version = "0.4.9" diff --git a/crates/brightstaff/Cargo.toml b/crates/brightstaff/Cargo.toml index 5cea1327..d424b0e6 100644 --- a/crates/brightstaff/Cargo.toml +++ b/crates/brightstaff/Cargo.toml @@ -28,8 +28,14 @@ serde_with = "3.13.0" serde_yaml = "0.9.34" thiserror = "2.0.12" tokio = { version = "1.44.2", features = ["full"] } -tokio-stream = "0.1.17" +tokio-stream = "0.1" time = { version = "0.3", features = ["formatting", "macros"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } + +[dev-dependencies] +mockito = "1.0" +tokio-stream = "0.1.17" tracing = "0.1.41" tracing-opentelemetry = "0.30.0" tracing-subscriber = { version = "0.3.19", features = ["env-filter", "fmt", "time"] } diff --git a/crates/brightstaff/src/handlers/agent_chat_completions.rs b/crates/brightstaff/src/handlers/agent_chat_completions.rs new file mode 100644 index 00000000..a1a00f88 --- /dev/null +++ b/crates/brightstaff/src/handlers/agent_chat_completions.rs @@ -0,0 +1,172 @@ +use std::sync::Arc; + +use bytes::Bytes; +use hermesllm::apis::openai::ChatCompletionsRequest; +use http_body_util::combinators::BoxBody; +use http_body_util::BodyExt; +use hyper::{Request, Response}; +use tracing::{debug, info, warn}; + +use 
super::agent_selector::{AgentSelectionError, AgentSelector}; +use super::pipeline_processor::{PipelineError, PipelineProcessor}; +use super::response_handler::ResponseHandler; +use crate::router::llm_router::RouterService; + +/// Main errors for agent chat completions +#[derive(Debug, thiserror::Error)] +pub enum AgentFilterChainError { + #[error("Agent selection error: {0}")] + Selection(#[from] AgentSelectionError), + #[error("Pipeline processing error: {0}")] + Pipeline(#[from] PipelineError), + #[error("Response handling error: {0}")] + Response(#[from] super::response_handler::ResponseError), + #[error("Request parsing error: {0}")] + RequestParsing(#[from] serde_json::Error), + #[error("HTTP error: {0}")] + Http(#[from] hyper::Error), +} + +pub async fn agent_chat( + request: Request, + router_service: Arc, + _: String, + agents_list: Arc>>>, + listeners: Arc>>, +) -> Result>, hyper::Error> { + match handle_agent_chat(request, router_service, agents_list, listeners).await { + Ok(response) => Ok(response), + Err(err) => { + // Print detailed error information with full error chain + let mut error_chain = Vec::new(); + let mut current_error: &dyn std::error::Error = &err; + + // Collect the full error chain + loop { + error_chain.push(current_error.to_string()); + match current_error.source() { + Some(source) => current_error = source, + None => break, + } + } + + // Log the complete error chain + warn!("Agent chat error chain: {:#?}", error_chain); + warn!("Root error: {:?}", err); + + // Create structured error response as JSON + let error_json = serde_json::json!({ + "error": { + "type": "AgentFilterChainError", + "message": err.to_string(), + "error_chain": error_chain, + "debug_info": format!("{:?}", err) + } + }); + + // Log the error for debugging + info!("Structured error info: {}", error_json); + + // Return JSON error response + Ok(ResponseHandler::create_json_error_response(&error_json)) + } + } +} + +async fn handle_agent_chat( + request: Request, + 
router_service: Arc, + agents_list: Arc>>>, + listeners: Arc>>, +) -> Result>, AgentFilterChainError> { + // Initialize services + let agent_selector = AgentSelector::new(router_service); + let pipeline_processor = PipelineProcessor::default(); + let response_handler = ResponseHandler::new(); + + // Extract listener name from headers + let listener_name = request + .headers() + .get("x-arch-agent-listener-name") + .and_then(|name| name.to_str().ok()); + + // Find the appropriate listener + let listener = { + let listeners = listeners.read().await; + agent_selector + .find_listener(listener_name, &listeners) + .await? + }; + + info!("Handling request for listener: {}", listener.name); + + // Parse request body + let request_headers = request.headers().clone(); + let chat_request_bytes = request.collect().await?.to_bytes(); + + debug!( + "Received request body (raw utf8): {}", + String::from_utf8_lossy(&chat_request_bytes) + ); + + let chat_completions_request: ChatCompletionsRequest = + serde_json::from_slice(&chat_request_bytes).map_err(|err| { + warn!( + "Failed to parse request body as ChatCompletionsRequest: {}", + err + ); + AgentFilterChainError::RequestParsing(err) + })?; + + // Extract trace parent for routing + let trace_parent = request_headers + .iter() + .find(|(key, _)| key.as_str() == "traceparent") + .map(|(_, value)| value.to_str().unwrap_or_default().to_string()); + + // Select appropriate agent using arch router llm model + let selected_agent = agent_selector + .select_agent(&chat_completions_request.messages, &listener, trace_parent) + .await?; + + debug!("Processing agent pipeline: {}", selected_agent.id); + + // Create agent map for pipeline processing + let agent_map = { + let agents = agents_list.read().await; + let agents = agents.as_ref().unwrap(); + agent_selector.create_agent_map(agents) + }; + + // Process the filter chain + let processed_messages = pipeline_processor + .process_filter_chain( + &chat_completions_request, + 
&selected_agent, + &agent_map, + &request_headers, + ) + .await?; + + // Get terminal agent and send final response + let terminal_agent_name = selected_agent.id; + let terminal_agent = agent_map.get(&terminal_agent_name).unwrap(); + + debug!("Processing terminal agent: {}", terminal_agent_name); + debug!("Terminal agent details: {:?}", terminal_agent); + + let llm_response = pipeline_processor + .invoke_upstream_agent( + &processed_messages, + &chat_completions_request, + terminal_agent, + &request_headers, + ) + .await?; + + // Create streaming response + response_handler + .create_streaming_response(llm_response) + .await + .map_err(AgentFilterChainError::from) +} diff --git a/crates/brightstaff/src/handlers/agent_selector.rs b/crates/brightstaff/src/handlers/agent_selector.rs new file mode 100644 index 00000000..0fff1198 --- /dev/null +++ b/crates/brightstaff/src/handlers/agent_selector.rs @@ -0,0 +1,296 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use common::configuration::{ + Agent, AgentFilterChain, Listener, ModelUsagePreference, RoutingPreference, +}; +use hermesllm::apis::openai::Message; +use tracing::{debug, warn}; + +use crate::router::llm_router::RouterService; + +/// Errors that can occur during agent selection +#[derive(Debug, thiserror::Error)] +pub enum AgentSelectionError { + #[error("Listener not found for name: {0}")] + ListenerNotFound(String), + #[error("No agents configured for listener: {0}")] + NoAgentsConfigured(String), + #[error("Routing service error: {0}")] + RoutingError(String), + #[error("Default agent not found for listener: {0}")] + DefaultAgentNotFound(String), +} + +/// Service for selecting agents based on routing preferences and listener configuration +pub struct AgentSelector { + router_service: Arc, +} + +impl AgentSelector { + pub fn new(router_service: Arc) -> Self { + Self { router_service } + } + + /// Find listener by name from the request headers + pub async fn find_listener( + &self, + listener_name: 
Option<&str>, + listeners: &[common::configuration::Listener], + ) -> Result { + let listener = listeners + .iter() + .find(|l| listener_name.map(|name| l.name == name).unwrap_or(false)) + .cloned() + .ok_or_else(|| { + AgentSelectionError::ListenerNotFound( + listener_name.unwrap_or("unknown").to_string(), + ) + })?; + + Ok(listener) + } + + /// Create agent name to agent mapping for efficient lookup + pub fn create_agent_map(&self, agents: &[Agent]) -> HashMap { + agents + .iter() + .map(|agent| (agent.id.clone(), agent.clone())) + .collect() + } + + /// Select appropriate agent based on routing preferences + pub async fn select_agent( + &self, + messages: &[Message], + listener: &Listener, + trace_parent: Option, + ) -> Result { + let agents = listener + .agents + .as_ref() + .ok_or_else(|| AgentSelectionError::NoAgentsConfigured(listener.name.clone()))?; + + // If only one agent, skip routing + if agents.len() == 1 { + debug!("Only one agent available, skipping routing"); + return Ok(agents[0].clone()); + } + + let usage_preferences = self.convert_agent_description_to_routing_preferences(agents); + debug!( + "Agents usage preferences for agent routing str: {}", + serde_json::to_string(&usage_preferences).unwrap_or_default() + ); + + match self + .router_service + .determine_route(messages, trace_parent, Some(usage_preferences)) + .await + { + Ok(Some((_, agent_name))) => { + debug!("Determined agent: {}", agent_name); + let selected_agent = agents + .iter() + .find(|a| a.id == agent_name) + .cloned() + .ok_or_else(|| { + AgentSelectionError::RoutingError(format!( + "Selected agent '{}' not found in listener agents", + agent_name + )) + })?; + Ok(selected_agent) + } + Ok(None) => { + debug!("No agent determined using routing preferences, using default agent"); + self.get_default_agent(agents, &listener.name) + } + Err(err) => Err(AgentSelectionError::RoutingError(err.to_string())), + } + } + + /// Get the default agent or the first agent if no default is 
specified + fn get_default_agent( + &self, + agents: &[AgentFilterChain], + listener_name: &str, + ) -> Result { + agents + .iter() + .find(|a| a.default.unwrap_or(false)) + .cloned() + .or_else(|| { + warn!( + "No default agent found, routing request to first agent: {}", + agents[0].id + ); + Some(agents[0].clone()) + }) + .ok_or_else(|| AgentSelectionError::DefaultAgentNotFound(listener_name.to_string())) + } + + /// Convert agent descriptions to routing preferences + fn convert_agent_description_to_routing_preferences( + &self, + agents: &[AgentFilterChain], + ) -> Vec { + agents + .iter() + .map(|agent| ModelUsagePreference { + model: agent.id.clone(), + routing_preferences: vec![RoutingPreference { + name: agent.id.clone(), + description: agent.description.as_ref().unwrap_or(&String::new()).clone(), + }], + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use common::configuration::{AgentFilterChain, Listener}; + + fn create_test_router_service() -> Arc { + Arc::new(RouterService::new( + vec![], // empty providers for testing + "http://localhost:8080".to_string(), + "test-model".to_string(), + "test-provider".to_string(), + )) + } + + fn create_test_agent(name: &str, description: &str, is_default: bool) -> AgentFilterChain { + AgentFilterChain { + id: name.to_string(), + description: Some(description.to_string()), + default: Some(is_default), + filter_chain: vec![name.to_string()], + } + } + + fn create_test_listener(name: &str, agents: Vec) -> Listener { + Listener { + name: name.to_string(), + agents: Some(agents), + port: 8080, + router: None, + } + } + + fn create_test_agent_struct(name: &str) -> Agent { + Agent { + id: name.to_string(), + kind: Some("test".to_string()), + url: "http://localhost:8080".to_string(), + } + } + + #[tokio::test] + async fn test_find_listener_success() { + let router_service = create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let listener1 = 
create_test_listener("test-listener", vec![]); + let listener2 = create_test_listener("other-listener", vec![]); + let listeners = vec![listener1.clone(), listener2]; + + let result = selector + .find_listener(Some("test-listener"), &listeners) + .await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap().name, "test-listener"); + } + + #[tokio::test] + async fn test_find_listener_not_found() { + let router_service = create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let listeners = vec![create_test_listener("other-listener", vec![])]; + + let result = selector + .find_listener(Some("nonexistent"), &listeners) + .await; + + assert!(result.is_err()); + matches!( + result.unwrap_err(), + AgentSelectionError::ListenerNotFound(_) + ); + } + + #[test] + fn test_create_agent_map() { + let router_service = create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let agents = vec![ + create_test_agent_struct("agent1"), + create_test_agent_struct("agent2"), + ]; + + let agent_map = selector.create_agent_map(&agents); + + assert_eq!(agent_map.len(), 2); + assert!(agent_map.contains_key("agent1")); + assert!(agent_map.contains_key("agent2")); + } + + #[test] + fn test_convert_agent_description_to_routing_preferences() { + let router_service = create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let agents = vec![ + create_test_agent("agent1", "First agent description", true), + create_test_agent("agent2", "Second agent description", false), + ]; + + let preferences = selector.convert_agent_description_to_routing_preferences(&agents); + + assert_eq!(preferences.len(), 2); + assert_eq!(preferences[0].model, "agent1"); + assert_eq!(preferences[0].routing_preferences[0].name, "agent1"); + assert_eq!( + preferences[0].routing_preferences[0].description, + "First agent description" + ); + } + + #[test] + fn test_get_default_agent() { + let router_service = 
create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let agents = vec![ + create_test_agent("agent1", "First agent", false), + create_test_agent("agent2", "Default agent", true), + create_test_agent("agent3", "Third agent", false), + ]; + + let result = selector.get_default_agent(&agents, "test-listener"); + + assert!(result.is_ok()); + assert_eq!(result.unwrap().id, "agent2"); + } + + #[test] + fn test_get_default_agent_fallback_to_first() { + let router_service = create_test_router_service(); + let selector = AgentSelector::new(router_service); + + let agents = vec![ + create_test_agent("agent1", "First agent", false), + create_test_agent("agent2", "Second agent", false), + ]; + + let result = selector.get_default_agent(&agents, "test-listener"); + + assert!(result.is_ok()); + assert_eq!(result.unwrap().id, "agent1"); + } +} diff --git a/crates/brightstaff/src/handlers/chat_completions.rs b/crates/brightstaff/src/handlers/chat_completions.rs index 53313c36..b96f1f52 100644 --- a/crates/brightstaff/src/handlers/chat_completions.rs +++ b/crates/brightstaff/src/handlers/chat_completions.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; -use std::collections::HashMap; use bytes::Bytes; use common::configuration::{ModelAlias, ModelUsagePreference}; use common::consts::ARCH_PROVIDER_HINT_HEADER; @@ -11,6 +9,8 @@ use http_body_util::{BodyExt, Full, StreamBody}; use hyper::body::Frame; use hyper::header::{self}; use hyper::{Request, Response, StatusCode}; +use std::collections::HashMap; +use std::sync::Arc; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; @@ -30,14 +30,19 @@ pub async fn chat( full_qualified_llm_provider_url: String, model_aliases: Arc>>, ) -> Result>, hyper::Error> { - let request_path = request.uri().path().to_string(); let mut request_headers = request.headers().clone(); let chat_request_bytes = request.collect().await?.to_bytes(); - debug!("Received request body (raw utf8): {}", 
String::from_utf8_lossy(&chat_request_bytes)); + debug!( + "Received request body (raw utf8): {}", + String::from_utf8_lossy(&chat_request_bytes) + ); - let mut client_request = match ProviderRequestType::try_from((&chat_request_bytes[..], &SupportedAPIs::from_endpoint(request_path.as_str()).unwrap())) { + let mut client_request = match ProviderRequestType::try_from(( + &chat_request_bytes[..], + &SupportedAPIs::from_endpoint(request_path.as_str()).unwrap(), + )) { Ok(request) => request, Err(err) => { warn!("Failed to parse request as ProviderRequestType: {}", err); @@ -77,7 +82,10 @@ pub async fn chat( // Convert to ChatCompletionsRequest regardless of input type (clone to avoid moving original) let chat_completions_request_for_arch_router: ChatCompletionsRequest = - match ProviderRequestType::try_from((client_request, &SupportedAPIs::OpenAIChatCompletions(hermesllm::apis::OpenAIApi::ChatCompletions))) { + match ProviderRequestType::try_from(( + client_request, + &SupportedAPIs::OpenAIChatCompletions(hermesllm::apis::OpenAIApi::ChatCompletions), + )) { Ok(ProviderRequestType::ChatCompletionsRequest(req)) => req, Ok(ProviderRequestType::MessagesRequest(_)) => { // This should not happen after conversion to OpenAI format @@ -86,9 +94,12 @@ pub async fn chat( let mut bad_request = Response::new(full(err_msg)); *bad_request.status_mut() = StatusCode::BAD_REQUEST; return Ok(bad_request); - }, + } Err(err) => { - warn!("Failed to convert request to ChatCompletionsRequest: {}", err); + warn!( + "Failed to convert request to ChatCompletionsRequest: {}", + err + ); let err_msg = format!("Failed to convert request: {}", err); let mut bad_request = Response::new(full(err_msg)); *bad_request.status_mut() = StatusCode::BAD_REQUEST; @@ -106,28 +117,29 @@ pub async fn chat( .find(|(ty, _)| ty.as_str() == "traceparent") .map(|(_, value)| value.to_str().unwrap_or_default().to_string()); - let usage_preferences_str: Option = - routing_metadata.as_ref().and_then(|metadata| { - 
metadata - .get("archgw_preference_config") - .map(|value| value.to_string()) - }); + let usage_preferences_str: Option = routing_metadata.as_ref().and_then(|metadata| { + metadata + .get("archgw_preference_config") + .map(|value| value.to_string()) + }); let usage_preferences: Option> = usage_preferences_str .as_ref() .and_then(|s| serde_yaml::from_str(s).ok()); - let latest_message_for_log = - chat_completions_request_for_arch_router - .messages - .last() - .map_or("None".to_string(), |msg| { - msg.content.to_string().replace('\n', "\\n") - }); + let latest_message_for_log = chat_completions_request_for_arch_router + .messages + .last() + .map_or("None".to_string(), |msg| { + msg.content.to_string().replace('\n', "\\n") + }); const MAX_MESSAGE_LENGTH: usize = 50; let latest_message_for_log = if latest_message_for_log.chars().count() > MAX_MESSAGE_LENGTH { - let truncated: String = latest_message_for_log.chars().take(MAX_MESSAGE_LENGTH).collect(); + let truncated: String = latest_message_for_log + .chars() + .take(MAX_MESSAGE_LENGTH) + .collect(); format!("{}...", truncated) } else { latest_message_for_log @@ -153,12 +165,11 @@ pub async fn chat( Ok(route) => match route { Some((_, model_name)) => model_name, None => { - info!( + info!( "No route determined, using default model from request: {}", chat_completions_request_for_arch_router.model ); chat_completions_request_for_arch_router.model.clone() - } }, Err(err) => { diff --git a/crates/brightstaff/src/handlers/integration_tests.rs b/crates/brightstaff/src/handlers/integration_tests.rs new file mode 100644 index 00000000..e09ed4f2 --- /dev/null +++ b/crates/brightstaff/src/handlers/integration_tests.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; + +use hermesllm::apis::openai::{ChatCompletionsRequest, Message, MessageContent, Role}; +use hyper::header::HeaderMap; + +use crate::handlers::agent_selector::{AgentSelectionError, AgentSelector}; +use crate::handlers::pipeline_processor::PipelineProcessor; +use 
crate::handlers::response_handler::ResponseHandler; +use crate::router::llm_router::RouterService; + +/// Integration test that demonstrates the modular agent chat flow +/// This test shows how the three main components work together: +/// 1. AgentSelector - selects the appropriate agent based on routing +/// 2. PipelineProcessor - executes the agent pipeline +/// 3. ResponseHandler - handles response streaming +#[cfg(test)] +mod integration_tests { + use super::*; + use common::configuration::{Agent, AgentFilterChain, Listener}; + + fn create_test_router_service() -> Arc { + Arc::new(RouterService::new( + vec![], // empty providers for testing + "http://localhost:8080".to_string(), + "test-model".to_string(), + "test-provider".to_string(), + )) + } + + fn create_test_message(role: Role, content: &str) -> Message { + Message { + role, + content: MessageContent::Text(content.to_string()), + name: None, + tool_calls: None, + tool_call_id: None, + } + } + + #[tokio::test] + async fn test_modular_agent_chat_flow() { + // Setup services + let router_service = create_test_router_service(); + let agent_selector = AgentSelector::new(router_service); + let pipeline_processor = PipelineProcessor::default(); + + // Create test data + let agents = vec![ + Agent { + id: "filter-agent".to_string(), + kind: Some("filter".to_string()), + url: "http://localhost:8081".to_string(), + }, + Agent { + id: "terminal-agent".to_string(), + kind: Some("terminal".to_string()), + url: "http://localhost:8082".to_string(), + }, + ]; + + let agent_pipeline = AgentFilterChain { + id: "terminal-agent".to_string(), + filter_chain: vec!["filter-agent".to_string(), "terminal-agent".to_string()], + description: Some("Test pipeline".to_string()), + default: Some(true), + }; + + let listener = Listener { + name: "test-listener".to_string(), + agents: Some(vec![agent_pipeline.clone()]), + port: 8080, + router: None, + }; + + let listeners = vec![listener]; + let messages = 
vec![create_test_message(Role::User, "Hello world!")]; + + // Test 1: Agent Selection + let selected_listener = agent_selector + .find_listener(Some("test-listener"), &listeners) + .await; + + assert!(selected_listener.is_ok()); + let listener = selected_listener.unwrap(); + assert_eq!(listener.name, "test-listener"); + + // Test 2: Agent Map Creation + let agent_map = agent_selector.create_agent_map(&agents); + assert_eq!(agent_map.len(), 2); + assert!(agent_map.contains_key("filter-agent")); + assert!(agent_map.contains_key("terminal-agent")); + + // Test 3: Pipeline Processing (empty filter chain for testing) + let request = ChatCompletionsRequest { + messages: messages.clone(), + model: "test-model".to_string(), + ..Default::default() + }; + + // Create a pipeline with empty filter chain to avoid network calls + let test_pipeline = AgentFilterChain { + id: "terminal-agent".to_string(), + filter_chain: vec![], // Empty filter chain - no network calls needed + description: None, + default: None, + }; + + let headers = HeaderMap::new(); + let result = pipeline_processor + .process_filter_chain(&request, &test_pipeline, &agent_map, &headers) + .await; + + println!("Pipeline processing result: {:?}", result); + + assert!(result.is_ok()); + let processed_messages = result.unwrap(); + // With empty filter chain, should return the original messages unchanged + assert_eq!(processed_messages.len(), 1); + if let MessageContent::Text(content) = &processed_messages[0].content { + assert_eq!(content, "Hello world!"); + } else { + panic!("Expected text content"); + } + + // Test 4: Error Response Creation + let error_response = ResponseHandler::create_bad_request("Test error"); + assert_eq!(error_response.status(), hyper::StatusCode::BAD_REQUEST); + + println!("✅ All modular components working correctly!"); + } + + #[tokio::test] + async fn test_error_handling_flow() { + let router_service = create_test_router_service(); + let agent_selector = 
AgentSelector::new(router_service); + + // Test listener not found + let result = agent_selector.find_listener(Some("nonexistent"), &[]).await; + + assert!(result.is_err()); + assert!(matches!( + result.unwrap_err(), + AgentSelectionError::ListenerNotFound(_) + )); + + // Test error response creation + let error_response = ResponseHandler::create_internal_error("Pipeline failed"); + assert_eq!( + error_response.status(), + hyper::StatusCode::INTERNAL_SERVER_ERROR + ); + + println!("✅ Error handling working correctly!"); + } +} diff --git a/crates/brightstaff/src/handlers/mod.rs b/crates/brightstaff/src/handlers/mod.rs index 6de38b5b..66c5449b 100644 --- a/crates/brightstaff/src/handlers/mod.rs +++ b/crates/brightstaff/src/handlers/mod.rs @@ -1,2 +1,9 @@ +pub mod agent_chat_completions; +pub mod agent_selector; pub mod chat_completions; pub mod models; +pub mod pipeline_processor; +pub mod response_handler; + +#[cfg(test)] +mod integration_tests; diff --git a/crates/brightstaff/src/handlers/pipeline_processor.rs b/crates/brightstaff/src/handlers/pipeline_processor.rs new file mode 100644 index 00000000..b62ce175 --- /dev/null +++ b/crates/brightstaff/src/handlers/pipeline_processor.rs @@ -0,0 +1,228 @@ +use std::collections::HashMap; + +use common::configuration::{Agent, AgentFilterChain}; +use common::consts::{ARCH_UPSTREAM_HOST_HEADER, ENVOY_RETRY_HEADER}; +use hermesllm::apis::openai::{ChatCompletionsRequest, Message}; +use hyper::header::HeaderMap; +use tracing::{debug, warn}; + +/// Errors that can occur during pipeline processing +#[derive(Debug, thiserror::Error)] +pub enum PipelineError { + #[error("HTTP request failed: {0}")] + RequestFailed(#[from] reqwest::Error), + #[error("Failed to parse response: {0}")] + ParseError(#[from] serde_json::Error), + #[error("Agent '{0}' not found in agent map")] + AgentNotFound(String), + #[error("No choices in response from agent '{0}'")] + NoChoicesInResponse(String), + #[error("No content in response from agent 
'{0}'")] + NoContentInResponse(String), +} + +/// Service for processing agent pipelines +pub struct PipelineProcessor { + client: reqwest::Client, + url: String, +} + +impl Default for PipelineProcessor { + fn default() -> Self { + Self { + client: reqwest::Client::new(), + url: "http://localhost:11000/v1/chat/completions".to_string(), + } + } +} + +impl PipelineProcessor { + pub fn new(url: String) -> Self { + Self { + client: reqwest::Client::new(), + url, + } + } + + /// Process the filter chain of agents (all except the terminal agent) + pub async fn process_filter_chain( + &self, + initial_request: &ChatCompletionsRequest, + agent_filter_chain: &AgentFilterChain, + agent_map: &HashMap, + request_headers: &HeaderMap, + ) -> Result, PipelineError> { + let mut chat_completions_history = initial_request.messages.clone(); + + for agent_name in &agent_filter_chain.filter_chain { + debug!("Processing filter agent: {}", agent_name); + + let agent = agent_map + .get(agent_name) + .ok_or_else(|| PipelineError::AgentNotFound(agent_name.clone()))?; + + debug!("Agent details: {:?}", agent); + + let response_content = self + .send_agent_filter_chain_request( + &chat_completions_history, + initial_request, + agent, + request_headers, + ) + .await?; + + debug!("Received response from filter agent {}", agent_name); + + // Parse the response content as new message history + chat_completions_history = + serde_json::from_str(&response_content).inspect_err(|err| { + warn!( + "Failed to parse response from agent {}, err: {}, response: {}", + agent_name, err, response_content + ) + })?; + } + + Ok(chat_completions_history) + } + + /// Send request to a specific agent and return the response content + async fn send_agent_filter_chain_request( + &self, + messages: &[Message], + original_request: &ChatCompletionsRequest, + agent: &Agent, + request_headers: &HeaderMap, + ) -> Result { + let mut request = original_request.clone(); + request.messages = messages.to_vec(); + + let 
request_body = serde_json::to_string(&request)?; + debug!("Sending request to agent {}", agent.id); + + let mut agent_headers = request_headers.clone(); + agent_headers.remove(hyper::header::CONTENT_LENGTH); + agent_headers.insert( + ARCH_UPSTREAM_HOST_HEADER, + hyper::header::HeaderValue::from_str(&agent.id) + .map_err(|_| PipelineError::AgentNotFound(agent.id.clone()))?, + ); + + agent_headers.insert( + ENVOY_RETRY_HEADER, + hyper::header::HeaderValue::from_str("3").unwrap(), + ); + + let response = self + .client + .post(&self.url) + .headers(agent_headers) + .body(request_body) + .send() + .await?; + + let response_bytes = response.bytes().await?; + + // Parse the response as JSON to extract the content + let response_json: serde_json::Value = serde_json::from_slice(&response_bytes)?; + + let content = response_json + .get("choices") + .and_then(|choices| choices.as_array()) + .and_then(|choices| choices.first()) + .and_then(|choice| choice.get("message")) + .and_then(|message| message.get("content")) + .and_then(|content| content.as_str()) + .ok_or_else(|| PipelineError::NoContentInResponse(agent.id.clone()))? 
+ .to_string(); + + Ok(content) + } + + /// Send request to terminal agent and return the raw response for streaming + pub async fn invoke_upstream_agent( + &self, + messages: &[Message], + original_request: &ChatCompletionsRequest, + terminal_agent: &Agent, + request_headers: &HeaderMap, + ) -> Result { + let mut request = original_request.clone(); + request.messages = messages.to_vec(); + + let request_body = serde_json::to_string(&request)?; + debug!("Sending request to terminal agent {}", terminal_agent.id); + + let mut agent_headers = request_headers.clone(); + agent_headers.remove(hyper::header::CONTENT_LENGTH); + agent_headers.insert( + ARCH_UPSTREAM_HOST_HEADER, + hyper::header::HeaderValue::from_str(&terminal_agent.id) + .map_err(|_| PipelineError::AgentNotFound(terminal_agent.id.clone()))?, + ); + + agent_headers.insert( + ENVOY_RETRY_HEADER, + hyper::header::HeaderValue::from_str("3").unwrap(), + ); + + let response = self + .client + .post(&self.url) + .headers(agent_headers) + .body(request_body) + .send() + .await?; + + Ok(response) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hermesllm::apis::openai::{Message, MessageContent, Role}; + use std::collections::HashMap; + + fn create_test_message(role: Role, content: &str) -> Message { + Message { + role, + content: MessageContent::Text(content.to_string()), + name: None, + tool_calls: None, + tool_call_id: None, + } + } + + fn create_test_pipeline(agents: Vec<&str>) -> AgentFilterChain { + AgentFilterChain { + id: "test-agent".to_string(), + filter_chain: agents.iter().map(|s| s.to_string()).collect(), + description: None, + default: None, + } + } + + #[tokio::test] + async fn test_agent_not_found_error() { + let processor = PipelineProcessor::default(); + let agent_map = HashMap::new(); + let request_headers = HeaderMap::new(); + + let initial_request = ChatCompletionsRequest { + messages: vec![create_test_message(Role::User, "Hello")], + model: "test-model".to_string(), + 
..Default::default() + }; + + let pipeline = create_test_pipeline(vec!["nonexistent-agent", "terminal-agent"]); + + let result = processor + .process_filter_chain(&initial_request, &pipeline, &agent_map, &request_headers) + .await; + + assert!(result.is_err()); + matches!(result.unwrap_err(), PipelineError::AgentNotFound(_)); + } +} diff --git a/crates/brightstaff/src/handlers/response_handler.rs b/crates/brightstaff/src/handlers/response_handler.rs new file mode 100644 index 00000000..2d647d2c --- /dev/null +++ b/crates/brightstaff/src/handlers/response_handler.rs @@ -0,0 +1,191 @@ +use bytes::Bytes; +use http_body_util::combinators::BoxBody; +use http_body_util::{BodyExt, Full, StreamBody}; +use hyper::body::Frame; +use hyper::{Response, StatusCode}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tokio_stream::StreamExt; +use tracing::warn; + +/// Errors that can occur during response handling +#[derive(Debug, thiserror::Error)] +pub enum ResponseError { + #[error("Failed to create response: {0}")] + ResponseCreationFailed(#[from] hyper::http::Error), + #[error("Stream error: {0}")] + StreamError(String), +} + +/// Service for handling HTTP responses and streaming +pub struct ResponseHandler; + +impl ResponseHandler { + pub fn new() -> Self { + Self + } + + /// Create a full response body from bytes + pub fn create_full_body>(chunk: T) -> BoxBody { + Full::new(chunk.into()) + .map_err(|never| match never {}) + .boxed() + } + + /// Create an error response with a given status code and message + pub fn create_error_response( + status: StatusCode, + message: &str, + ) -> Response> { + let mut response = Response::new(Self::create_full_body(message.to_string())); + *response.status_mut() = status; + response + } + + /// Create a bad request response + pub fn create_bad_request(message: &str) -> Response> { + Self::create_error_response(StatusCode::BAD_REQUEST, message) + } + + /// Create an internal server error response + pub fn 
create_internal_error(message: &str) -> Response> { + Self::create_error_response(StatusCode::INTERNAL_SERVER_ERROR, message) + } + + /// Create a JSON error response + pub fn create_json_error_response( + error_json: &serde_json::Value, + ) -> Response> { + let json_string = error_json.to_string(); + let mut response = Response::new(Self::create_full_body(json_string)); + *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + response.headers_mut().insert( + hyper::header::CONTENT_TYPE, + "application/json".parse().unwrap(), + ); + response + } + + /// Create a streaming response from a reqwest response + pub async fn create_streaming_response( + &self, + llm_response: reqwest::Response, + ) -> Result>, ResponseError> { + // Copy headers from the original response + let response_headers = llm_response.headers(); + let mut response_builder = Response::builder(); + + let headers = response_builder.headers_mut().ok_or_else(|| { + ResponseError::StreamError("Failed to get mutable headers".to_string()) + })?; + + for (header_name, header_value) in response_headers.iter() { + headers.insert(header_name, header_value.clone()); + } + + // Create channel for async streaming + let (tx, rx) = mpsc::channel::(16); + + // Spawn task to stream data + tokio::spawn(async move { + let mut byte_stream = llm_response.bytes_stream(); + + while let Some(item) = byte_stream.next().await { + let chunk = match item { + Ok(chunk) => chunk, + Err(err) => { + warn!("Error receiving chunk: {:?}", err); + break; + } + }; + + if tx.send(chunk).await.is_err() { + warn!("Receiver dropped"); + break; + } + } + }); + + let stream = ReceiverStream::new(rx).map(|chunk| Ok::<_, hyper::Error>(Frame::data(chunk))); + let stream_body = BoxBody::new(StreamBody::new(stream)); + + response_builder + .body(stream_body) + .map_err(ResponseError::from) + } +} + +impl Default for ResponseHandler { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use 
hyper::StatusCode; + + #[test] + fn test_create_bad_request() { + let response = ResponseHandler::create_bad_request("Invalid request"); + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + } + + #[test] + fn test_create_internal_error() { + let response = ResponseHandler::create_internal_error("Server error"); + assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); + } + + #[test] + fn test_create_error_response() { + let response = + ResponseHandler::create_error_response(StatusCode::NOT_FOUND, "Resource not found"); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + } + + #[test] + fn test_create_json_error_response() { + let error_json = serde_json::json!({ + "error": { + "type": "TestError", + "message": "Test error message" + } + }); + + let response = ResponseHandler::create_json_error_response(&error_json); + assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert_eq!( + response.headers().get("content-type").unwrap(), + "application/json" + ); + } + + #[tokio::test] + async fn test_create_streaming_response_with_mock() { + use mockito::Server; + + let mut server = Server::new_async().await; + let mock = server + .mock("GET", "/test") + .with_status(200) + .with_header("content-type", "text/plain") + .with_body("streaming response") + .create_async() + .await; + + let client = reqwest::Client::new(); + let llm_response = client.get(&(server.url() + "/test")).send().await.unwrap(); + + let handler = ResponseHandler::new(); + let result = handler.create_streaming_response(llm_response).await; + + mock.assert_async().await; + assert!(result.is_ok()); + + let response = result.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + assert!(response.headers().contains_key("content-type")); + } +} diff --git a/crates/brightstaff/src/main.rs b/crates/brightstaff/src/main.rs index ed4776fe..57dd9fe9 100644 --- a/crates/brightstaff/src/main.rs +++ b/crates/brightstaff/src/main.rs @@ -1,3 +1,4 @@ +use 
brightstaff::handlers::agent_chat_completions::agent_chat; use brightstaff::handlers::chat_completions::chat; use brightstaff::handlers::models::list_models; use brightstaff::router::llm_router::RouterService; @@ -61,15 +62,17 @@ async fn main() -> Result<(), Box> { let arch_config = Arc::new(config); - let llm_providers = Arc::new(RwLock::new(arch_config.llm_providers.clone())); + let llm_providers = Arc::new(RwLock::new(arch_config.model_providers.clone())); + let agents_list = Arc::new(RwLock::new(arch_config.agents.clone())); + let listeners = Arc::new(RwLock::new(arch_config.listeners.clone())); debug!( "arch_config: {:?}", &serde_json::to_string(arch_config.as_ref()).unwrap() ); - let llm_provider_url = env::var("LLM_PROVIDER_ENDPOINT") - .unwrap_or_else(|_| "http://localhost:12001".to_string()); + let llm_provider_url = + env::var("LLM_PROVIDER_ENDPOINT").unwrap_or_else(|_| "http://localhost:12001".to_string()); info!("llm provider url: {}", llm_provider_url); info!("listening on http://{}", bind_address); @@ -84,11 +87,11 @@ async fn main() -> Result<(), Box> { let routing_llm_provider = arch_config .routing .as_ref() - .and_then(|r| r.llm_provider.clone()) + .and_then(|r| r.model_provider.clone()) .unwrap_or_else(|| DEFAULT_ROUTING_LLM_PROVIDER.to_string()); let router_service: Arc = Arc::new(RouterService::new( - arch_config.llm_providers.clone(), + arch_config.model_providers.clone(), llm_provider_url.clone() + CHAT_COMPLETIONS_PATH, routing_model_name, routing_llm_provider, @@ -96,7 +99,6 @@ async fn main() -> Result<(), Box> { let model_aliases = Arc::new(arch_config.model_aliases.clone()); - loop { let (stream, _) = listener.accept().await?; let peer_addr = stream.peer_addr()?; @@ -107,24 +109,44 @@ async fn main() -> Result<(), Box> { let llm_provider_url = llm_provider_url.clone(); let llm_providers = llm_providers.clone(); + let agents_list = agents_list.clone(); + let listeners = listeners.clone(); let service = service_fn(move |req| { - let 
router_service = Arc::clone(&router_service); let parent_cx = extract_context_from_request(&req); let llm_provider_url = llm_provider_url.clone(); let llm_providers = llm_providers.clone(); let model_aliases = Arc::clone(&model_aliases); + let agents_list = agents_list.clone(); + let listeners = listeners.clone(); async move { match (req.method(), req.uri().path()) { (&Method::POST, CHAT_COMPLETIONS_PATH | MESSAGES_PATH) => { - let fully_qualified_url = format!("{}{}", llm_provider_url, req.uri().path()); + let fully_qualified_url = + format!("{}{}", llm_provider_url, req.uri().path()); chat(req, router_service, fully_qualified_url, model_aliases) .with_context(parent_cx) .await } - (&Method::GET, "/v1/models") => Ok(list_models(llm_providers).await), - (&Method::OPTIONS, "/v1/models") => { + (&Method::POST, "/agents/v1/chat/completions") => { + let fully_qualified_url = + format!("{}{}", llm_provider_url, req.uri().path()); + agent_chat( + req, + router_service, + fully_qualified_url, + agents_list, + listeners, + ) + .with_context(parent_cx) + .await + } + (&Method::GET, "/v1/models" | "/agents/v1/models") => { + Ok(list_models(llm_providers).await) + } + // hack for now to get openw-web-ui to work + (&Method::OPTIONS, "/v1/models" | "/agents/v1/models") => { let mut response = Response::new(empty()); *response.status_mut() = StatusCode::NO_CONTENT; response @@ -148,6 +170,7 @@ async fn main() -> Result<(), Box> { Ok(response) } _ => { + debug!("No route for {} {}", req.method(), req.uri().path()); let mut not_found = Response::new(empty()); *not_found.status_mut() = StatusCode::NOT_FOUND; Ok(not_found) diff --git a/crates/brightstaff/src/router/llm_router.rs b/crates/brightstaff/src/router/llm_router.rs index 3b09c115..5f71f9c8 100644 --- a/crates/brightstaff/src/router/llm_router.rs +++ b/crates/brightstaff/src/router/llm_router.rs @@ -79,7 +79,13 @@ impl RouterService { trace_parent: Option, usage_preferences: Option>, ) -> Result> { - if 
!self.llm_usage_defined { + if messages.is_empty() { + return Ok(None); + } + + if (usage_preferences.is_none() || usage_preferences.as_ref().unwrap().len() < 2) + && !self.llm_usage_defined + { return Ok(None); } diff --git a/crates/brightstaff/src/router/router_model_v1.rs b/crates/brightstaff/src/router/router_model_v1.rs index 1c1c14ef..758cf83a 100644 --- a/crates/brightstaff/src/router/router_model_v1.rs +++ b/crates/brightstaff/src/router/router_model_v1.rs @@ -1,9 +1,7 @@ use std::collections::HashMap; -use common::{ - configuration::{ModelUsagePreference, RoutingPreference}, -}; -use hermesllm::apis::openai::{ChatCompletionsRequest, MessageContent, Message, Role}; +use common::configuration::{ModelUsagePreference, RoutingPreference}; +use hermesllm::apis::openai::{ChatCompletionsRequest, Message, MessageContent, Role}; use serde::{Deserialize, Serialize}; use tracing::{debug, warn}; diff --git a/crates/brightstaff/src/utils/tracing.rs b/crates/brightstaff/src/utils/tracing.rs index 7acb249a..6da4b631 100644 --- a/crates/brightstaff/src/utils/tracing.rs +++ b/crates/brightstaff/src/utils/tracing.rs @@ -1,20 +1,27 @@ -use std::sync::OnceLock; use std::fmt; +use std::sync::OnceLock; use opentelemetry::global; use opentelemetry_sdk::{propagation::TraceContextPropagator, trace::SdkTracerProvider}; use opentelemetry_stdout::SpanExporter; -use tracing_subscriber::EnvFilter; -use tracing_subscriber::fmt::{format, time::FormatTime, FmtContext, FormatEvent, FormatFields}; -use tracing::{Event, Subscriber}; use time::macros::format_description; +use tracing::{Event, Subscriber}; +use tracing_subscriber::fmt::{format, time::FormatTime, FmtContext, FormatEvent, FormatFields}; +use tracing_subscriber::EnvFilter; struct BracketedTime; impl FormatTime for BracketedTime { fn format_time(&self, w: &mut format::Writer<'_>) -> fmt::Result { let now = time::OffsetDateTime::now_utc(); - write!(w, "[{}]", now.format(&format_description!("[year]-[month]-[day] 
[hour]:[minute]:[second].[subsecond digits:3]")).unwrap()) + write!( + w, + "[{}]", + now.format(&format_description!( + "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond digits:3]" + )) + .unwrap() + ) } } @@ -34,7 +41,11 @@ where let timer = BracketedTime; timer.format_time(&mut writer)?; - write!(writer, "[{}] ", event.metadata().level().to_string().to_lowercase())?; + write!( + writer, + "[{}] ", + event.metadata().level().to_string().to_lowercase() + )?; ctx.field_format().format_fields(writer.by_ref(), event)?; diff --git a/crates/common/src/configuration.rs b/crates/common/src/configuration.rs index 301a8206..c881afa4 100644 --- a/crates/common/src/configuration.rs +++ b/crates/common/src/configuration.rs @@ -9,7 +9,7 @@ use crate::api::open_ai::{ #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Routing { - pub llm_provider: Option, + pub model_provider: Option, pub model: Option, } @@ -18,11 +18,34 @@ pub struct ModelAlias { pub target: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Agent { + pub id: String, + pub kind: Option, + pub url: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AgentFilterChain { + pub id: String, + pub default: Option, + pub description: Option, + pub filter_chain: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Listener { + pub name: String, + pub router: Option, + pub agents: Option>, + pub port: u16, +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Configuration { pub version: String, pub endpoints: Option>, - pub llm_providers: Vec, + pub model_providers: Vec, pub model_aliases: Option>, pub overrides: Option, pub system_prompt: Option, @@ -33,6 +56,8 @@ pub struct Configuration { pub tracing: Option, pub mode: Option, pub routing: Option, + pub agents: Option>, + pub listeners: Vec, } #[derive(Debug, Clone, Serialize, Deserialize, Default)] diff --git a/crates/common/src/consts.rs b/crates/common/src/consts.rs index 
0eb5a036..14972485 100644 --- a/crates/common/src/consts.rs +++ b/crates/common/src/consts.rs @@ -29,3 +29,4 @@ pub const HALLUCINATION_TEMPLATE: &str = pub const OTEL_COLLECTOR_HTTP: &str = "opentelemetry_collector_http"; pub const OTEL_POST_PATH: &str = "/v1/traces"; pub const LLM_ROUTE_HEADER: &str = "x-arch-llm-route"; +pub const ENVOY_RETRY_HEADER: &str = "x-envoy-max-retries"; diff --git a/crates/common/src/routing.rs b/crates/common/src/routing.rs index 2e9bac09..f4baf896 100644 --- a/crates/common/src/routing.rs +++ b/crates/common/src/routing.rs @@ -33,7 +33,6 @@ pub fn get_llm_provider( return provider; } - if llm_providers.default().is_some() { return llm_providers.default().unwrap(); } diff --git a/crates/hermesllm/src/apis/anthropic.rs b/crates/hermesllm/src/apis/anthropic.rs index abfde5b7..a261be3c 100644 --- a/crates/hermesllm/src/apis/anthropic.rs +++ b/crates/hermesllm/src/apis/anthropic.rs @@ -5,10 +5,10 @@ use serde_with::skip_serializing_none; use std::collections::HashMap; use super::ApiDefinition; +use crate::clients::transformer::ExtractText; use crate::providers::request::{ProviderRequest, ProviderRequestError}; use crate::providers::response::{ProviderResponse, ProviderStreamResponse}; -use crate::clients::transformer::ExtractText; -use crate::{MESSAGES_PATH}; +use crate::MESSAGES_PATH; // Enum for all supported Anthropic APIs #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -52,9 +52,7 @@ impl ApiDefinition for AnthropicApi { } fn all_variants() -> Vec { - vec![ - AnthropicApi::Messages, - ] + vec![AnthropicApi::Messages] } } @@ -100,7 +98,6 @@ pub struct McpServer { pub tool_configuration: Option, } - #[skip_serializing_none] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MessagesRequest { @@ -121,10 +118,8 @@ pub struct MessagesRequest { pub stop_sequences: Option>, pub tools: Option>, pub tool_choice: Option, - } - // Messages API specific types #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 
#[serde(rename_all = "lowercase")] @@ -235,34 +230,21 @@ impl ExtractText for Vec { } } - #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "snake_case")] #[serde(tag = "type")] pub enum MessagesImageSource { - Base64 { - media_type: String, - data: String, - }, - Url { - url: String, - }, + Base64 { media_type: String, data: String }, + Url { url: String }, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "snake_case")] #[serde(tag = "type")] pub enum MessagesDocumentSource { - Base64 { - media_type: String, - data: String, - }, - Url { - url: String, - }, - File { - file_id: String, - }, + Base64 { media_type: String, data: String }, + Url { url: String }, + File { file_id: String }, } #[derive(Serialize, Deserialize, Debug, Clone)] @@ -276,7 +258,7 @@ impl ExtractText for MessagesMessageContent { fn extract_text(&self) -> String { match self { MessagesMessageContent::Single(text) => text.clone(), - MessagesMessageContent::Blocks(parts) => parts.extract_text() + MessagesMessageContent::Blocks(parts) => parts.extract_text(), } } } @@ -320,7 +302,6 @@ pub struct MessagesToolChoice { pub disable_parallel_tool_use: Option, } - #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "snake_case")] pub enum MessagesStopReason { @@ -457,7 +438,11 @@ impl ProviderResponse for MessagesResponse { Some(self) } fn extract_usage_counts(&self) -> Option<(usize, usize, usize)> { - Some((self.usage.input_tokens as usize, self.usage.output_tokens as usize, (self.usage.input_tokens + self.usage.output_tokens) as usize)) + Some(( + self.usage.input_tokens as usize, + self.usage.output_tokens as usize, + (self.usage.input_tokens + self.usage.output_tokens) as usize, + )) } } @@ -535,7 +520,7 @@ impl ProviderRequest for MessagesRequest { } fn metadata(&self) -> &Option> { - return &self.metadata; + return &self.metadata; } fn remove_metadata_key(&mut self, key: &str) -> bool { @@ -572,13 +557,11 @@ impl MessagesRole 
{ impl ProviderStreamResponse for MessagesStreamEvent { fn content_delta(&self) -> Option<&str> { match self { - MessagesStreamEvent::ContentBlockDelta { delta, .. } => { - match delta { - MessagesContentDelta::TextDelta { text } => Some(text), - MessagesContentDelta::ThinkingDelta { thinking } => Some(thinking), - _ => None, - } - } + MessagesStreamEvent::ContentBlockDelta { delta, .. } => match delta { + MessagesContentDelta::TextDelta { text } => Some(text), + MessagesContentDelta::ThinkingDelta { thinking } => Some(thinking), + _ => None, + }, _ => None, } } @@ -627,7 +610,8 @@ mod tests { }); // Deserialize JSON into MessagesRequest - let deserialized_request: MessagesRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: MessagesRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate required fields are properly set assert_eq!(deserialized_request.model, "claude-3-sonnet-20240229"); @@ -687,7 +671,8 @@ mod tests { }); // Deserialize JSON into MessagesRequest - let deserialized_request: MessagesRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: MessagesRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate required fields assert_eq!(deserialized_request.model, "claude-3-sonnet-20240229"); @@ -730,7 +715,10 @@ mod tests { assert_eq!(serialized_json["messages"], original_json["messages"]); assert_eq!(serialized_json["max_tokens"], original_json["max_tokens"]); assert_eq!(serialized_json["system"], original_json["system"]); - assert_eq!(serialized_json["service_tier"], original_json["service_tier"]); + assert_eq!( + serialized_json["service_tier"], + original_json["service_tier"] + ); assert_eq!(serialized_json["thinking"], original_json["thinking"]); assert_eq!(serialized_json["metadata"], original_json["metadata"]); @@ -818,7 +806,8 @@ mod tests { }); // Deserialize JSON into MessagesRequest - let deserialized_request: 
MessagesRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: MessagesRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate top-level fields assert_eq!(deserialized_request.model, "claude-3-sonnet-20240229"); @@ -833,7 +822,10 @@ mod tests { // Validate text content block if let MessagesContentBlock::Text { text, .. } = &content_blocks[0] { - assert_eq!(text, "What can you see in this image and what's the weather like?"); + assert_eq!( + text, + "What can you see in this image and what's the weather like?" + ); } else { panic!("Expected text content block"); } @@ -861,20 +853,32 @@ mod tests { // Validate thinking content block if let MessagesContentBlock::Thinking { thinking, .. } = &content_blocks[0] { - assert_eq!(thinking, "Let me analyze the image and then check the weather..."); + assert_eq!( + thinking, + "Let me analyze the image and then check the weather..." + ); } else { panic!("Expected thinking content block"); } // Validate text content block if let MessagesContentBlock::Text { text, .. } = &content_blocks[1] { - assert_eq!(text, "I can see the image. Let me check the weather for you."); + assert_eq!( + text, + "I can see the image. Let me check the weather for you." + ); } else { panic!("Expected text content block"); } // Validate tool use content block - if let MessagesContentBlock::ToolUse { ref id, ref name, ref input, .. } = content_blocks[2] { + if let MessagesContentBlock::ToolUse { + ref id, + ref name, + ref input, + .. 
+ } = content_blocks[2] + { assert_eq!(id, "toolu_weather123"); assert_eq!(name, "get_weather"); assert_eq!(input["location"], "San Francisco, CA"); @@ -892,7 +896,10 @@ mod tests { let tool = &tools[0]; assert_eq!(tool.name, "get_weather"); - assert_eq!(tool.description, Some("Get current weather information for a location".to_string())); + assert_eq!( + tool.description, + Some("Get current weather information for a location".to_string()) + ); assert_eq!(tool.input_schema["type"], "object"); assert!(tool.input_schema["properties"]["location"].is_object()); @@ -938,10 +945,16 @@ mod tests { assert_eq!(deserialized_mcp.name, "test-server"); assert_eq!(deserialized_mcp.server_type, McpServerType::Url); assert_eq!(deserialized_mcp.url, "https://example.com/mcp"); - assert_eq!(deserialized_mcp.authorization_token, Some("secret-token".to_string())); + assert_eq!( + deserialized_mcp.authorization_token, + Some("secret-token".to_string()) + ); if let Some(tool_config) = &deserialized_mcp.tool_configuration { - assert_eq!(tool_config.allowed_tools, Some(vec!["tool1".to_string(), "tool2".to_string()])); + assert_eq!( + tool_config.allowed_tools, + Some(vec!["tool1".to_string(), "tool2".to_string()]) + ); assert_eq!(tool_config.enabled, Some(true)); } else { panic!("Expected tool configuration"); @@ -957,7 +970,8 @@ mod tests { "url": "https://minimal.com/mcp" }); - let deserialized_minimal: McpServer = serde_json::from_value(minimal_mcp_json.clone()).unwrap(); + let deserialized_minimal: McpServer = + serde_json::from_value(minimal_mcp_json.clone()).unwrap(); assert_eq!(deserialized_minimal.name, "minimal-server"); assert_eq!(deserialized_minimal.server_type, McpServerType::Url); assert_eq!(deserialized_minimal.url, "https://minimal.com/mcp"); @@ -991,12 +1005,16 @@ mod tests { } }); - let deserialized_response: MessagesResponse = serde_json::from_value(response_json.clone()).unwrap(); + let deserialized_response: MessagesResponse = + 
serde_json::from_value(response_json.clone()).unwrap(); assert_eq!(deserialized_response.id, "msg_01ABC123"); assert_eq!(deserialized_response.obj_type, "message"); assert_eq!(deserialized_response.role, MessagesRole::Assistant); assert_eq!(deserialized_response.model, "claude-3-sonnet-20240229"); - assert_eq!(deserialized_response.stop_reason, MessagesStopReason::EndTurn); + assert_eq!( + deserialized_response.stop_reason, + MessagesStopReason::EndTurn + ); assert!(deserialized_response.stop_sequence.is_none()); assert!(deserialized_response.container.is_none()); @@ -1011,7 +1029,10 @@ mod tests { // Check usage assert_eq!(deserialized_response.usage.input_tokens, 10); assert_eq!(deserialized_response.usage.output_tokens, 25); - assert_eq!(deserialized_response.usage.cache_creation_input_tokens, Some(5)); + assert_eq!( + deserialized_response.usage.cache_creation_input_tokens, + Some(5) + ); assert_eq!(deserialized_response.usage.cache_read_input_tokens, Some(3)); let serialized_response_json = serde_json::to_value(&deserialized_response).unwrap(); @@ -1027,7 +1048,8 @@ mod tests { } }); - let deserialized_event: MessagesStreamEvent = serde_json::from_value(stream_event_json.clone()).unwrap(); + let deserialized_event: MessagesStreamEvent = + serde_json::from_value(stream_event_json.clone()).unwrap(); if let MessagesStreamEvent::ContentBlockDelta { index, ref delta } = deserialized_event { assert_eq!(index, 0); if let MessagesContentDelta::TextDelta { text } = delta { @@ -1055,8 +1077,15 @@ mod tests { } }); - let deserialized_tool_use: MessagesContentBlock = serde_json::from_value(tool_use_json.clone()).unwrap(); - if let MessagesContentBlock::ToolUse { ref id, ref name, ref input, .. } = deserialized_tool_use { + let deserialized_tool_use: MessagesContentBlock = + serde_json::from_value(tool_use_json.clone()).unwrap(); + if let MessagesContentBlock::ToolUse { + ref id, + ref name, + ref input, + .. 
+ } = deserialized_tool_use + { assert_eq!(id, "toolu_01ABC123"); assert_eq!(name, "get_weather"); assert_eq!(input["location"], "San Francisco, CA"); @@ -1079,8 +1108,15 @@ mod tests { ] }); - let deserialized_tool_result: MessagesContentBlock = serde_json::from_value(tool_result_json.clone()).unwrap(); - if let MessagesContentBlock::ToolResult { ref tool_use_id, ref is_error, ref content, .. } = deserialized_tool_result { + let deserialized_tool_result: MessagesContentBlock = + serde_json::from_value(tool_result_json.clone()).unwrap(); + if let MessagesContentBlock::ToolResult { + ref tool_use_id, + ref is_error, + ref content, + .. + } = deserialized_tool_result + { assert_eq!(tool_use_id, "toolu_01ABC123"); assert!(is_error.is_none()); if let ToolResultContent::Blocks(blocks) = content { @@ -1229,7 +1265,8 @@ mod tests { }); // Deserialize the complex MessagesRequest - let deserialized_request: MessagesRequest = serde_json::from_value(complex_request_json.clone()).unwrap(); + let deserialized_request: MessagesRequest = + serde_json::from_value(complex_request_json.clone()).unwrap(); // Verify basic fields assert_eq!(deserialized_request.model, "claude-sonnet-4-20250514"); @@ -1239,8 +1276,15 @@ mod tests { // Verify system message with cache_control if let Some(MessagesSystemPrompt::Blocks(ref system_blocks)) = deserialized_request.system { assert_eq!(system_blocks.len(), 2); - if let MessagesContentBlock::Text { text, cache_control } = &system_blocks[0] { - assert_eq!(text, "You are Claude Code, Anthropic's official CLI for Claude."); + if let MessagesContentBlock::Text { + text, + cache_control, + } = &system_blocks[0] + { + assert_eq!( + text, + "You are Claude Code, Anthropic's official CLI for Claude." 
+ ); assert_eq!(cache_control, &Some(MessagesCacheControl::Ephemeral)); } else { panic!("Expected text system message with cache_control"); @@ -1253,7 +1297,13 @@ mod tests { let assistant_message = &deserialized_request.messages[1]; assert_eq!(assistant_message.role, MessagesRole::Assistant); if let MessagesMessageContent::Blocks(ref content_blocks) = assistant_message.content { - if let MessagesContentBlock::ToolUse { id, name, input, cache_control } = &content_blocks[0] { + if let MessagesContentBlock::ToolUse { + id, + name, + input, + cache_control, + } = &content_blocks[0] + { assert_eq!(id, "call_kV50LtJQKHvvzZui5TW56DUl"); assert_eq!(name, "TodoWrite"); assert_eq!(cache_control, &Some(MessagesCacheControl::Ephemeral)); @@ -1272,7 +1322,12 @@ mod tests { let user_message = &deserialized_request.messages[2]; assert_eq!(user_message.role, MessagesRole::User); if let MessagesMessageContent::Blocks(ref content_blocks) = user_message.content { - if let MessagesContentBlock::ToolResult { tool_use_id, content, .. } = &content_blocks[0] { + if let MessagesContentBlock::ToolResult { + tool_use_id, + content, + .. 
+ } = &content_blocks[0] + { assert_eq!(tool_use_id, "call_kV50LtJQKHvvzZui5TW56DUl"); if let ToolResultContent::Text(text) = content { assert!(text.contains("Todos have been modified successfully")); @@ -1284,7 +1339,11 @@ mod tests { } // Verify text content with cache_control - if let MessagesContentBlock::Text { text, cache_control } = &content_blocks[2] { + if let MessagesContentBlock::Text { + text, + cache_control, + } = &content_blocks[2] + { assert_eq!(text, "try again"); assert_eq!(cache_control, &Some(MessagesCacheControl::Ephemeral)); } else { @@ -1296,11 +1355,15 @@ mod tests { // Test serialization round-trip let serialized_request = serde_json::to_value(&deserialized_request).unwrap(); - let re_deserialized_request: MessagesRequest = serde_json::from_value(serialized_request).unwrap(); + let re_deserialized_request: MessagesRequest = + serde_json::from_value(serialized_request).unwrap(); // Verify round-trip consistency assert_eq!(deserialized_request.model, re_deserialized_request.model); - assert_eq!(deserialized_request.messages.len(), re_deserialized_request.messages.len()); + assert_eq!( + deserialized_request.messages.len(), + re_deserialized_request.messages.len() + ); } #[test] @@ -1339,7 +1402,8 @@ mod tests { } }); - let deserialized_event: MessagesStreamEvent = serde_json::from_value(thinking_delta_json.clone()).unwrap(); + let deserialized_event: MessagesStreamEvent = + serde_json::from_value(thinking_delta_json.clone()).unwrap(); if let MessagesStreamEvent::ContentBlockDelta { index, ref delta } = deserialized_event { assert_eq!(index, 0); if let MessagesContentDelta::ThinkingDelta { thinking } = delta { @@ -1352,7 +1416,10 @@ mod tests { } // Test that thinking delta is returned by content_delta() - assert_eq!(deserialized_event.content_delta(), Some(".\n\nI need to consider:\n1. Current")); + assert_eq!( + deserialized_event.content_delta(), + Some(".\n\nI need to consider:\n1. 
Current") + ); let serialized_event_json = serde_json::to_value(&deserialized_event).unwrap(); assert_eq!(thinking_delta_json, serialized_event_json); @@ -1376,7 +1443,8 @@ mod tests { } }); - let deserialized_request: MessagesRequest = serde_json::from_value(request_json.clone()).unwrap(); + let deserialized_request: MessagesRequest = + serde_json::from_value(request_json.clone()).unwrap(); assert_eq!(deserialized_request.model, "claude-sonnet-4-20250514"); assert_eq!(deserialized_request.max_tokens, 2048); diff --git a/crates/hermesllm/src/apis/mod.rs b/crates/hermesllm/src/apis/mod.rs index b175988c..99158dfa 100644 --- a/crates/hermesllm/src/apis/mod.rs +++ b/crates/hermesllm/src/apis/mod.rs @@ -3,7 +3,6 @@ pub mod openai; pub use anthropic::*; pub use openai::*; - pub trait ApiDefinition { /// Returns the endpoint path for this API fn endpoint(&self) -> &'static str; @@ -49,11 +48,7 @@ mod tests { #[test] fn test_api_detection_from_endpoints() { // Test that we can detect APIs from endpoints using the trait - let endpoints = vec![ - CHAT_COMPLETIONS_PATH, - MESSAGES_PATH, - "/v1/unknown" - ]; + let endpoints = vec![CHAT_COMPLETIONS_PATH, MESSAGES_PATH, "/v1/unknown"]; let mut detected_apis = Vec::new(); @@ -67,11 +62,14 @@ mod tests { } } - assert_eq!(detected_apis, vec![ - "OpenAI: ChatCompletions", - "Anthropic: Messages", - "Unknown API" - ]); + assert_eq!( + detected_apis, + vec![ + "OpenAI: ChatCompletions", + "Anthropic: Messages", + "Unknown API" + ] + ); } #[test] diff --git a/crates/hermesllm/src/apis/openai.rs b/crates/hermesllm/src/apis/openai.rs index 63b5fc58..58e4c8a5 100644 --- a/crates/hermesllm/src/apis/openai.rs +++ b/crates/hermesllm/src/apis/openai.rs @@ -5,11 +5,11 @@ use std::collections::HashMap; use std::fmt::Display; use thiserror::Error; +use super::ApiDefinition; +use crate::clients::transformer::ExtractText; use crate::providers::request::{ProviderRequest, ProviderRequestError}; use crate::providers::response::{ProviderResponse, 
ProviderStreamResponse, TokenUsage}; -use super::ApiDefinition; -use crate::clients::transformer::{ExtractText}; -use crate::{CHAT_COMPLETIONS_PATH}; +use crate::CHAT_COMPLETIONS_PATH; // ============================================================================ // OPENAI API ENUMERATION @@ -46,7 +46,7 @@ impl ApiDefinition for OpenAIApi { } fn supports_tools(&self) -> bool { - match self { + match self { OpenAIApi::ChatCompletions => true, } } @@ -58,9 +58,7 @@ impl ApiDefinition for OpenAIApi { } fn all_variants() -> Vec { - vec![ - OpenAIApi::ChatCompletions, - ] + vec![OpenAIApi::ChatCompletions] } } @@ -190,7 +188,9 @@ impl ResponseMessage { pub fn to_message(&self) -> Message { Message { role: self.role.clone(), - content: self.content.as_ref() + content: self + .content + .as_ref() .map(|s| MessageContent::Text(s.clone())) .unwrap_or(MessageContent::Text(String::new())), name: None, // Response messages don't have names in the same way request messages do @@ -215,7 +215,7 @@ impl ExtractText for MessageContent { fn extract_text(&self) -> String { match self { MessageContent::Text(text) => text.clone(), - MessageContent::Parts(parts) => parts.extract_text() + MessageContent::Parts(parts) => parts.extract_text(), } } } @@ -274,7 +274,6 @@ pub struct ImageUrl { /// A single message in a chat conversation - /// A tool call made by the assistant #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct ToolCall { @@ -374,7 +373,6 @@ pub enum StaticContentType { Parts(Vec), } - /// Chat completions API response #[skip_serializing_none] #[derive(Serialize, Deserialize, Debug, Clone)] @@ -496,7 +494,6 @@ pub struct ChatCompletionsStreamResponse { pub service_tier: Option, } - /// A choice in a streaming response #[skip_serializing_none] #[derive(Serialize, Deserialize, Debug, Clone)] @@ -566,7 +563,6 @@ pub struct Models { pub data: Vec, } - // Error type for streaming operations #[derive(Debug, thiserror::Error)] pub enum OpenAIStreamError { @@ 
-597,13 +593,13 @@ pub enum OpenAIError { /// Trait Implementations /// =========================================================================== - /// Parameterized conversion for ChatCompletionsRequest impl TryFrom<&[u8]> for ChatCompletionsRequest { type Error = OpenAIStreamError; fn try_from(bytes: &[u8]) -> Result { - let mut req: ChatCompletionsRequest = serde_json::from_slice(bytes).map_err(OpenAIStreamError::from)?; + let mut req: ChatCompletionsRequest = + serde_json::from_slice(bytes).map_err(OpenAIStreamError::from)?; // Use the centralized suppression logic req.suppress_max_tokens_if_o3(); req.fix_temperature_if_gpt5(); @@ -651,13 +647,18 @@ impl ProviderRequest for ChatCompletionsRequest { fn extract_messages_text(&self) -> String { self.messages.iter().fold(String::new(), |acc, m| { - acc + " " + &match &m.content { - MessageContent::Text(text) => text.clone(), - MessageContent::Parts(parts) => parts.iter().map(|part| match part { - ContentPart::Text { text } => text.clone(), - ContentPart::ImageUrl { .. } => "[Image]".to_string(), - }).collect::>().join(" ") - } + acc + " " + + &match &m.content { + MessageContent::Text(text) => text.clone(), + MessageContent::Parts(parts) => parts + .iter() + .map(|part| match part { + ContentPart::Text { text } => text.clone(), + ContentPart::ImageUrl { .. 
} => "[Image]".to_string(), + }) + .collect::>() + .join(" "), + } }) } @@ -721,14 +722,14 @@ impl ProviderStreamResponse for ChatCompletionsStreamResponse { } fn role(&self) -> Option<&str> { - self.choices - .first() - .and_then(|choice| choice.delta.role.as_ref().map(|r| match r { + self.choices.first().and_then(|choice| { + choice.delta.role.as_ref().map(|r| match r { Role::System => "system", Role::User => "user", Role::Assistant => "assistant", Role::Tool => "tool", - })) + }) + }) } fn event_type(&self) -> Option<&str> { @@ -736,7 +737,6 @@ impl ProviderStreamResponse for ChatCompletionsStreamResponse { } } - #[cfg(test)] mod tests { use super::*; @@ -756,7 +756,8 @@ mod tests { }); // Deserialize JSON into ChatCompletionsRequest - let deserialized_request: ChatCompletionsRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: ChatCompletionsRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate required fields are properly set assert_eq!(deserialized_request.model, "gpt-4"); @@ -799,7 +800,8 @@ mod tests { }); // Deserialize JSON into ChatCompletionsRequest - let deserialized_request: ChatCompletionsRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: ChatCompletionsRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate required fields assert_eq!(deserialized_request.model, "gpt-4"); @@ -836,7 +838,10 @@ mod tests { assert_eq!(serialized_json["messages"], original_json["messages"]); assert_eq!(serialized_json["max_tokens"], original_json["max_tokens"]); assert_eq!(serialized_json["stream"], original_json["stream"]); - assert_eq!(serialized_json["stream_options"], original_json["stream_options"]); + assert_eq!( + serialized_json["stream_options"], + original_json["stream_options"] + ); assert_eq!(serialized_json["metadata"], original_json["metadata"]); // Handle temperature with floating point tolerance @@ -917,7 +922,8 @@ mod 
tests { }); // Deserialize JSON into ChatCompletionsRequest - let deserialized_request: ChatCompletionsRequest = serde_json::from_value(original_json.clone()).unwrap(); + let deserialized_request: ChatCompletionsRequest = + serde_json::from_value(original_json.clone()).unwrap(); // Validate top-level fields assert_eq!(deserialized_request.model, "gpt-4-vision-preview"); @@ -953,7 +959,10 @@ mod tests { let assistant_message = &deserialized_request.messages[1]; assert_eq!(assistant_message.role, Role::Assistant); if let MessageContent::Text(text) = &assistant_message.content { - assert_eq!(text, "I can see a beautiful cityscape. Let me check the weather for you."); + assert_eq!( + text, + "I can see a beautiful cityscape. Let me check the weather for you." + ); } else { panic!("Expected text content for assistant message"); } @@ -967,7 +976,10 @@ mod tests { assert_eq!(tool_call.id, "call_weather123"); assert_eq!(tool_call.call_type, "function"); assert_eq!(tool_call.function.name, "get_weather"); - assert_eq!(tool_call.function.arguments, "{\"location\": \"New York, NY\"}"); + assert_eq!( + tool_call.function.arguments, + "{\"location\": \"New York, NY\"}" + ); // Validate third message (tool response) let tool_message = &deserialized_request.messages[2]; @@ -977,7 +989,10 @@ mod tests { } else { panic!("Expected text content for tool message"); } - assert_eq!(tool_message.tool_call_id, Some("call_weather123".to_string())); + assert_eq!( + tool_message.tool_call_id, + Some("call_weather123".to_string()) + ); // Validate tools array assert!(deserialized_request.tools.is_some()); @@ -987,7 +1002,10 @@ mod tests { let tool = &tools[0]; assert_eq!(tool.tool_type, "function"); assert_eq!(tool.function.name, "get_weather"); - assert_eq!(tool.function.description, Some("Get current weather information for a location".to_string())); + assert_eq!( + tool.function.description, + Some("Get current weather information for a location".to_string()) + ); 
assert_eq!(tool.function.strict, Some(true)); // Validate tool parameters schema @@ -1093,7 +1111,8 @@ mod tests { ] }); - let deserialized_assistant: Message = serde_json::from_value(assistant_json.clone()).unwrap(); + let deserialized_assistant: Message = + serde_json::from_value(assistant_json.clone()).unwrap(); assert_eq!(deserialized_assistant.role, Role::Assistant); if let MessageContent::Text(content) = &deserialized_assistant.content { assert_eq!(content, "I'll help with that."); @@ -1142,9 +1161,13 @@ mod tests { ] }); - let deserialized_response: ResponseMessage = serde_json::from_value(response_json.clone()).unwrap(); + let deserialized_response: ResponseMessage = + serde_json::from_value(response_json.clone()).unwrap(); assert_eq!(deserialized_response.role, Role::Assistant); - assert_eq!(deserialized_response.content, Some("Response content".to_string())); + assert_eq!( + deserialized_response.content, + Some("Response content".to_string()) + ); assert!(deserialized_response.annotations.is_some()); assert!(deserialized_response.refusal.is_none()); assert!(deserialized_response.function_call.is_none()); @@ -1186,7 +1209,10 @@ mod tests { let none_deserialized: ToolChoice = serde_json::from_value(json!("none")).unwrap(); assert_eq!(auto_deserialized, ToolChoice::Type(ToolChoiceType::Auto)); - assert_eq!(required_deserialized, ToolChoice::Type(ToolChoiceType::Required)); + assert_eq!( + required_deserialized, + ToolChoice::Type(ToolChoiceType::Required) + ); assert_eq!(none_deserialized, ToolChoice::Type(ToolChoiceType::None)); // Test that invalid string values fail deserialization (type safety!) 
@@ -1237,7 +1263,10 @@ mod tests { assert_eq!(response.created, 1756574706); assert_eq!(response.model, "gpt-4o-2024-08-06"); assert_eq!(response.service_tier, Some("default".to_string())); - assert_eq!(response.system_fingerprint, Some("fp_f33640a400".to_string())); + assert_eq!( + response.system_fingerprint, + Some("fp_f33640a400".to_string()) + ); assert_eq!(response.choices.len(), 1); assert_eq!(response.usage.prompt_tokens, 65); assert_eq!(response.usage.completion_tokens, 184); diff --git a/crates/hermesllm/src/clients/endpoints.rs b/crates/hermesllm/src/clients/endpoints.rs index e5c01f05..263ca674 100644 --- a/crates/hermesllm/src/clients/endpoints.rs +++ b/crates/hermesllm/src/clients/endpoints.rs @@ -21,7 +21,10 @@ //! assert!(endpoints.contains(&"/v1/messages")); //! ``` -use crate::{apis::{AnthropicApi, ApiDefinition, OpenAIApi}, ProviderId}; +use crate::{ + apis::{AnthropicApi, ApiDefinition, OpenAIApi}, + ProviderId, +}; use std::fmt; /// Unified enum representing all supported API endpoints across providers @@ -34,8 +37,12 @@ pub enum SupportedAPIs { impl fmt::Display for SupportedAPIs { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - SupportedAPIs::OpenAIChatCompletions(api) => write!(f, "OpenAI API ({})", api.endpoint()), - SupportedAPIs::AnthropicMessagesAPI(api) => write!(f, "Anthropic API ({})", api.endpoint()), + SupportedAPIs::OpenAIChatCompletions(api) => { + write!(f, "OpenAI API ({})", api.endpoint()) + } + SupportedAPIs::AnthropicMessagesAPI(api) => { + write!(f, "Anthropic API ({})", api.endpoint()) + } } } } @@ -62,61 +69,60 @@ impl SupportedAPIs { } } - pub fn target_endpoint_for_provider(&self, provider_id: &ProviderId, request_path: &str, model_id: &str) -> String { + pub fn target_endpoint_for_provider( + &self, + provider_id: &ProviderId, + request_path: &str, + model_id: &str, + ) -> String { let default_endpoint = "/v1/chat/completions".to_string(); match self { - 
SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages) => { - match provider_id { - ProviderId::Anthropic => "/v1/messages".to_string(), - _ => default_endpoint, + SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages) => match provider_id { + ProviderId::Anthropic => "/v1/messages".to_string(), + _ => default_endpoint, + }, + _ => match provider_id { + ProviderId::Groq => { + if request_path.starts_with("/v1/") { + format!("/openai{}", request_path) + } else { + default_endpoint + } } - } - _ => { - match provider_id { - ProviderId::Groq => { - if request_path.starts_with("/v1/") { - format!("/openai{}", request_path) - } else { - default_endpoint - } + ProviderId::Zhipu => { + if request_path.starts_with("/v1/") { + "/api/paas/v4/chat/completions".to_string() + } else { + default_endpoint } - ProviderId::Zhipu => { - if request_path.starts_with("/v1/") { - "/api/paas/v4/chat/completions".to_string() - } else { - default_endpoint - } - } - ProviderId::Qwen => { - if request_path.starts_with("/v1/") { - "/compatible-mode/v1/chat/completions".to_string() - } else { - default_endpoint - } - } - ProviderId::AzureOpenAI => { - if request_path.starts_with("/v1/") { - format!("/openai/deployments/{}/chat/completions?api-version=2025-01-01-preview", model_id) - } else { - default_endpoint - } - } - ProviderId::Gemini => { - if request_path.starts_with("/v1/") { - "/v1beta/openai/chat/completions".to_string() - } else { - default_endpoint - } - } - _ => default_endpoint, } - } + ProviderId::Qwen => { + if request_path.starts_with("/v1/") { + "/compatible-mode/v1/chat/completions".to_string() + } else { + default_endpoint + } + } + ProviderId::AzureOpenAI => { + if request_path.starts_with("/v1/") { + format!("/openai/deployments/{}/chat/completions?api-version=2025-01-01-preview", model_id) + } else { + default_endpoint + } + } + ProviderId::Gemini => { + if request_path.starts_with("/v1/") { + "/v1beta/openai/chat/completions".to_string() + } else { + 
default_endpoint + } + } + _ => default_endpoint, + }, } } } - - /// Get all supported endpoint paths pub fn supported_endpoints() -> Vec<&'static str> { let mut endpoints = Vec::new(); @@ -196,15 +202,26 @@ mod tests { // All OpenAI endpoints should be in the result for endpoint in openai_endpoints { - assert!(endpoints.contains(&endpoint), "Missing OpenAI endpoint: {}", endpoint); + assert!( + endpoints.contains(&endpoint), + "Missing OpenAI endpoint: {}", + endpoint + ); } // All Anthropic endpoints should be in the result for endpoint in anthropic_endpoints { - assert!(endpoints.contains(&endpoint), "Missing Anthropic endpoint: {}", endpoint); + assert!( + endpoints.contains(&endpoint), + "Missing Anthropic endpoint: {}", + endpoint + ); } // Total should match - assert_eq!(endpoints.len(), OpenAIApi::all_variants().len() + AnthropicApi::all_variants().len()); + assert_eq!( + endpoints.len(), + OpenAIApi::all_variants().len() + AnthropicApi::all_variants().len() + ); } } diff --git a/crates/hermesllm/src/clients/mod.rs b/crates/hermesllm/src/clients/mod.rs index 73972445..b93f910e 100644 --- a/crates/hermesllm/src/clients/mod.rs +++ b/crates/hermesllm/src/clients/mod.rs @@ -1,9 +1,9 @@ +pub mod endpoints; pub mod lib; pub mod transformer; -pub mod endpoints; // Re-export the main items for easier access +pub use endpoints::{identify_provider, SupportedAPIs}; pub use lib::*; -pub use endpoints::{SupportedAPIs, identify_provider}; // Note: transformer module contains TryFrom trait implementations that are automatically available diff --git a/crates/hermesllm/src/clients/transformer.rs b/crates/hermesllm/src/clients/transformer.rs index f6e508d4..11caae6f 100644 --- a/crates/hermesllm/src/clients/transformer.rs +++ b/crates/hermesllm/src/clients/transformer.rs @@ -42,10 +42,10 @@ //! # Ok::<(), Box>(()) //! 
``` +use super::TransformError; +use crate::apis::*; use serde_json::Value; use std::time::{SystemTime, UNIX_EPOCH}; -use crate::apis::*; -use super::TransformError; // ============================================================================ // CONSTANTS @@ -66,7 +66,9 @@ pub trait ExtractText { /// Trait for utility functions on content collections trait ContentUtils { fn extract_tool_calls(&self) -> Result>, TransformError>; - fn split_for_openai(&self) -> Result<(Vec, Vec, Vec<(String, String, bool)>), TransformError>; + fn split_for_openai( + &self, + ) -> Result<(Vec, Vec, Vec<(String, String, bool)>), TransformError>; } // ============================================================================ @@ -75,7 +77,6 @@ trait ContentUtils { type AnthropicMessagesRequest = MessagesRequest; - impl TryFrom for ChatCompletionsRequest { type Error = TransformError; @@ -95,7 +96,8 @@ impl TryFrom for ChatCompletionsRequest { // Convert tools and tool choice let openai_tools = req.tools.map(|tools| convert_anthropic_tools(tools)); - let (openai_tool_choice, parallel_tool_calls) = convert_anthropic_tool_choice(req.tool_choice); + let (openai_tool_choice, parallel_tool_calls) = + convert_anthropic_tool_choice(req.tool_choice); let mut _chat_completions_req: ChatCompletionsRequest = ChatCompletionsRequest { model: req.model, @@ -137,13 +139,15 @@ impl TryFrom for AnthropicMessagesRequest { // Convert tools and tool choice let anthropic_tools = req.tools.map(|tools| convert_openai_tools(tools)); - let anthropic_tool_choice = convert_openai_tool_choice(req.tool_choice, req.parallel_tool_calls); + let anthropic_tool_choice = + convert_openai_tool_choice(req.tool_choice, req.parallel_tool_calls); Ok(AnthropicMessagesRequest { model: req.model, system: system_prompt, messages, - max_tokens: req.max_completion_tokens + max_tokens: req + .max_completion_tokens .or(req.max_tokens) .unwrap_or(DEFAULT_MAX_TOKENS), container: None, @@ -179,7 +183,11 @@ impl TryFrom for 
ChatCompletionsResponse { MessageContent::Text(text) => Some(text), MessageContent::Parts(parts) => { let text = parts.extract_text(); - if text.is_empty() { None } else { Some(text) } + if text.is_empty() { + None + } else { + Some(text) + } } }; @@ -225,11 +233,15 @@ impl TryFrom for MessagesResponse { type Error = TransformError; fn try_from(resp: ChatCompletionsResponse) -> Result { - let choice = resp.choices.into_iter().next() + let choice = resp + .choices + .into_iter() + .next() .ok_or_else(|| TransformError::MissingField("choices".to_string()))?; let content = convert_openai_message_to_anthropic_content(&choice.message.to_message())?; - let stop_reason = choice.finish_reason + let stop_reason = choice + .finish_reason .map(|fr| fr.into()) .unwrap_or(MessagesStopReason::EndTurn); @@ -263,33 +275,27 @@ impl TryFrom for ChatCompletionsStreamResponse { fn try_from(event: MessagesStreamEvent) -> Result { match event { - MessagesStreamEvent::MessageStart { message } => { - Ok(create_openai_chunk( - &message.id, - &message.model, - MessageDelta { - role: Some(Role::Assistant), - content: None, - refusal: None, - function_call: None, - tool_calls: None, - }, - None, - None, - )) - } + MessagesStreamEvent::MessageStart { message } => Ok(create_openai_chunk( + &message.id, + &message.model, + MessageDelta { + role: Some(Role::Assistant), + content: None, + refusal: None, + function_call: None, + tool_calls: None, + }, + None, + None, + )), MessagesStreamEvent::ContentBlockStart { content_block, .. } => { convert_content_block_start(content_block) } - MessagesStreamEvent::ContentBlockDelta { delta, .. } => { - convert_content_delta(delta) - } + MessagesStreamEvent::ContentBlockDelta { delta, .. } => convert_content_delta(delta), - MessagesStreamEvent::ContentBlockStop { .. } => { - Ok(create_empty_openai_chunk()) - } + MessagesStreamEvent::ContentBlockStop { .. 
} => Ok(create_empty_openai_chunk()), MessagesStreamEvent::MessageDelta { delta, usage } => { let finish_reason: Option = Some(delta.stop_reason.into()); @@ -310,34 +316,30 @@ impl TryFrom for ChatCompletionsStreamResponse { )) } - MessagesStreamEvent::MessageStop => { - Ok(create_openai_chunk( - "stream", - "unknown", - MessageDelta { - role: None, - content: None, - refusal: None, - function_call: None, - tool_calls: None, - }, - Some(FinishReason::Stop), - None, - )) - } + MessagesStreamEvent::MessageStop => Ok(create_openai_chunk( + "stream", + "unknown", + MessageDelta { + role: None, + content: None, + refusal: None, + function_call: None, + tool_calls: None, + }, + Some(FinishReason::Stop), + None, + )), - MessagesStreamEvent::Ping => { - Ok(ChatCompletionsStreamResponse { - id: "stream".to_string(), - object: Some("chat.completion.chunk".to_string()), - created: current_timestamp(), - model: "unknown".to_string(), - choices: vec![], - usage: None, - system_fingerprint: None, - service_tier: None, - }) - } + MessagesStreamEvent::Ping => Ok(ChatCompletionsStreamResponse { + id: "stream".to_string(), + object: Some("chat.completion.chunk".to_string()), + created: current_timestamp(), + model: "unknown".to_string(), + choices: vec![], + usage: None, + system_fingerprint: None, + service_tier: None, + }), } } } @@ -442,9 +444,7 @@ impl Into for MessagesSystemPrompt { fn into(self) -> Message { let system_content = match self { MessagesSystemPrompt::Single(text) => MessageContent::Text(text), - MessagesSystemPrompt::Blocks(blocks) => { - MessageContent::Text(blocks.extract_text()) - } + MessagesSystemPrompt::Blocks(blocks) => MessageContent::Text(blocks.extract_text()), }; Message { @@ -461,7 +461,7 @@ impl Into for Message { fn into(self) -> MessagesSystemPrompt { let system_text = match self.content { MessageContent::Text(text) => text, - MessageContent::Parts(parts) => parts.extract_text() + MessageContent::Parts(parts) => parts.extract_text(), }; 
MessagesSystemPrompt::Single(system_text) } @@ -505,7 +505,11 @@ impl TryFrom for Vec { role: message.role.into(), content, name: None, - tool_calls: if tool_calls.is_empty() { None } else { Some(tool_calls) }, + tool_calls: if tool_calls.is_empty() { + None + } else { + Some(tool_calls) + }, tool_call_id: None, }; result.push(main_message); @@ -526,8 +530,11 @@ impl TryFrom for MessagesMessage { Role::Assistant => MessagesRole::Assistant, Role::Tool => { // Tool messages become user messages with tool results - let tool_call_id = message.tool_call_id - .ok_or_else(|| TransformError::MissingField("tool_call_id required for Tool messages".to_string()))?; + let tool_call_id = message.tool_call_id.ok_or_else(|| { + TransformError::MissingField( + "tool_call_id required for Tool messages".to_string(), + ) + })?; return Ok(MessagesMessage { role: MessagesRole::User, @@ -545,7 +552,9 @@ impl TryFrom for MessagesMessage { }); } Role::System => { - return Err(TransformError::UnsupportedConversion("System messages should be handled separately".to_string())); + return Err(TransformError::UnsupportedConversion( + "System messages should be handled separately".to_string(), + )); } }; @@ -573,24 +582,36 @@ impl ContentUtils for Vec { for block in self { match block { - MessagesContentBlock::ToolUse { id, name, input, .. } | - MessagesContentBlock::ServerToolUse { id, name, input } | - MessagesContentBlock::McpToolUse { id, name, input } => { + MessagesContentBlock::ToolUse { + id, name, input, .. 
+ } + | MessagesContentBlock::ServerToolUse { id, name, input } + | MessagesContentBlock::McpToolUse { id, name, input } => { let arguments = serde_json::to_string(&input)?; tool_calls.push(ToolCall { id: id.clone(), call_type: "function".to_string(), - function: FunctionCall { name: name.clone(), arguments }, + function: FunctionCall { + name: name.clone(), + arguments, + }, }); } _ => continue, } } - Ok(if tool_calls.is_empty() { None } else { Some(tool_calls) }) + Ok(if tool_calls.is_empty() { + None + } else { + Some(tool_calls) + }) } - fn split_for_openai(&self) -> Result<(Vec, Vec, Vec<(String, String, bool)>), TransformError> { + fn split_for_openai( + &self, + ) -> Result<(Vec, Vec, Vec<(String, String, bool)>), TransformError> + { let mut content_parts = Vec::new(); let mut tool_calls = Vec::new(); let mut tool_results = Vec::new(); @@ -609,25 +630,55 @@ impl ContentUtils for Vec { }, }); } - MessagesContentBlock::ToolUse { id, name, input, .. } | - MessagesContentBlock::ServerToolUse { id, name, input } | - MessagesContentBlock::McpToolUse { id, name, input } => { + MessagesContentBlock::ToolUse { + id, name, input, .. + } + | MessagesContentBlock::ServerToolUse { id, name, input } + | MessagesContentBlock::McpToolUse { id, name, input } => { let arguments = serde_json::to_string(&input)?; tool_calls.push(ToolCall { id: id.clone(), call_type: "function".to_string(), - function: FunctionCall { name: name.clone(), arguments }, + function: FunctionCall { + name: name.clone(), + arguments, + }, }); } - MessagesContentBlock::ToolResult { tool_use_id, content, is_error, .. } => { + MessagesContentBlock::ToolResult { + tool_use_id, + content, + is_error, + .. 
+ } => { let result_text = content.extract_text(); - tool_results.push((tool_use_id.clone(), result_text, is_error.unwrap_or(false))); + tool_results.push(( + tool_use_id.clone(), + result_text, + is_error.unwrap_or(false), + )); } - MessagesContentBlock::WebSearchToolResult { tool_use_id, content, is_error } | - MessagesContentBlock::CodeExecutionToolResult { tool_use_id, content, is_error } | - MessagesContentBlock::McpToolResult { tool_use_id, content, is_error } => { + MessagesContentBlock::WebSearchToolResult { + tool_use_id, + content, + is_error, + } + | MessagesContentBlock::CodeExecutionToolResult { + tool_use_id, + content, + is_error, + } + | MessagesContentBlock::McpToolResult { + tool_use_id, + content, + is_error, + } => { let result_text = content.extract_text(); - tool_results.push((tool_use_id.clone(), result_text, is_error.unwrap_or(false))); + tool_results.push(( + tool_use_id.clone(), + result_text, + is_error.unwrap_or(false), + )); } _ => { // Skip unsupported content types @@ -696,7 +747,10 @@ impl Into for Usage { /// Helper to create a current unix timestamp fn current_timestamp() -> u64 { - SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() } /// Helper to create OpenAI streaming chunk @@ -705,7 +759,7 @@ fn create_openai_chunk( model: &str, delta: MessageDelta, finish_reason: Option, - usage: Option + usage: Option, ) -> ChatCompletionsStreamResponse { ChatCompletionsStreamResponse { id: id.to_string(), @@ -743,7 +797,8 @@ fn create_empty_openai_chunk() -> ChatCompletionsStreamResponse { /// Convert Anthropic tools to OpenAI format fn convert_anthropic_tools(tools: Vec) -> Vec { - tools.into_iter() + tools + .into_iter() .map(|tool| Tool { tool_type: "function".to_string(), function: Function { @@ -758,7 +813,8 @@ fn convert_anthropic_tools(tools: Vec) -> Vec { /// Convert OpenAI tools to Anthropic format fn convert_openai_tools(tools: Vec) -> Vec { - 
tools.into_iter() + tools + .into_iter() .map(|tool| MessagesTool { name: tool.function.name, description: tool.function.description, @@ -768,7 +824,9 @@ fn convert_openai_tools(tools: Vec) -> Vec { } /// Convert Anthropic tool choice to OpenAI format -fn convert_anthropic_tool_choice(tool_choice: Option) -> (Option, Option) { +fn convert_anthropic_tool_choice( + tool_choice: Option, +) -> (Option, Option) { match tool_choice { Some(choice) => { let openai_choice = match choice.kind { @@ -789,45 +847,46 @@ fn convert_anthropic_tool_choice(tool_choice: Option) -> (Op let parallel = choice.disable_parallel_tool_use.map(|disable| !disable); (Some(openai_choice), parallel) } - None => (None, None) + None => (None, None), } } /// Convert OpenAI tool choice to Anthropic format fn convert_openai_tool_choice( tool_choice: Option, - parallel_tool_calls: Option + parallel_tool_calls: Option, ) -> Option { - tool_choice.map(|choice| { - match choice { - ToolChoice::Type(tool_type) => match tool_type { - ToolChoiceType::Auto => MessagesToolChoice { - kind: MessagesToolChoiceType::Auto, - name: None, - disable_parallel_tool_use: parallel_tool_calls.map(|p| !p), - }, - ToolChoiceType::Required => MessagesToolChoice { - kind: MessagesToolChoiceType::Any, - name: None, - disable_parallel_tool_use: parallel_tool_calls.map(|p| !p), - }, - ToolChoiceType::None => MessagesToolChoice { - kind: MessagesToolChoiceType::None, - name: None, - disable_parallel_tool_use: None, - }, - }, - ToolChoice::Function { function, .. 
} => MessagesToolChoice { - kind: MessagesToolChoiceType::Tool, - name: Some(function.name), + tool_choice.map(|choice| match choice { + ToolChoice::Type(tool_type) => match tool_type { + ToolChoiceType::Auto => MessagesToolChoice { + kind: MessagesToolChoiceType::Auto, + name: None, disable_parallel_tool_use: parallel_tool_calls.map(|p| !p), }, - } + ToolChoiceType::Required => MessagesToolChoice { + kind: MessagesToolChoiceType::Any, + name: None, + disable_parallel_tool_use: parallel_tool_calls.map(|p| !p), + }, + ToolChoiceType::None => MessagesToolChoice { + kind: MessagesToolChoiceType::None, + name: None, + disable_parallel_tool_use: None, + }, + }, + ToolChoice::Function { function, .. } => MessagesToolChoice { + kind: MessagesToolChoiceType::Tool, + name: Some(function.name), + disable_parallel_tool_use: parallel_tool_calls.map(|p| !p), + }, }) } /// Build OpenAI message content from parts and tool calls -fn build_openai_content(content_parts: Vec, tool_calls: &[ToolCall]) -> MessageContent { +fn build_openai_content( + content_parts: Vec, + tool_calls: &[ToolCall], +) -> MessageContent { if content_parts.len() == 1 && tool_calls.is_empty() { match &content_parts[0] { ContentPart::Text { text } => MessageContent::Text(text.clone()), @@ -855,7 +914,9 @@ fn build_anthropic_content(content_blocks: Vec) -> Message } /// Convert Anthropic content blocks to OpenAI message content -fn convert_anthropic_content_to_openai(content: &[MessagesContentBlock]) -> Result { +fn convert_anthropic_content_to_openai( + content: &[MessagesContentBlock], +) -> Result { let mut text_parts = Vec::new(); for block in content { @@ -877,21 +938,29 @@ fn convert_anthropic_content_to_openai(content: &[MessagesContentBlock]) -> Resu } /// Convert OpenAI message to Anthropic content blocks -fn convert_openai_message_to_anthropic_content(message: &Message) -> Result, TransformError> { +fn convert_openai_message_to_anthropic_content( + message: &Message, +) -> Result, TransformError> { 
let mut blocks = Vec::new(); // Handle regular content match &message.content { MessageContent::Text(text) => { if !text.is_empty() { - blocks.push(MessagesContentBlock::Text { text: text.clone(), cache_control: None }); + blocks.push(MessagesContentBlock::Text { + text: text.clone(), + cache_control: None, + }); } } MessageContent::Parts(parts) => { for part in parts { match part { ContentPart::Text { text } => { - blocks.push(MessagesContentBlock::Text { text: text.clone(), cache_control: None }); + blocks.push(MessagesContentBlock::Text { + text: text.clone(), + cache_control: None, + }); } ContentPart::ImageUrl { image_url } => { let source = convert_image_url_to_source(image_url); @@ -947,23 +1016,29 @@ fn convert_image_url_to_source(image_url: &ImageUrl) -> MessagesImageSource { data: data.to_string(), } } else { - MessagesImageSource::Url { url: image_url.url.clone() } + MessagesImageSource::Url { + url: image_url.url.clone(), + } } } else { - MessagesImageSource::Url { url: image_url.url.clone() } + MessagesImageSource::Url { + url: image_url.url.clone(), + } } } /// Convert content block start to OpenAI chunk -fn convert_content_block_start(content_block: MessagesContentBlock) -> Result { +fn convert_content_block_start( + content_block: MessagesContentBlock, +) -> Result { match content_block { MessagesContentBlock::Text { .. } => { // No immediate output for text block start Ok(create_empty_openai_chunk()) } - MessagesContentBlock::ToolUse { id, name, .. } | - MessagesContentBlock::ServerToolUse { id, name, .. } | - MessagesContentBlock::McpToolUse { id, name, .. } => { + MessagesContentBlock::ToolUse { id, name, .. } + | MessagesContentBlock::ServerToolUse { id, name, .. } + | MessagesContentBlock::McpToolUse { id, name, .. 
} => { // Tool use start → OpenAI chunk with tool_calls Ok(create_openai_chunk( "stream", @@ -987,71 +1062,71 @@ fn convert_content_block_start(content_block: MessagesContentBlock) -> Result Err(TransformError::UnsupportedContent("Unsupported content block type in stream start".to_string())), + _ => Err(TransformError::UnsupportedContent( + "Unsupported content block type in stream start".to_string(), + )), } } /// Convert content delta to OpenAI chunk -fn convert_content_delta(delta: MessagesContentDelta) -> Result { +fn convert_content_delta( + delta: MessagesContentDelta, +) -> Result { match delta { - MessagesContentDelta::TextDelta { text } => { - Ok(create_openai_chunk( - "stream", - "unknown", - MessageDelta { - role: None, - content: Some(text), - refusal: None, - function_call: None, - tool_calls: None, - }, - None, - None, - )) - } - MessagesContentDelta::ThinkingDelta { thinking } => { - Ok(create_openai_chunk( - "stream", - "unknown", - MessageDelta { - role: None, - content: Some(format!("thinking: {}", thinking)), - refusal: None, - function_call: None, - tool_calls: None, - }, - None, - None, - )) - } - MessagesContentDelta::InputJsonDelta { partial_json } => { - Ok(create_openai_chunk( - "stream", - "unknown", - MessageDelta { - role: None, - content: None, - refusal: None, - function_call: None, - tool_calls: Some(vec![ToolCallDelta { - index: 0, - id: None, - call_type: None, - function: Some(FunctionCallDelta { - name: None, - arguments: Some(partial_json), - }), - }]), - }, - None, - None, - )) - } + MessagesContentDelta::TextDelta { text } => Ok(create_openai_chunk( + "stream", + "unknown", + MessageDelta { + role: None, + content: Some(text), + refusal: None, + function_call: None, + tool_calls: None, + }, + None, + None, + )), + MessagesContentDelta::ThinkingDelta { thinking } => Ok(create_openai_chunk( + "stream", + "unknown", + MessageDelta { + role: None, + content: Some(format!("thinking: {}", thinking)), + refusal: None, + function_call: 
None, + tool_calls: None, + }, + None, + None, + )), + MessagesContentDelta::InputJsonDelta { partial_json } => Ok(create_openai_chunk( + "stream", + "unknown", + MessageDelta { + role: None, + content: None, + refusal: None, + function_call: None, + tool_calls: Some(vec![ToolCallDelta { + index: 0, + id: None, + call_type: None, + function: Some(FunctionCallDelta { + name: None, + arguments: Some(partial_json), + }), + }]), + }, + None, + None, + )), } } /// Convert tool call deltas to Anthropic stream events -fn convert_tool_call_deltas(tool_calls: Vec) -> Result { +fn convert_tool_call_deltas( + tool_calls: Vec, +) -> Result { for tool_call in tool_calls { if let Some(id) = &tool_call.id { // Tool call start @@ -1160,11 +1235,20 @@ mod tests { // Check key fields are preserved assert_eq!(original_anthropic.model, roundtrip_anthropic.model); - assert_eq!(original_anthropic.max_tokens, roundtrip_anthropic.max_tokens); - assert_eq!(original_anthropic.temperature, roundtrip_anthropic.temperature); + assert_eq!( + original_anthropic.max_tokens, + roundtrip_anthropic.max_tokens + ); + assert_eq!( + original_anthropic.temperature, + roundtrip_anthropic.temperature + ); assert_eq!(original_anthropic.top_p, roundtrip_anthropic.top_p); assert_eq!(original_anthropic.stream, roundtrip_anthropic.stream); - assert_eq!(original_anthropic.messages.len(), roundtrip_anthropic.messages.len()); + assert_eq!( + original_anthropic.messages.len(), + roundtrip_anthropic.messages.len() + ); } #[test] @@ -1308,7 +1392,10 @@ mod tests { let tool_calls = choice.delta.tool_calls.as_ref().unwrap(); assert_eq!(tool_calls.len(), 1); assert_eq!(tool_calls[0].id, Some("call_123".to_string())); - assert_eq!(tool_calls[0].function.as_ref().unwrap().name, Some("get_weather".to_string())); + assert_eq!( + tool_calls[0].function.as_ref().unwrap().name, + Some("get_weather".to_string()) + ); } #[test] @@ -1328,7 +1415,10 @@ mod tests { let tool_calls = choice.delta.tool_calls.as_ref().unwrap(); 
assert_eq!(tool_calls.len(), 1); - assert_eq!(tool_calls[0].function.as_ref().unwrap().arguments, Some(r#"{"location": "San Francisco"#.to_string())); + assert_eq!( + tool_calls[0].function.as_ref().unwrap().arguments, + Some(r#"{"location": "San Francisco"#.to_string()) + ); } #[test] @@ -1491,7 +1581,10 @@ mod tests { let anthropic_event: MessagesStreamEvent = openai_resp.try_into().unwrap(); match anthropic_event { - MessagesStreamEvent::ContentBlockStart { index, content_block } => { + MessagesStreamEvent::ContentBlockStart { + index, + content_block, + } => { assert_eq!(index, 0); match content_block { MessagesContentBlock::ToolUse { id, name, .. } => { @@ -1634,16 +1727,28 @@ mod tests { // Verify tool start let tool_calls = &openai_start.choices[0].delta.tool_calls.as_ref().unwrap(); assert_eq!(tool_calls[0].id, Some("call_weather".to_string())); - assert_eq!(tool_calls[0].function.as_ref().unwrap().name, Some("get_weather".to_string())); + assert_eq!( + tool_calls[0].function.as_ref().unwrap().name, + Some("get_weather".to_string()) + ); // Verify argument deltas let args1 = &openai_delta1.choices[0].delta.tool_calls.as_ref().unwrap()[0] - .function.as_ref().unwrap().arguments; + .function + .as_ref() + .unwrap() + .arguments; assert_eq!(args1, &Some(r#"{"location": "#.to_string())); let args2 = &openai_delta2.choices[0].delta.tool_calls.as_ref().unwrap()[0] - .function.as_ref().unwrap().arguments; - assert_eq!(args2, &Some(r#"San Francisco", "unit": "fahrenheit"}"#.to_string())); + .function + .as_ref() + .unwrap() + .arguments; + assert_eq!( + args2, + &Some(r#"San Francisco", "unit": "fahrenheit"}"#.to_string()) + ); } #[test] @@ -1671,14 +1776,23 @@ mod tests { }; let openai_resp: ChatCompletionsStreamResponse = event.try_into().unwrap(); - assert_eq!(openai_resp.choices[0].finish_reason, Some(expected_openai_reason)); + assert_eq!( + openai_resp.choices[0].finish_reason, + Some(expected_openai_reason) + ); // Test reverse conversion let 
roundtrip_event: MessagesStreamEvent = openai_resp.try_into().unwrap(); match roundtrip_event { MessagesStreamEvent::MessageDelta { delta, .. } => { // Note: Some precision may be lost in roundtrip due to mapping differences - assert!(matches!(delta.stop_reason, MessagesStopReason::EndTurn | MessagesStopReason::MaxTokens | MessagesStopReason::ToolUse | MessagesStopReason::StopSequence)); + assert!(matches!( + delta.stop_reason, + MessagesStopReason::EndTurn + | MessagesStopReason::MaxTokens + | MessagesStopReason::ToolUse + | MessagesStopReason::StopSequence + )); } _ => panic!("Expected MessageDelta after roundtrip"), } @@ -1711,7 +1825,8 @@ mod tests { }; // Should convert to Ping when no meaningful content - let anthropic_event: MessagesStreamEvent = openai_resp_with_missing_data.try_into().unwrap(); + let anthropic_event: MessagesStreamEvent = + openai_resp_with_missing_data.try_into().unwrap(); assert!(matches!(anthropic_event, MessagesStreamEvent::Ping)); } diff --git a/crates/hermesllm/src/lib.rs b/crates/hermesllm/src/lib.rs index a9e8c48e..2789947b 100644 --- a/crates/hermesllm/src/lib.rs +++ b/crates/hermesllm/src/lib.rs @@ -1,20 +1,21 @@ //! hermesllm: A library for translating LLM API requests and responses //! between Mistral, Grok, Gemini, and OpenAI-compliant formats. 
-pub mod providers; pub mod apis; pub mod clients; +pub mod providers; // Re-export important types and traits -pub use providers::request::{ProviderRequestType, ProviderRequest, ProviderRequestError}; -pub use providers::response::{ProviderResponseType, ProviderStreamResponseType, ProviderResponse, ProviderStreamResponse, ProviderResponseError, TokenUsage, SseEvent, SseStreamIter}; pub use providers::id::ProviderId; - +pub use providers::request::{ProviderRequest, ProviderRequestError, ProviderRequestType}; +pub use providers::response::{ + ProviderResponse, ProviderResponseError, ProviderResponseType, ProviderStreamResponse, + ProviderStreamResponseType, SseEvent, SseStreamIter, TokenUsage, +}; //TODO: Refactor such that commons doesn't depend on Hermes. For now this will clean up strings pub const CHAT_COMPLETIONS_PATH: &str = "/v1/chat/completions"; pub const MESSAGES_PATH: &str = "/v1/messages"; - #[cfg(test)] mod tests { use super::*; @@ -30,48 +31,50 @@ mod tests { #[test] fn test_provider_streaming_response() { // Test streaming response parsing with sample SSE data - let sse_data = r#"data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]} + let sse_data = r#"data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-4","choices":[{"index":0,"delta":{"role":"assistant","content":"Hello"},"finish_reason":null}]} data: [DONE] "#; - use crate::clients::endpoints::SupportedAPIs; - let client_api = SupportedAPIs::OpenAIChatCompletions(crate::apis::OpenAIApi::ChatCompletions); - let upstream_api = SupportedAPIs::OpenAIChatCompletions(crate::apis::OpenAIApi::ChatCompletions); + use crate::clients::endpoints::SupportedAPIs; + let client_api = + SupportedAPIs::OpenAIChatCompletions(crate::apis::OpenAIApi::ChatCompletions); + let upstream_api = + 
SupportedAPIs::OpenAIChatCompletions(crate::apis::OpenAIApi::ChatCompletions); - // Test the new simplified architecture - create SseStreamIter directly - let sse_iter = SseStreamIter::try_from(sse_data.as_bytes()); - assert!(sse_iter.is_ok()); + // Test the new simplified architecture - create SseStreamIter directly + let sse_iter = SseStreamIter::try_from(sse_data.as_bytes()); + assert!(sse_iter.is_ok()); - let mut streaming_iter = sse_iter.unwrap(); + let mut streaming_iter = sse_iter.unwrap(); - // Test that we can iterate over SseEvents - let first_event = streaming_iter.next(); - assert!(first_event.is_some()); + // Test that we can iterate over SseEvents + let first_event = streaming_iter.next(); + assert!(first_event.is_some()); - let sse_event = first_event.unwrap(); + let sse_event = first_event.unwrap(); - // Test SseEvent properties - assert!(!sse_event.is_done()); - assert!(sse_event.data.as_ref().unwrap().contains("Hello")); + // Test SseEvent properties + assert!(!sse_event.is_done()); + assert!(sse_event.data.as_ref().unwrap().contains("Hello")); - // Test that we can parse the event into a provider stream response - let transformed_event = SseEvent::try_from((sse_event, &client_api, &upstream_api)); - if let Err(e) = &transformed_event { - println!("Transform error: {:?}", e); - } - assert!(transformed_event.is_ok()); + // Test that we can parse the event into a provider stream response + let transformed_event = SseEvent::try_from((sse_event, &client_api, &upstream_api)); + if let Err(e) = &transformed_event { + println!("Transform error: {:?}", e); + } + assert!(transformed_event.is_ok()); - let transformed_event = transformed_event.unwrap(); - let provider_response = transformed_event.provider_response(); - assert!(provider_response.is_ok()); + let transformed_event = transformed_event.unwrap(); + let provider_response = transformed_event.provider_response(); + assert!(provider_response.is_ok()); - let stream_response = 
provider_response.unwrap(); - assert_eq!(stream_response.content_delta(), Some("Hello")); - assert!(!stream_response.is_final()); + let stream_response = provider_response.unwrap(); + assert_eq!(stream_response.content_delta(), Some("Hello")); + assert!(!stream_response.is_final()); - // Test that stream ends properly with [DONE] (SseStreamIter should stop before [DONE]) - let final_event = streaming_iter.next(); - assert!(final_event.is_none()); // Should be None because iterator stops at [DONE] + // Test that stream ends properly with [DONE] (SseStreamIter should stop before [DONE]) + let final_event = streaming_iter.next(); + assert!(final_event.is_none()); // Should be None because iterator stops at [DONE] } } diff --git a/crates/hermesllm/src/providers/id.rs b/crates/hermesllm/src/providers/id.rs index 46b9cf93..b898d7d7 100644 --- a/crates/hermesllm/src/providers/id.rs +++ b/crates/hermesllm/src/providers/id.rs @@ -1,6 +1,6 @@ -use std::fmt::Display; +use crate::apis::{AnthropicApi, OpenAIApi}; use crate::clients::endpoints::SupportedAPIs; -use crate::apis::{OpenAIApi, AnthropicApi}; +use std::fmt::Display; /// Provider identifier enum - simple enum for identifying providers #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -50,41 +50,50 @@ impl ProviderId { pub fn compatible_api_for_client(&self, client_api: &SupportedAPIs) -> SupportedAPIs { match (self, client_api) { // Claude/Anthropic providers natively support Anthropic APIs - (ProviderId::Anthropic, SupportedAPIs::AnthropicMessagesAPI(_)) => SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages), - (ProviderId::Anthropic, SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions)) => SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + (ProviderId::Anthropic, SupportedAPIs::AnthropicMessagesAPI(_)) => { + SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages) + } + ( + ProviderId::Anthropic, + SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + ) => 
SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), // OpenAI-compatible providers only support OpenAI chat completions - (ProviderId::OpenAI - | ProviderId::Groq - | ProviderId::Mistral - | ProviderId::Deepseek - | ProviderId::Arch - | ProviderId::Gemini - | ProviderId::GitHub - | ProviderId::AzureOpenAI - | ProviderId::XAI - | ProviderId::TogetherAI - | ProviderId::Ollama - | ProviderId::Moonshotai - | ProviderId::Zhipu - | ProviderId::Qwen, - SupportedAPIs::AnthropicMessagesAPI(_)) => SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + ( + ProviderId::OpenAI + | ProviderId::Groq + | ProviderId::Mistral + | ProviderId::Deepseek + | ProviderId::Arch + | ProviderId::Gemini + | ProviderId::GitHub + | ProviderId::AzureOpenAI + | ProviderId::XAI + | ProviderId::TogetherAI + | ProviderId::Ollama + | ProviderId::Moonshotai + | ProviderId::Zhipu + | ProviderId::Qwen, + SupportedAPIs::AnthropicMessagesAPI(_), + ) => SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), - (ProviderId::OpenAI - | ProviderId::Groq - | ProviderId::Mistral - | ProviderId::Deepseek - | ProviderId::Arch - | ProviderId::Gemini - | ProviderId::GitHub - | ProviderId::AzureOpenAI - | ProviderId::XAI - | ProviderId::TogetherAI - | ProviderId::Ollama - | ProviderId::Moonshotai - | ProviderId::Zhipu - | ProviderId::Qwen, - SupportedAPIs::OpenAIChatCompletions(_)) => SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + ( + ProviderId::OpenAI + | ProviderId::Groq + | ProviderId::Mistral + | ProviderId::Deepseek + | ProviderId::Arch + | ProviderId::Gemini + | ProviderId::GitHub + | ProviderId::AzureOpenAI + | ProviderId::XAI + | ProviderId::TogetherAI + | ProviderId::Ollama + | ProviderId::Moonshotai + | ProviderId::Zhipu + | ProviderId::Qwen, + SupportedAPIs::OpenAIChatCompletions(_), + ) => SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), } } } diff --git a/crates/hermesllm/src/providers/mod.rs 
b/crates/hermesllm/src/providers/mod.rs index 601af955..97b14285 100644 --- a/crates/hermesllm/src/providers/mod.rs +++ b/crates/hermesllm/src/providers/mod.rs @@ -8,5 +8,5 @@ pub mod request; pub mod response; pub use id::ProviderId; -pub use request::{ProviderRequestType, ProviderRequest, ProviderRequestError} ; -pub use response::{ProviderResponseType, ProviderResponse, ProviderStreamResponse, TokenUsage }; +pub use request::{ProviderRequest, ProviderRequestError, ProviderRequestType}; +pub use response::{ProviderResponse, ProviderResponseType, ProviderStreamResponse, TokenUsage}; diff --git a/crates/hermesllm/src/providers/request.rs b/crates/hermesllm/src/providers/request.rs index 3603edf2..1cee7169 100644 --- a/crates/hermesllm/src/providers/request.rs +++ b/crates/hermesllm/src/providers/request.rs @@ -1,11 +1,11 @@ -use crate::apis::openai::ChatCompletionsRequest; use crate::apis::anthropic::MessagesRequest; +use crate::apis::openai::ChatCompletionsRequest; use crate::clients::endpoints::SupportedAPIs; use serde_json::Value; +use std::collections::HashMap; use std::error::Error; use std::fmt; -use std::collections::HashMap; #[derive(Clone)] pub enum ProviderRequestType { ChatCompletionsRequest(ChatCompletionsRequest), @@ -103,15 +103,18 @@ impl TryFrom<(&[u8], &SupportedAPIs)> for ProviderRequestType { // Use SupportedApi to determine the appropriate request type match client_api { SupportedAPIs::OpenAIChatCompletions(_) => { - let chat_completion_request: ChatCompletionsRequest = ChatCompletionsRequest::try_from(bytes) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - Ok(ProviderRequestType::ChatCompletionsRequest(chat_completion_request)) - } - SupportedAPIs::AnthropicMessagesAPI(_) => { - let messages_request: MessagesRequest = MessagesRequest::try_from(bytes) + let chat_completion_request: ChatCompletionsRequest = + ChatCompletionsRequest::try_from(bytes) .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; 
- Ok(ProviderRequestType::MessagesRequest(messages_request)) - } + Ok(ProviderRequestType::ChatCompletionsRequest( + chat_completion_request, + )) + } + SupportedAPIs::AnthropicMessagesAPI(_) => { + let messages_request: MessagesRequest = MessagesRequest::try_from(bytes) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; + Ok(ProviderRequestType::MessagesRequest(messages_request)) + } } } } @@ -120,40 +123,55 @@ impl TryFrom<(&[u8], &SupportedAPIs)> for ProviderRequestType { impl TryFrom<(ProviderRequestType, &SupportedAPIs)> for ProviderRequestType { type Error = ProviderRequestError; - fn try_from((request, upstream_api): (ProviderRequestType, &SupportedAPIs)) -> Result { + fn try_from( + (request, upstream_api): (ProviderRequestType, &SupportedAPIs), + ) -> Result { match (request, upstream_api) { // Same API - no conversion needed, just clone the reference - (ProviderRequestType::ChatCompletionsRequest(chat_req), SupportedAPIs::OpenAIChatCompletions(_)) => { - Ok(ProviderRequestType::ChatCompletionsRequest(chat_req)) - } - (ProviderRequestType::MessagesRequest(messages_req), SupportedAPIs::AnthropicMessagesAPI(_)) => { - Ok(ProviderRequestType::MessagesRequest(messages_req)) - } + ( + ProviderRequestType::ChatCompletionsRequest(chat_req), + SupportedAPIs::OpenAIChatCompletions(_), + ) => Ok(ProviderRequestType::ChatCompletionsRequest(chat_req)), + ( + ProviderRequestType::MessagesRequest(messages_req), + SupportedAPIs::AnthropicMessagesAPI(_), + ) => Ok(ProviderRequestType::MessagesRequest(messages_req)), // Cross-API conversion - cloning is necessary for transformation - (ProviderRequestType::ChatCompletionsRequest(chat_req), SupportedAPIs::AnthropicMessagesAPI(_)) => { - let messages_req = MessagesRequest::try_from(chat_req) - .map_err(|e| ProviderRequestError { - message: format!("Failed to convert ChatCompletionsRequest to MessagesRequest: {}", e), - source: Some(Box::new(e)) + ( + 
ProviderRequestType::ChatCompletionsRequest(chat_req), + SupportedAPIs::AnthropicMessagesAPI(_), + ) => { + let messages_req = + MessagesRequest::try_from(chat_req).map_err(|e| ProviderRequestError { + message: format!( + "Failed to convert ChatCompletionsRequest to MessagesRequest: {}", + e + ), + source: Some(Box::new(e)), })?; Ok(ProviderRequestType::MessagesRequest(messages_req)) } - (ProviderRequestType::MessagesRequest(messages_req), SupportedAPIs::OpenAIChatCompletions(_)) => { - let chat_req = ChatCompletionsRequest::try_from(messages_req) - .map_err(|e| ProviderRequestError { - message: format!("Failed to convert MessagesRequest to ChatCompletionsRequest: {}", e), - source: Some(Box::new(e)) - })?; + ( + ProviderRequestType::MessagesRequest(messages_req), + SupportedAPIs::OpenAIChatCompletions(_), + ) => { + let chat_req = ChatCompletionsRequest::try_from(messages_req).map_err(|e| { + ProviderRequestError { + message: format!( + "Failed to convert MessagesRequest to ChatCompletionsRequest: {}", + e + ), + source: Some(Box::new(e)), + } + })?; Ok(ProviderRequestType::ChatCompletionsRequest(chat_req)) } } } } - - /// Error types for provider operations #[derive(Debug)] pub struct ProviderRequestError { @@ -169,19 +187,20 @@ impl fmt::Display for ProviderRequestError { impl Error for ProviderRequestError { fn source(&self) -> Option<&(dyn Error + 'static)> { - self.source.as_ref().map(|e| e.as_ref() as &(dyn Error + 'static)) + self.source + .as_ref() + .map(|e| e.as_ref() as &(dyn Error + 'static)) } } - #[cfg(test)] mod tests { use super::*; - use crate::clients::endpoints::SupportedAPIs; use crate::apis::anthropic::AnthropicApi::Messages; - use crate::apis::openai::OpenAIApi::ChatCompletions; use crate::apis::anthropic::MessagesRequest as AnthropicMessagesRequest; - use crate::apis::openai::{ChatCompletionsRequest}; + use crate::apis::openai::ChatCompletionsRequest; + use crate::apis::openai::OpenAIApi::ChatCompletions; + use 
crate::clients::endpoints::SupportedAPIs; use crate::clients::transformer::ExtractText; use serde_json::json; @@ -202,7 +221,7 @@ mod tests { ProviderRequestType::ChatCompletionsRequest(r) => { assert_eq!(r.model, "gpt-4"); assert_eq!(r.messages.len(), 2); - }, + } _ => panic!("Expected ChatCompletionsRequest variant"), } } @@ -225,7 +244,7 @@ mod tests { ProviderRequestType::MessagesRequest(r) => { assert_eq!(r.model, "claude-3-sonnet"); assert_eq!(r.messages.len(), 1); - }, + } _ => panic!("Expected MessagesRequest variant"), } } @@ -247,7 +266,7 @@ mod tests { ProviderRequestType::ChatCompletionsRequest(r) => { assert_eq!(r.model, "gpt-4"); assert_eq!(r.messages.len(), 2); - }, + } _ => panic!("Expected ChatCompletionsRequest variant"), } } @@ -271,7 +290,7 @@ mod tests { ProviderRequestType::ChatCompletionsRequest(r) => { assert_eq!(r.model, "claude-3-sonnet"); assert_eq!(r.messages.len(), 1); - }, + } _ => panic!("Expected ChatCompletionsRequest variant"), } } @@ -280,13 +299,15 @@ mod tests { fn test_v1_messages_to_v1_chat_completions_roundtrip() { let anthropic_req = AnthropicMessagesRequest { model: "claude-3-sonnet".to_string(), - system: Some(crate::apis::anthropic::MessagesSystemPrompt::Single("You are a helpful assistant".to_string())), - messages: vec![ - crate::apis::anthropic::MessagesMessage { - role: crate::apis::anthropic::MessagesRole::User, - content: crate::apis::anthropic::MessagesMessageContent::Single("Hello!".to_string()), - } - ], + system: Some(crate::apis::anthropic::MessagesSystemPrompt::Single( + "You are a helpful assistant".to_string(), + )), + messages: vec![crate::apis::anthropic::MessagesMessage { + role: crate::apis::anthropic::MessagesRole::User, + content: crate::apis::anthropic::MessagesMessageContent::Single( + "Hello!".to_string(), + ), + }], max_tokens: 128, container: None, mcp_servers: None, @@ -302,16 +323,27 @@ mod tests { metadata: None, }; - let openai_req = 
ChatCompletionsRequest::try_from(anthropic_req.clone()).expect("Anthropic->OpenAI conversion failed"); - let anthropic_req2 = AnthropicMessagesRequest::try_from(openai_req).expect("OpenAI->Anthropic conversion failed"); + let openai_req = ChatCompletionsRequest::try_from(anthropic_req.clone()) + .expect("Anthropic->OpenAI conversion failed"); + let anthropic_req2 = AnthropicMessagesRequest::try_from(openai_req) + .expect("OpenAI->Anthropic conversion failed"); assert_eq!(anthropic_req.model, anthropic_req2.model); // Compare system prompt text if present assert_eq!( - anthropic_req.system.as_ref().and_then(|s| match s { crate::apis::anthropic::MessagesSystemPrompt::Single(t) => Some(t), _ => None }), - anthropic_req2.system.as_ref().and_then(|s| match s { crate::apis::anthropic::MessagesSystemPrompt::Single(t) => Some(t), _ => None }) + anthropic_req.system.as_ref().and_then(|s| match s { + crate::apis::anthropic::MessagesSystemPrompt::Single(t) => Some(t), + _ => None, + }), + anthropic_req2.system.as_ref().and_then(|s| match s { + crate::apis::anthropic::MessagesSystemPrompt::Single(t) => Some(t), + _ => None, + }) + ); + assert_eq!( + anthropic_req.messages[0].role, + anthropic_req2.messages[0].role ); - assert_eq!(anthropic_req.messages[0].role, anthropic_req2.messages[0].role); // Compare message content text if present assert_eq!( anthropic_req.messages[0].content.extract_text(), @@ -320,49 +352,54 @@ mod tests { assert_eq!(anthropic_req.max_tokens, anthropic_req2.max_tokens); } - #[test] - fn test_v1_chat_completions_to_v1_messages_roundtrip() { - use crate::apis::anthropic::MessagesRequest as AnthropicMessagesRequest; - use crate::apis::openai::{ChatCompletionsRequest, Message, Role, MessageContent}; + #[test] + fn test_v1_chat_completions_to_v1_messages_roundtrip() { + use crate::apis::anthropic::MessagesRequest as AnthropicMessagesRequest; + use crate::apis::openai::{ChatCompletionsRequest, Message, MessageContent, Role}; - let openai_req = 
ChatCompletionsRequest { - model: "gpt-4".to_string(), - messages: vec![ - Message { - role: Role::System, - content: MessageContent::Text("You are a helpful assistant".to_string()), - name: None, - tool_calls: None, - tool_call_id: None, - }, - Message { - role: Role::User, - content: MessageContent::Text("Hello!".to_string()), - name: None, - tool_calls: None, - tool_call_id: None, - } - ], - temperature: Some(0.7), - top_p: Some(1.0), - max_tokens: Some(128), - stream: Some(false), - stop: Some(vec!["\n".to_string()]), - tools: None, - tool_choice: None, - parallel_tool_calls: None, - ..Default::default() - }; + let openai_req = ChatCompletionsRequest { + model: "gpt-4".to_string(), + messages: vec![ + Message { + role: Role::System, + content: MessageContent::Text("You are a helpful assistant".to_string()), + name: None, + tool_calls: None, + tool_call_id: None, + }, + Message { + role: Role::User, + content: MessageContent::Text("Hello!".to_string()), + name: None, + tool_calls: None, + tool_call_id: None, + }, + ], + temperature: Some(0.7), + top_p: Some(1.0), + max_tokens: Some(128), + stream: Some(false), + stop: Some(vec!["\n".to_string()]), + tools: None, + tool_choice: None, + parallel_tool_calls: None, + ..Default::default() + }; - let anthropic_req = AnthropicMessagesRequest::try_from(openai_req.clone()).expect("OpenAI->Anthropic conversion failed"); - let openai_req2 = ChatCompletionsRequest::try_from(anthropic_req).expect("Anthropic->OpenAI conversion failed"); + let anthropic_req = AnthropicMessagesRequest::try_from(openai_req.clone()) + .expect("OpenAI->Anthropic conversion failed"); + let openai_req2 = ChatCompletionsRequest::try_from(anthropic_req) + .expect("Anthropic->OpenAI conversion failed"); - assert_eq!(openai_req.model, openai_req2.model); - assert_eq!(openai_req.messages[0].role, openai_req2.messages[0].role); - assert_eq!(openai_req.messages[0].content.extract_text(), openai_req2.messages[0].content.extract_text()); - // After 
roundtrip, deprecated max_tokens should be converted to max_completion_tokens - let original_max_tokens = openai_req.max_completion_tokens.or(openai_req.max_tokens); - let roundtrip_max_tokens = openai_req2.max_completion_tokens.or(openai_req2.max_tokens); - assert_eq!(original_max_tokens, roundtrip_max_tokens); - } + assert_eq!(openai_req.model, openai_req2.model); + assert_eq!(openai_req.messages[0].role, openai_req2.messages[0].role); + assert_eq!( + openai_req.messages[0].content.extract_text(), + openai_req2.messages[0].content.extract_text() + ); + // After roundtrip, deprecated max_tokens should be converted to max_completion_tokens + let original_max_tokens = openai_req.max_completion_tokens.or(openai_req.max_tokens); + let roundtrip_max_tokens = openai_req2.max_completion_tokens.or(openai_req2.max_tokens); + assert_eq!(original_max_tokens, roundtrip_max_tokens); + } } diff --git a/crates/hermesllm/src/providers/response.rs b/crates/hermesllm/src/providers/response.rs index 6bc4e25f..5f4607df 100644 --- a/crates/hermesllm/src/providers/response.rs +++ b/crates/hermesllm/src/providers/response.rs @@ -1,15 +1,15 @@ use crate::providers::id::ProviderId; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use std::convert::TryFrom; use std::error::Error; use std::fmt; -use std::convert::TryFrom; use std::str::FromStr; +use crate::apis::anthropic::MessagesResponse; +use crate::apis::anthropic::MessagesStreamEvent; use crate::apis::openai::ChatCompletionsResponse; use crate::apis::openai::ChatCompletionsStreamResponse; -use crate::apis::anthropic::MessagesStreamEvent; use crate::clients::endpoints::SupportedAPIs; -use crate::apis::anthropic::MessagesResponse; /// Trait for token usage information pub trait TokenUsage { @@ -38,7 +38,8 @@ pub trait ProviderResponse: Send + Sync { /// Extract token counts for metrics fn extract_usage_counts(&self) -> Option<(usize, usize, usize)> { - self.usage().map(|u| (u.prompt_tokens(), 
u.completion_tokens(), u.total_tokens())) + self.usage() + .map(|u| (u.prompt_tokens(), u.completion_tokens(), u.total_tokens())) } } @@ -110,19 +111,19 @@ impl ProviderStreamResponse for ProviderStreamResponseType { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SseEvent { #[serde(rename = "data")] - pub data: Option, // The JSON payload after "data: " + pub data: Option, // The JSON payload after "data: " #[serde(skip_serializing_if = "Option::is_none")] - pub event: Option, // Optional event type (e.g., "message_start", "content_block_delta") + pub event: Option, // Optional event type (e.g., "message_start", "content_block_delta") #[serde(skip_serializing, skip_deserializing)] - pub raw_line: String, // The complete line as received including "data: " prefix and "\n\n" - - #[serde(skip_serializing, skip_deserializing)] - pub sse_transform_buffer: String, // The complete line as received including "data: " prefix and "\n\n" + pub raw_line: String, // The complete line as received including "data: " prefix and "\n\n" #[serde(skip_serializing, skip_deserializing)] - pub provider_stream_response: Option, // Parsed provider stream response object + pub sse_transform_buffer: String, // The complete line as received including "data: " prefix and "\n\n" + + #[serde(skip_serializing, skip_deserializing)] + pub provider_stream_response: Option, // Parsed provider stream response object } impl SseEvent { @@ -145,13 +146,13 @@ impl SseEvent { /// Get the parsed provider response if available pub fn provider_response(&self) -> Result<&dyn ProviderStreamResponse, std::io::Error> { - self.provider_stream_response.as_ref() + self.provider_stream_response + .as_ref() .map(|resp| resp as &dyn ProviderStreamResponse) .ok_or_else(|| { std::io::Error::new(std::io::ErrorKind::NotFound, "Provider response not found") }) } - } impl FromStr for SseEvent { @@ -172,7 +173,8 @@ impl FromStr for SseEvent { sse_transform_buffer: line.to_string(), provider_stream_response: None, 
}) - } else if line.starts_with("event: ") { //used by Anthropic + } else if line.starts_with("event: ") { + //used by Anthropic let event_type = line[7..].to_string(); if event_type.is_empty() { return Err(SseParseError { @@ -207,12 +209,13 @@ impl Into> for SseEvent { } } - // --- Response transformation logic for client API compatibility --- impl TryFrom<(&[u8], &SupportedAPIs, &ProviderId)> for ProviderResponseType { type Error = std::io::Error; - fn try_from((bytes, client_api, provider_id): (&[u8], &SupportedAPIs, &ProviderId)) -> Result { + fn try_from( + (bytes, client_api, provider_id): (&[u8], &SupportedAPIs, &ProviderId), + ) -> Result { let upstream_api = provider_id.compatible_api_for_client(client_api); match (&upstream_api, client_api) { (SupportedAPIs::OpenAIChatCompletions(_), SupportedAPIs::OpenAIChatCompletions(_)) => { @@ -230,8 +233,13 @@ impl TryFrom<(&[u8], &SupportedAPIs, &ProviderId)> for ProviderResponseType { .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; // Transform to OpenAI ChatCompletions format using the transformer - let chat_resp: ChatCompletionsResponse = anthropic_resp.try_into() - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Transformation error: {}", e)))?; + let chat_resp: ChatCompletionsResponse = + anthropic_resp.try_into().map_err(|e| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Transformation error: {}", e), + ) + })?; Ok(ProviderResponseType::ChatCompletionsResponse(chat_resp)) } (SupportedAPIs::OpenAIChatCompletions(_), SupportedAPIs::AnthropicMessagesAPI(_)) => { @@ -239,8 +247,12 @@ impl TryFrom<(&[u8], &SupportedAPIs, &ProviderId)> for ProviderResponseType { .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; // Transform to Anthropic Messages format using the transformer - let messages_resp: MessagesResponse = openai_resp.try_into() - .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, 
format!("Transformation error: {}", e)))?; + let messages_resp: MessagesResponse = openai_resp.try_into().map_err(|e| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Transformation error: {}", e), + ) + })?; Ok(ProviderResponseType::MessagesResponse(messages_resp)) } } @@ -251,36 +263,50 @@ impl TryFrom<(&[u8], &SupportedAPIs, &ProviderId)> for ProviderResponseType { impl TryFrom<(&[u8], &SupportedAPIs, &SupportedAPIs)> for ProviderStreamResponseType { type Error = Box; - fn try_from((bytes, client_api, upstream_api): (&[u8], &SupportedAPIs, &SupportedAPIs)) -> Result { + fn try_from( + (bytes, client_api, upstream_api): (&[u8], &SupportedAPIs, &SupportedAPIs), + ) -> Result { match (upstream_api, client_api) { (SupportedAPIs::OpenAIChatCompletions(_), SupportedAPIs::OpenAIChatCompletions(_)) => { - let resp: crate::apis::openai::ChatCompletionsStreamResponse = serde_json::from_slice(bytes)?; - Ok(ProviderStreamResponseType::ChatCompletionsStreamResponse(resp)) + let resp: crate::apis::openai::ChatCompletionsStreamResponse = + serde_json::from_slice(bytes)?; + Ok(ProviderStreamResponseType::ChatCompletionsStreamResponse( + resp, + )) } (SupportedAPIs::AnthropicMessagesAPI(_), SupportedAPIs::AnthropicMessagesAPI(_)) => { - let resp: crate::apis::anthropic::MessagesStreamEvent = serde_json::from_slice(bytes)?; + let resp: crate::apis::anthropic::MessagesStreamEvent = + serde_json::from_slice(bytes)?; Ok(ProviderStreamResponseType::MessagesStreamEvent(resp)) } (SupportedAPIs::AnthropicMessagesAPI(_), SupportedAPIs::OpenAIChatCompletions(_)) => { - let anthropic_resp: crate::apis::anthropic::MessagesStreamEvent = serde_json::from_slice(bytes)?; + let anthropic_resp: crate::apis::anthropic::MessagesStreamEvent = + serde_json::from_slice(bytes)?; // Transform to OpenAI ChatCompletions stream format using the transformer - let chat_resp: crate::apis::openai::ChatCompletionsStreamResponse = anthropic_resp.try_into()?; - 
Ok(ProviderStreamResponseType::ChatCompletionsStreamResponse(chat_resp)) + let chat_resp: crate::apis::openai::ChatCompletionsStreamResponse = + anthropic_resp.try_into()?; + Ok(ProviderStreamResponseType::ChatCompletionsStreamResponse( + chat_resp, + )) } (SupportedAPIs::OpenAIChatCompletions(_), SupportedAPIs::AnthropicMessagesAPI(_)) => { // Special case: Handle [DONE] marker for OpenAI -> Anthropic conversion if bytes == b"[DONE]" { return Ok(ProviderStreamResponseType::MessagesStreamEvent( - crate::apis::anthropic::MessagesStreamEvent::MessageStop + crate::apis::anthropic::MessagesStreamEvent::MessageStop, )); } - let openai_resp: crate::apis::openai::ChatCompletionsStreamResponse = serde_json::from_slice(bytes)?; + let openai_resp: crate::apis::openai::ChatCompletionsStreamResponse = + serde_json::from_slice(bytes)?; // Transform to Anthropic Messages stream format using the transformer - let messages_resp: crate::apis::anthropic::MessagesStreamEvent = openai_resp.try_into()?; - Ok(ProviderStreamResponseType::MessagesStreamEvent(messages_resp)) + let messages_resp: crate::apis::anthropic::MessagesStreamEvent = + openai_resp.try_into()?; + Ok(ProviderStreamResponseType::MessagesStreamEvent( + messages_resp, + )) } } } @@ -290,7 +316,9 @@ impl TryFrom<(&[u8], &SupportedAPIs, &SupportedAPIs)> for ProviderStreamResponse impl TryFrom<(SseEvent, &SupportedAPIs, &SupportedAPIs)> for SseEvent { type Error = Box; - fn try_from((sse_event, client_api, upstream_api): (SseEvent, &SupportedAPIs, &SupportedAPIs)) -> Result { + fn try_from( + (sse_event, client_api, upstream_api): (SseEvent, &SupportedAPIs, &SupportedAPIs), + ) -> Result { // Create a new transformed event based on the original let mut transformed_event = sse_event; @@ -298,7 +326,8 @@ impl TryFrom<(SseEvent, &SupportedAPIs, &SupportedAPIs)> for SseEvent { if transformed_event.data.is_some() { let data_str = transformed_event.data.as_ref().unwrap(); let data_bytes = data_str.as_bytes(); - let 
transformed_response = ProviderStreamResponseType::try_from((data_bytes, client_api, upstream_api))?; + let transformed_response = + ProviderStreamResponseType::try_from((data_bytes, client_api, upstream_api))?; let transformed_json = serde_json::to_string(&transformed_response)?; transformed_event.sse_transform_buffer = format!("data: {}\n\n", transformed_json); transformed_event.provider_stream_response = Some(transformed_response); @@ -344,7 +373,10 @@ impl TryFrom<(SseEvent, &SupportedAPIs, &SupportedAPIs)> for SseEvent { transformed_event.sse_transform_buffer ); } else { - transformed_event.sse_transform_buffer = format!("event: {}\n{}", event_type, transformed_event.sse_transform_buffer); + transformed_event.sse_transform_buffer = format!( + "event: {}\n{}", + event_type, transformed_event.sse_transform_buffer + ); } } // If event_type is None, we just keep the data line as-is without an event line @@ -396,7 +428,10 @@ where I::Item: AsRef, { pub fn new(lines: I) -> Self { - Self { lines, done_seen: false } + Self { + lines, + done_seen: false, + } } } @@ -451,7 +486,6 @@ pub struct ProviderResponseError { pub source: Option>, } - impl fmt::Display for ProviderResponseError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Provider response error: {}", self.message) @@ -460,17 +494,19 @@ impl fmt::Display for ProviderResponseError { impl Error for ProviderResponseError { fn source(&self) -> Option<&(dyn Error + 'static)> { - self.source.as_ref().map(|e| e.as_ref() as &(dyn Error + 'static)) + self.source + .as_ref() + .map(|e| e.as_ref() as &(dyn Error + 'static)) } } #[cfg(test)] mod tests { use super::*; + use crate::apis::anthropic::AnthropicApi; + use crate::apis::openai::OpenAIApi; use crate::clients::endpoints::SupportedAPIs; use crate::providers::id::ProviderId; - use crate::apis::openai::OpenAIApi; - use crate::apis::anthropic::AnthropicApi; use serde_json::json; #[test] @@ -491,13 +527,17 @@ mod tests { "system_fingerprint": 
null }); let bytes = serde_json::to_vec(&resp).unwrap(); - let result = ProviderResponseType::try_from((bytes.as_slice(), &SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), &ProviderId::OpenAI)); + let result = ProviderResponseType::try_from(( + bytes.as_slice(), + &SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + &ProviderId::OpenAI, + )); assert!(result.is_ok()); match result.unwrap() { ProviderResponseType::ChatCompletionsResponse(r) => { assert_eq!(r.model, "gpt-4"); assert_eq!(r.choices.len(), 1); - }, + } _ => panic!("Expected ChatCompletionsResponse variant"), } } @@ -516,13 +556,17 @@ mod tests { "usage": { "input_tokens": 10, "output_tokens": 25, "cache_creation_input_tokens": 5, "cache_read_input_tokens": 3 } }); let bytes = serde_json::to_vec(&resp).unwrap(); - let result = ProviderResponseType::try_from((bytes.as_slice(), &SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages), &ProviderId::Anthropic)); + let result = ProviderResponseType::try_from(( + bytes.as_slice(), + &SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages), + &ProviderId::Anthropic, + )); assert!(result.is_ok()); match result.unwrap() { ProviderResponseType::MessagesResponse(r) => { assert_eq!(r.model, "claude-3-sonnet-20240229"); assert_eq!(r.content.len(), 1); - }, + } _ => panic!("Expected MessagesResponse variant"), } } @@ -546,14 +590,18 @@ mod tests { "usage": { "prompt_tokens": 10, "completion_tokens": 25, "total_tokens": 35 } }); let bytes = serde_json::to_vec(&resp).unwrap(); - let result = ProviderResponseType::try_from((bytes.as_slice(), &SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages), &ProviderId::OpenAI)); + let result = ProviderResponseType::try_from(( + bytes.as_slice(), + &SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages), + &ProviderId::OpenAI, + )); assert!(result.is_ok()); match result.unwrap() { ProviderResponseType::MessagesResponse(r) => { assert_eq!(r.model, "gpt-4"); 
assert_eq!(r.usage.input_tokens, 10); assert_eq!(r.usage.output_tokens, 25); - }, + } _ => panic!("Expected MessagesResponse variant"), } } @@ -584,14 +632,18 @@ mod tests { } }); let bytes = serde_json::to_vec(&resp).unwrap(); - let result = ProviderResponseType::try_from((bytes.as_slice(), &SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), &ProviderId::Anthropic)); + let result = ProviderResponseType::try_from(( + bytes.as_slice(), + &SupportedAPIs::OpenAIChatCompletions(OpenAIApi::ChatCompletions), + &ProviderId::Anthropic, + )); assert!(result.is_ok()); match result.unwrap() { ProviderResponseType::ChatCompletionsResponse(r) => { assert_eq!(r.model, "claude-3-sonnet-20240229"); assert_eq!(r.usage.prompt_tokens, 10); assert_eq!(r.usage.completion_tokens, 25); - }, + } _ => panic!("Expected ChatCompletionsResponse variant"), } } @@ -603,11 +655,17 @@ mod tests { let event: Result = line.parse(); assert!(event.is_ok()); let event = event.unwrap(); - assert_eq!(event.data, Some("{\"id\":\"test\",\"object\":\"chat.completion.chunk\"}\n\n".to_string())); + assert_eq!( + event.data, + Some("{\"id\":\"test\",\"object\":\"chat.completion.chunk\"}\n\n".to_string()) + ); // Test conversion back to line using Display trait let wire_format = event.to_string(); - assert_eq!(wire_format, "data: {\"id\":\"test\",\"object\":\"chat.completion.chunk\"}\n\n"); + assert_eq!( + wire_format, + "data: {\"id\":\"test\",\"object\":\"chat.completion.chunk\"}\n\n" + ); // Test [DONE] marker - should be valid SSE event let done_line = "data: [DONE]"; @@ -639,10 +697,12 @@ mod tests { event: None, raw_line: r#"data: {"id":"test","object":"chat.completion.chunk"} - "#.to_string(), + "# + .to_string(), sse_transform_buffer: r#"data: {"id":"test","object":"chat.completion.chunk"} - "#.to_string(), + "# + .to_string(), provider_stream_response: None, }; @@ -679,7 +739,8 @@ mod tests { data: Some(r#"{"id": "test", "object": "chat.completion.chunk"}"#.to_string()), event: 
Some("content_block_delta".to_string()), raw_line: r#"data: {"id": "test", "object": "chat.completion.chunk"}"#.to_string(), - sse_transform_buffer: r#"data: {"id": "test", "object": "chat.completion.chunk"}"#.to_string(), + sse_transform_buffer: r#"data: {"id": "test", "object": "chat.completion.chunk"}"# + .to_string(), provider_stream_response: None, }; assert!(!normal_event.should_skip()); @@ -705,7 +766,7 @@ mod tests { "data: {\"type\": \"ping\"}".to_string(), // This should be filtered out "data: {\"id\": \"msg2\", \"object\": \"chat.completion.chunk\"}".to_string(), "data: {\"type\": \"ping\"}".to_string(), // This should be filtered out - "data: [DONE]".to_string(), // This should end the stream + "data: [DONE]".to_string(), // This should end the stream ]; let mut iter = SseStreamIter::new(test_lines.into_iter()); @@ -773,13 +834,15 @@ mod tests { #[test] fn test_provider_stream_response_event_type() { - use crate::apis::anthropic::{MessagesStreamEvent, MessagesContentDelta}; + use crate::apis::anthropic::{MessagesContentDelta, MessagesStreamEvent}; use crate::apis::openai::ChatCompletionsStreamResponse; // Test Anthropic event type let anthropic_event = MessagesStreamEvent::ContentBlockDelta { index: 0, - delta: MessagesContentDelta::TextDelta { text: "Hello".to_string() }, + delta: MessagesContentDelta::TextDelta { + text: "Hello".to_string(), + }, }; let provider_type = ProviderStreamResponseType::MessagesStreamEvent(anthropic_event); assert_eq!(provider_type.event_type(), Some("content_block_delta")); @@ -806,15 +869,23 @@ mod tests { // Test that [DONE] marker is properly converted to MessageStop in the transformation layer let done_bytes = b"[DONE]"; let client_api = SupportedAPIs::AnthropicMessagesAPI(AnthropicApi::Messages); - let upstream_api = SupportedAPIs::OpenAIChatCompletions(crate::apis::openai::OpenAIApi::ChatCompletions); + let upstream_api = + SupportedAPIs::OpenAIChatCompletions(crate::apis::openai::OpenAIApi::ChatCompletions); - let 
result = ProviderStreamResponseType::try_from((done_bytes.as_slice(), &client_api, &upstream_api)); + let result = ProviderStreamResponseType::try_from(( + done_bytes.as_slice(), + &client_api, + &upstream_api, + )); assert!(result.is_ok()); if let Ok(ProviderStreamResponseType::MessagesStreamEvent(event)) = result { // Verify it's a MessageStop event assert_eq!(event.event_type(), Some("message_stop")); - assert!(matches!(event, crate::apis::anthropic::MessagesStreamEvent::MessageStop)); + assert!(matches!( + event, + crate::apis::anthropic::MessagesStreamEvent::MessageStop + )); } else { panic!("Expected MessagesStreamEvent::MessageStop"); } diff --git a/crates/llm_gateway/src/filter_context.rs b/crates/llm_gateway/src/filter_context.rs index 258a1a1c..2b8e1a95 100644 --- a/crates/llm_gateway/src/filter_context.rs +++ b/crates/llm_gateway/src/filter_context.rs @@ -74,7 +74,7 @@ impl RootContext for FilterContext { ratelimit::ratelimits(Some(config.ratelimits.unwrap_or_default())); self.overrides = Rc::new(config.overrides); - match config.llm_providers.try_into() { + match config.model_providers.try_into() { Ok(llm_providers) => self.llm_providers = Some(Rc::new(llm_providers)), Err(err) => panic!("{err}"), } diff --git a/demos/use_cases/llm_routing/arch_config.yaml b/demos/use_cases/llm_routing/arch_config.yaml index 176f53e9..c96e7d02 100644 --- a/demos/use_cases/llm_routing/arch_config.yaml +++ b/demos/use_cases/llm_routing/arch_config.yaml @@ -1,13 +1,12 @@ -version: v0.1.0 +version: v0.3.0 listeners: - egress_traffic: + - type: model + name: model_1 address: 0.0.0.0 port: 12000 - message_format: openai - timeout: 30s -llm_providers: +model_providers: - access_key: $OPENAI_API_KEY model: openai/gpt-4o-mini diff --git a/docs/source/resources/includes/arch_config_full_reference_rendered.yaml b/docs/source/resources/includes/arch_config_full_reference_rendered.yaml index 4c791e82..0594bde2 100644 --- 
a/docs/source/resources/includes/arch_config_full_reference_rendered.yaml +++ b/docs/source/resources/includes/arch_config_full_reference_rendered.yaml @@ -10,17 +10,40 @@ endpoints: endpoint: 127.0.0.1 port: 8001 listeners: - egress_traffic: - address: 0.0.0.0 - message_format: openai - port: 12000 - timeout: 5s - ingress_traffic: - address: 0.0.0.0 - message_format: openai - port: 10000 - timeout: 5s -llm_providers: +- address: 0.0.0.0 + model_providers: + - access_key: $OPENAI_API_KEY + default: true + model: gpt-4o + name: openai/gpt-4o + provider_interface: openai + - access_key: $MISTRAL_API_KEY + model: mistral-8x7b + name: mistral/mistral-8x7b + provider_interface: mistral + - base_url: http://mistral_local + cluster_name: mistral_mistral_local + endpoint: mistral_local + model: mistral-7b-instruct + name: mistral/mistral-7b-instruct + port: 80 + protocol: http + provider_interface: mistral + name: egress_traffic + port: 12000 + timeout: 5s + type: model_listener +- address: 0.0.0.0 + name: ingress_traffic + port: 10000 + timeout: 5s + type: prompt_listener +model_aliases: + arch.summarize.v1: + target: gpt-4o + arch.v1: + target: mistral-8x7b +model_providers: - access_key: $OPENAI_API_KEY default: true model: gpt-4o @@ -38,11 +61,6 @@ llm_providers: port: 80 protocol: http provider_interface: mistral -model_aliases: - arch.summarize.v1: - target: gpt-4o - arch.v1: - target: mistral-8x7b overrides: prompt_target_intent_matching_threshold: 0.6 prompt_guards: