feat: add passthrough_auth option for forwarding client Authorization header (#687)

* feat: add passthrough_auth option for forwarding client Authorization header

* fix tests

* Update comment to reflect upstream forwarding

* Apply suggestions from code review

---------

Co-authored-by: Adil Hafeez <adil.hafeez@gmail.com>
Co-authored-by: Adil Hafeez <adil@katanemo.com>
This commit is contained in:
Tang Quoc Thai 2026-01-15 00:06:28 +01:00 committed by GitHub
parent ba1f783adf
commit 4d53297c17
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 177 additions and 26 deletions

View file

@@ -1,26 +1,22 @@
# Arch Gateway configuration version
version: v0.3.0
# External HTTP agents - API type is controlled by request path (/v1/responses, /v1/messages, /v1/chat/completions)
agents:
- id: weather_agent # Example agent for weather
- id: weather_agent # Example agent for weather
url: http://host.docker.internal:10510
- id: flight_agent # Example agent for flights
- id: flight_agent # Example agent for flights
url: http://host.docker.internal:10520
# MCP filters applied to requests/responses (e.g., input validation, query rewriting)
filters:
- id: input_guards # Example filter for input validation
- id: input_guards # Example filter for input validation
url: http://host.docker.internal:10500
# type: mcp (default)
# transport: streamable-http (default)
# tool: input_guards (default - same as filter id)
# LLM provider configurations with API keys and model routing
model_providers:
- model: openai/gpt-4o
@@ -36,6 +32,12 @@ model_providers:
- model: mistral/ministral-3b-latest
access_key: $MISTRAL_API_KEY
# Example: Passthrough authentication for LiteLLM or similar proxies
# When passthrough_auth is true, client's Authorization header is forwarded
# instead of using the configured access_key
- model: openai/gpt-4o-litellm
base_url: https://litellm.example.com
passthrough_auth: true
# Model aliases - use friendly names instead of full provider model names
model_aliases:
@@ -45,7 +47,6 @@ model_aliases:
smart-llm:
target: gpt-4o
# HTTP listeners - entry points for agent routing, prompt targets, and direct LLM access
listeners:
# Agent listener for routing requests to multiple agents
@@ -73,7 +74,6 @@ listeners:
port: 10000
# This listener is used for prompt_targets and function calling
# Reusable service endpoints
endpoints:
app_server:
@@ -83,7 +83,6 @@ endpoints:
mistral_local:
endpoint: 127.0.0.1:8001
# Prompt targets for function calling and API orchestration
prompt_targets:
- name: get_current_weather
@@ -103,7 +102,6 @@ prompt_targets:
path: /weather
http_method: POST
# OpenTelemetry tracing configuration
tracing:
# Random sampling percentage (1-100)