# plano/tests/archgw/config.yaml
version: v0.1.0

# Ingress listener: accepts OpenAI-format chat traffic on port 10000.
listeners:
  ingress_traffic:
    address: 0.0.0.0
    port: 10000
    message_format: openai
    timeout: 30s

# Upstream application endpoints that prompt targets can route to.
endpoints:
  weather_forecast_service:
    endpoint: host.docker.internal:51001
    connect_timeout: 0.005s

# LLM providers available to the gateway; the first entry is the default.
llm_providers:
  - access_key: $OPENAI_API_KEY
    model: openai/gpt-4o-mini
    default: true
  - access_key: $OPENAI_API_KEY
    model: openai/gpt-3.5-turbo-0125
  - access_key: $OPENAI_API_KEY
    model: openai/gpt-4o

# Global system prompt applied to LLM requests.
system_prompt: |
  You are a helpful assistant.

prompt_targets:
  # Function-style target: matched prompts are parsed into these parameters
  # and dispatched to the weather service.
  - name: get_current_weather
    description: Get current weather at a location.
    parameters:
      - name: location
        description: The location to get the weather for
        required: true
        type: string
        format: city, state
      - name: days
        description: the number of days for the request
        required: true
        type: string
    endpoint:
      name: weather_forecast_service
      path: /weather
      http_method: POST
  # Catch-all target for prompts that match no other target.
  - name: default_target
    default: true
    description: This is the default target for all unmatched prompts.
    endpoint:
      name: weather_forecast_service
      path: /default_target
      http_method: POST
    system_prompt: |
      You are a helpful assistant! Summarize the user's request and provide a helpful response.
    # when false, Arch sends the response it received from this prompt target directly to the user;
    # when true, Arch forwards that response to the default LLM for further processing
    auto_llm_dispatch_on_response: false