# plano/demos/samples_python/weather_forecast/arch_config.yaml
version: "0.1-beta"

# Inbound listener for prompt traffic.
listener:
  address: 0.0.0.0
  port: 10000
  message_format: huggingface
  # upstream connect timeout (5 ms)
  connect_timeout: 0.005s
# Backend services that prompt targets can route to.
endpoints:
  weather_forecast_service:
    endpoint: host.docker.internal:18083
    # upstream connect timeout (5 ms)
    connect_timeout: 0.005s

overrides:
  # confidence threshold for prompt target intent matching
  prompt_target_intent_matching_threshold: 0.6
# Upstream LLM providers; $OPENAI_API_KEY is resolved from the environment.
llm_providers:
  - name: gpt-4o-mini
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-4o-mini
    # used when a request does not name a provider
    default: true
  - name: gpt-3.5-turbo-0125
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-3.5-turbo-0125
  - name: gpt-4o
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-4o

# System prompt prepended to conversations forwarded to the LLM.
system_prompt: |
  You are a helpful assistant.
# Input guardrails applied before a prompt reaches any target.
prompt_guards:
  input_guards:
    jailbreak:
      on_exception:
        # returned verbatim to the user when the jailbreak guard trips
        message: Looks like you're curious about my abilities, but I can only provide assistance for weather forecasting.
# Intent-matched targets; each routes matched prompts to an endpoint above.
prompt_targets:
  - name: get_current_weather
    description: Get current weather at a location.
    parameters:
      - name: location
        description: The location to get the weather for
        required: true
        type: string
        format: City, State
      - name: days
        description: the number of days for the request
        required: true
        type: int
    endpoint:
      name: weather_forecast_service
      path: /weather
      http_method: POST
  - name: default_target
    # catch-all for prompts that match no other target
    default: true
    description: This is the default target for all unmatched prompts.
    endpoint:
      name: weather_forecast_service
      path: /default_target
      http_method: POST
    system_prompt: |
      You are a helpful assistant! Summarize the user's request and provide a helpful response.
    # if false, arch sends the response it received from this prompt target to the user
    # if true, arch forwards the response to the default LLM
    auto_llm_dispatch_on_response: false
# Tracing configuration.
tracing:
  # percentage of requests to sample (100 = trace everything)
  random_sampling: 100
  trace_arch_internal: true