version: "0.1-beta"
|
|
|
|
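# Ingress listener: the address/port where arch accepts inbound prompt
# traffic. message_format: openai means clients talk to this listener
# using the familiar OpenAI-style chat request shape.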
listeners:
  ingress_traffic:
    address: 0.0.0.0
    port: 10000
    message_format: openai
    timeout: 30s

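# Upstream application endpoints that the prompt_targets below route to.
# Note: connect_timeout of 0.005s is only 5 ms, which assumes the weather
# service is reachable locally (here via host.docker.internal).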
|
endpoints:
  weather_forecast_service:
    endpoint: host.docker.internal:18083
    connect_timeout: 0.005s

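# Intent matching: prompts scoring below this threshold are presumably
# treated as unmatched and fall through to default_target below.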
|
overrides:
  # confidence threshold for prompt target intent matching
  prompt_target_intent_matching_threshold: 0.6

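# Upstream LLM providers. Each access_key references the OPENAI_API_KEY
# environment variable, which must be set before the gateway starts;
# default: true presumably marks the provider used when a request does
# not name a specific model.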
|
llm_providers:
  - name: gpt-4o-mini
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-4o-mini
    default: true

  - name: gpt-3.5-turbo-0125
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-3.5-turbo-0125

  - name: gpt-4o
    access_key: $OPENAI_API_KEY
    provider_interface: openai
    model: gpt-4o

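# Gateway-wide system prompt; note that default_target below also
# defines a target-specific system_prompt of its own.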
system_prompt: |
  You are a helpful assistant.

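# Guardrails applied to user input: when the jailbreak guard trips, the
# on_exception message below is returned to the user.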
prompt_guards:
  input_guards:
    jailbreak:
      on_exception:
        message: Looks like you're curious about my abilities, but I can only provide assistance for weather forecasting.

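# Prompt targets map detected user intents to API calls on the
# endpoints declared above.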
prompt_targets:
  - name: get_current_weather
    description: Get current weather at a location.
    parameters:
      - name: location
        description: The location to get the weather for
        required: true
        type: string
        format: City, State
      - name: days
        description: The number of days for the request
        required: true
        type: int
    endpoint:
      name: weather_forecast_service
      path: /weather
      http_method: POST

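  # Fallback for unmatched prompts; note it carries its own system_prompt,
  # presumably used in place of the gateway-wide one above.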
|
  - name: default_target
    default: true
    description: This is the default target for all unmatched prompts.
    endpoint:
      name: weather_forecast_service
      path: /default_target
      http_method: POST
    system_prompt: |
      You are a helpful assistant! Summarize the user's request and provide a helpful response.
    # if false, arch sends the response it received from this prompt target
    # directly to the user; if true, arch forwards it to the default LLM
    auto_llm_dispatch_on_response: false

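# Observability: random_sampling appears to be a percentage, so 100
# traces every request; trace_arch_internal also emits spans for arch's
# internal processing.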
|
tracing:
  random_sampling: 100
  trace_arch_internal: true