version: "0.1-beta"
listener:
  address: 0.0.0.0 # or 127.0.0.1
  port: 10000
  # Defines how Arch should parse the content from application/json or text/plain Content-type in the http request
  message_format: huggingface
# Centralized way to manage LLMs, manage keys, retry logic, failover and limits in a central way
llm_providers:
  - name: "OpenAI"
    provider: "openai"
    access_key: $OPENAI_API_KEY
    model: gpt-4o
    default: true
    stream: true
# default system prompt used by all prompt targets
system_prompt: |
  You are a network assistant that just offers facts; not advice on manufacturers or purchasing decisions.
prompt_guards:
  input_guards:
    jailbreak:
      on_exception:
        message: "Looks like you're curious about my abilities, but I can only provide assistance within my programmed parameters."
prompt_targets:
  - name: "reboot_network_device"
    description: "Helps network operators perform device operations like rebooting a device."
    endpoint:
      name: app_server
      path: "/agent/action"
    parameters:
      - name: "device_id"
        # additional type options include: int | float | bool | string | list | dict
        type: "string"
        description: "Identifier of the network device to reboot."
        required: true
      - name: "confirmation"
        type: "string"
        description: "Confirmation flag to proceed with reboot."
        default: "no"
        # quoted: YAML 1.1 parsers resolve bare yes/no to booleans, which would
        # mismatch the string default above
        enum: ["yes", "no"]

  - name: "information_extraction"
    default: true
    description: "This prompt handles all scenarios that are question and answer in nature. Like summarization, information extraction, etc."
    endpoint:
      name: app_server
      path: "/agent/summary"
    # Arch uses the default LLM and treats the response from the endpoint as the prompt to send to the LLM
    auto_llm_dispatch_on_response: true
    # override system prompt for this prompt target
    system_prompt: |
      You are a helpful information extraction assistant. Use the information that is provided to you.
error_target:
  endpoint:
    name: error_target_1
    path: /error
# Arch creates a round-robin load balancing between different endpoints, managed via the cluster subsystem.
endpoints:
  app_server:
    # value could be ip address or a hostname with port
    # this could also be a list of endpoints for load balancing
    # for example endpoint: [ ip1:port, ip2:port ]
    endpoint: "127.0.0.1:80"
    # max time to wait for a connection to be established
    connect_timeout: 0.005s