add support for jaeger tracing (#229)

This commit is contained in:
Adil Hafeez 2024-11-07 22:11:00 -06:00 committed by GitHub
parent fb67788be0
commit a72bb804eb
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
64 changed files with 5032 additions and 1112 deletions

View file

@ -0,0 +1,27 @@
# Function calling
This demo shows how you can use Arch's core function calling capabilities.
# Starting the demo
1. Please make sure the [pre-requisites](https://github.com/katanemo/arch/?tab=readme-ov-file#prerequisites) are installed correctly
2. Start Arch
3. Run the demo script
```sh
sh run_demo.sh
```
4. Navigate to http://localhost:18080/
5. You can type in queries like "how is the weather?"
# Observability
Arch gateway publishes a stats endpoint at http://localhost:19901/stats. In this demo we use Prometheus to pull stats from Arch and Grafana to visualize the stats in a dashboard. To see the Grafana dashboard, follow the instructions below:
1. Start grafana and prometheus using following command
```sh
docker compose --profile monitoring up
```
1. Navigate to http://localhost:3000/ to open grafana UI (use admin/grafana as credentials)
1. From grafana left nav click on dashboards and select "Intelligent Gateway Overview" to view arch gateway stats
Here is a sample interaction,
<img width="575" alt="image" src="https://github.com/user-attachments/assets/e0929490-3eb2-4130-ae87-a732aea4d059">

View file

@ -0,0 +1,77 @@
# Arch gateway configuration for the function-calling demo.
version: "0.1-beta"

# Inbound listener the gateway binds to; clients send prompts here.
listener:
  address: 0.0.0.0
  port: 10000
  message_format: huggingface
  connect_timeout: 0.005s

# Upstream services the gateway can route prompt targets to.
endpoints:
  weather_forecast_service:
    # host.docker.internal lets the gateway reach a service running on the
    # docker host (see the companion docker-compose file).
    endpoint: host.docker.internal:18083
    connect_timeout: 0.005s

overrides:
  # confidence threshold for prompt target intent matching
  prompt_target_intent_matching_threshold: 0.6

# LLM backends available to the gateway; access keys are resolved from
# environment variables at startup.
llm_providers:
  - name: gpt-4o-mini
    access_key: $OPENAI_API_KEY
    provider: openai
    model: gpt-4o-mini
    # used when a request does not name a specific provider
    default: true

  - name: gpt-3.5-turbo-0125
    access_key: $OPENAI_API_KEY
    provider: openai
    model: gpt-3.5-turbo-0125

  - name: gpt-4o
    access_key: $OPENAI_API_KEY
    provider: openai
    model: gpt-4o

  - name: ministral-3b
    access_key: $MISTRAL_API_KEY
    provider: mistral
    model: ministral-3b-latest

# Top-level system prompt applied to LLM requests.
system_prompt: |
  You are a helpful assistant.

# Prompt targets map matched user intents to upstream endpoints.
prompt_targets:
  - name: weather_forecast
    description: Check weather information for a given city.
    parameters:
      - name: city
        description: the name of the city
        required: true
        type: str
      - name: days
        description: the number of days
        type: int
        required: true
      - name: units
        description: the temperature unit, e.g., Celsius and Fahrenheit
        type: str
        # used when the caller does not specify a unit
        default: Fahrenheit
    endpoint:
      name: weather_forecast_service
      path: /weather

  # Fallback target for prompts that match no other target.
  - name: default_target
    default: true
    description: This is the default target for all unmatched prompts.
    endpoint:
      name: weather_forecast_service
      path: /default_target
    system_prompt: |
      You are a helpful assistant! Summarize the user's request and provide a helpful response.
    # if it is set to false arch will send response that it received from this prompt target to the user
    # if true arch will forward the response to the default LLM
    auto_llm_dispatch_on_response: false

# Jaeger tracing settings (the feature this demo showcases).
tracing:
  # percentage of requests to sample for tracing
  random_sampling: 100
  # also emit spans for the gateway's internal processing
  trace_arch_internal: true

View file

@ -0,0 +1,30 @@
# Docker Compose stack for the function-calling demo:
# the weather service, the chatbot UI, and a Jaeger tracing backend.
services:
  # Demo upstream service the gateway routes weather prompts to.
  weather_forecast_service:
    build:
      context: ../shared/weather_forecast_service
    environment:
      # NOTE(review): "OLTP" is likely a typo for "OTLP" (the OpenTelemetry
      # protocol; 4317 is the standard OTLP/gRPC port), but the service code
      # must read the same variable name — confirm before renaming.
      - OLTP_HOST=http://jaeger:4317
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "18083:80"

  # Web UI exposed at http://localhost:18080/.
  chatbot_ui:
    build:
      context: ../shared/chatbot_ui
    ports:
      - "18080:8080"
    environment:
      # this is only because we are running the sample app in the same docker container environment as archgw
      - CHAT_COMPLETION_ENDPOINT=http://host.docker.internal:10000/v1
    extra_hosts:
      - "host.docker.internal:host-gateway"
    volumes:
      - ./arch_config.yaml:/app/arch_config.yaml

  # Jaeger backend for collecting and viewing traces.
  jaeger:
    build:
      context: ../shared/jaeger
    ports:
      # Jaeger query UI
      - "16686:16686"
      # OTLP/gRPC trace ingestion
      - "4317:4317"

View file

@ -0,0 +1,46 @@
#!/bin/bash
# Bring the function-calling demo up or down.
# Usage: run_demo.sh        -> start everything
#        run_demo.sh down   -> stop everything

# Ensure an .env file with the OpenAI key exists, start the Arch gateway,
# then bring up the demo containers.
start_demo() {
    if [ ! -f ".env" ]; then
        # OPENAI_API_KEY must be present to seed the .env file.
        if [ -z "$OPENAI_API_KEY" ]; then
            echo "Error: OPENAI_API_KEY environment variable is not set for the demo."
            exit 1
        fi
        echo "Creating .env file..."
        echo "OPENAI_API_KEY=$OPENAI_API_KEY" > .env
        echo ".env file created with OPENAI_API_KEY."
    else
        echo ".env file already exists. Skipping creation."
    fi

    echo "Starting Arch with arch_config.yaml..."
    archgw up arch_config.yaml

    echo "Starting Network Agent using Docker Compose..."
    docker compose up -d   # detached so the script returns immediately
}

# Tear down the demo containers, then stop the Arch gateway.
stop_demo() {
    echo "Stopping Network Agent using Docker Compose..."
    docker compose down

    echo "Stopping Arch..."
    archgw down
}

# Dispatch on the first argument; anything other than "down" starts the demo.
case "$1" in
    down)
        stop_demo
        ;;
    *)
        start_demo
        ;;
esac