run demos without docker, keep docker optional via --with-ui

This commit is contained in:
Adil Hafeez 2026-03-09 17:18:11 +00:00
parent b9f01c8471
commit 1285bd083d
33 changed files with 447 additions and 316 deletions

View file

@ -41,21 +41,36 @@ cd demos/agent_orchestration/multi_agent_crewai_langchain
./run_demo.sh
```
This starts Plano natively and brings up via Docker Compose:
This starts Plano natively and runs agents as local processes:
- **CrewAI Flight Agent** (port 10520) - flight search
- **LangChain Weather Agent** (port 10510) - weather forecasts
- **AnythingLLM** (port 3001) - chat interface
- **Jaeger** (port 16686) - distributed tracing
Plano runs natively on the host (ports 12000, 8001).
To also start AnythingLLM (chat UI), Jaeger (tracing), and other optional services:
```bash
./run_demo.sh --with-ui
```
This additionally starts:
- **AnythingLLM** (port 3001) - chat interface
- **Jaeger** (port 16686) - distributed tracing
### Try It Out
1. **Open the Chat Interface**
1. **Using curl**
```bash
curl -X POST http://localhost:8001/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "What is the weather in San Francisco?"}]}'
```
2. **Using AnythingLLM (requires `--with-ui`)**
- Navigate to [http://localhost:3001](http://localhost:3001)
- Create an account (stored locally)
2. **Ask Multi-Agent Questions**
3. **Ask Multi-Agent Questions**
```
"What's the weather in San Francisco and can you find flights from Seattle to San Francisco?"
```
@ -65,7 +80,7 @@ Plano runs natively on the host (ports 12000, 8001).
- Routes the flight part to the CrewAI agent
- Combines responses seamlessly
3. **View Distributed Traces**
4. **View Distributed Traces (requires `--with-ui`)**
- Open [http://localhost:16686](http://localhost:16686) (Jaeger UI)
- See how requests flow through both agents

View file

@ -2,9 +2,9 @@ version: v0.3.0
agents:
- id: weather_agent
url: http://langchain-weather-agent:10510
url: http://localhost:10510
- id: flight_agent
url: http://crewai-flight-agent:10520
url: http://localhost:10520
model_providers:
- model: openai/gpt-4o

View file

@ -1,27 +1,5 @@
services:
crewai-flight-agent:
build:
dockerfile: Dockerfile
restart: always
ports:
- "10520:10520"
environment:
- LLM_GATEWAY_ENDPOINT=http://host.docker.internal:12000/v1
- AEROAPI_KEY=${AEROAPI_KEY:?AEROAPI_KEY environment variable is required but not set}
- PYTHONUNBUFFERED=1
command: ["python", "-u", "crewai/flight_agent.py"]
langchain-weather-agent:
build:
dockerfile: Dockerfile
restart: always
ports:
- "10510:10510"
environment:
- LLM_GATEWAY_ENDPOINT=http://host.docker.internal:12000/v1
command: ["python", "-u", "langchain/weather_agent.py"]
anythingllm:
image: mintplexlabs/anythingllm
restart: always
@ -36,6 +14,8 @@ services:
- GENERIC_OPEN_AI_MODEL_PREF=gpt-4o-mini
- GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=128000
- GENERIC_OPEN_AI_API_KEY=sk-placeholder
extra_hosts:
- "host.docker.internal:host-gateway"
jaeger:
build:
@ -44,3 +24,4 @@ services:
ports:
- "16686:16686" # Jaeger UI
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver

View file

@ -12,14 +12,9 @@ start_demo() {
echo "Error: OPENAI_API_KEY environment variable is not set for the demo."
exit 1
fi
if [ -z "$AEROAPI_KEY" ]; then
echo "Error: AEROAPI_KEY environment variable is not set for the demo."
exit 1
fi
echo "Creating .env file..."
echo "OPENAI_API_KEY=$OPENAI_API_KEY" > .env
echo "AEROAPI_KEY=$AEROAPI_KEY" >> .env
echo ".env file created with API keys."
fi
@ -27,18 +22,27 @@ start_demo() {
echo "Starting Plano with config.yaml..."
planoai up config.yaml
# Step 4: Start agents and services
echo "Starting agents using Docker Compose..."
docker compose up -d
# Step 4: Start agents natively
echo "Starting agents..."
bash start_agents.sh &
# Step 5: Optionally start UI services (AnythingLLM, Jaeger)
if [ "$1" == "--with-ui" ]; then
echo "Starting UI services (AnythingLLM, Jaeger)..."
docker compose up -d
fi
}
# Function to stop the demo
stop_demo() {
# Step 1: Stop Docker Compose services
echo "Stopping Docker Compose services..."
docker compose down
# Stop agents
echo "Stopping agents..."
pkill -f start_agents.sh 2>/dev/null || true
# Step 2: Stop Plano
# Stop Docker Compose services if running
docker compose down 2>/dev/null || true
# Stop Plano
echo "Stopping Plano..."
planoai down
}
@ -47,5 +51,5 @@ stop_demo() {
if [ "$1" == "down" ]; then
stop_demo
else
start_demo
start_demo "$1"
fi

View file

@ -0,0 +1,30 @@
#!/bin/bash
# Launch both demo agents as local processes and keep them running until
# one exits or the script receives INT/TERM (e.g. from run_demo.sh's pkill).
# Assumes Plano's LLM gateway is listening natively on localhost:12000.
set -e

PIDS=()

# Timestamped log line to stdout.
log() { echo "$(date '+%F %T') - $*"; }

# Stop every agent we started. Clearing the traps first prevents the EXIT
# trap from re-invoking cleanup after the 'exit' below when a signal fires.
cleanup() {
  trap - EXIT INT TERM
  log "Stopping agents..."
  for PID in "${PIDS[@]}"; do
    kill "$PID" 2>/dev/null && log "Stopped process $PID"
  done
  exit 0
}

trap cleanup EXIT INT TERM

# Agents talk to Plano directly on the host — no Docker networking needed.
export LLM_GATEWAY_ENDPOINT=http://localhost:12000/v1

log "Starting langchain weather_agent on port 10510..."
uv run python langchain/weather_agent.py &
PIDS+=($!)

log "Starting crewai flight_agent on port 10520..."
uv run python crewai/flight_agent.py &
PIDS+=($!)

# Block until an agent exits; under 'set -e' a failing agent terminates the
# script, and the EXIT trap then shuts down the remaining one.
for PID in "${PIDS[@]}"; do
  wait "$PID"
done

View file

@ -23,9 +23,10 @@ All agents use Plano's agent orchestration LLM to intelligently route user reque
## Prerequisites
- [Plano CLI](https://docs.planoai.dev/get_started/quickstart.html#prerequisites) installed (`pip install planoai`)
- Docker and Docker Compose (for agent services)
- [uv](https://docs.astral.sh/uv/) installed (for running agents natively)
- [OpenAI API key](https://platform.openai.com/api-keys)
- [FlightAware AeroAPI key](https://www.flightaware.com/aeroapi/portal)
- Docker and Docker Compose (optional, only needed for `--with-ui`)
> **Note:** You'll need to obtain a FlightAware AeroAPI key for live flight data. Visit [https://www.flightaware.com/aeroapi/portal](https://www.flightaware.com/aeroapi/portal) to get your API key.
@ -46,16 +47,34 @@ export OPENAI_API_KEY="your OpenAI api key"
./run_demo.sh
```
This starts Plano natively and brings up via Docker Compose:
This starts Plano natively and runs agents as local processes:
- Weather Agent on port 10510
- Flight Agent on port 10520
- Open WebUI on port 8080
Plano runs natively on the host (port 8001).
To also start Open WebUI, Jaeger tracing, and other optional services, pass `--with-ui`:
```bash
./run_demo.sh --with-ui
```
This additionally starts:
- Open WebUI on port 8080
- Jaeger tracing UI on port 16686
### 4. Test the System
Use Open WebUI at http://localhost:8080
**Option A: Using curl**
```bash
curl -X POST http://localhost:8001/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{"model": "gpt-5.2", "messages": [{"role": "user", "content": "What is the weather in Istanbul?"}]}'
```
**Option B: Using Open WebUI (requires `--with-ui`)**
Navigate to http://localhost:8080
> **Note:** The Open WebUI may take a few minutes to start up and be fully ready. Please wait for the container to finish initializing before accessing the interface. Once ready, make sure to select the **gpt-5.2** model from the model dropdown menu in the UI.
@ -102,7 +121,7 @@ Each agent:
3. Generates response using GPT-5.2
4. Streams response back to user
Both agents run as Docker containers and communicate with Plano running natively on the host.
Both agents run as native local processes and communicate with Plano running natively on the host.
## Observability

View file

@ -1,32 +1,5 @@
services:
weather-agent:
build:
context: .
dockerfile: Dockerfile
container_name: weather-agent
restart: always
ports:
- "10510:10510"
environment:
- LLM_GATEWAY_ENDPOINT=http://host.docker.internal:12000/v1
command: ["uv", "run", "python", "src/travel_agents/weather_agent.py"]
extra_hosts:
- "host.docker.internal:host-gateway"
flight-agent:
build:
context: .
dockerfile: Dockerfile
container_name: flight-agent
restart: always
ports:
- "10520:10520"
environment:
- LLM_GATEWAY_ENDPOINT=http://host.docker.internal:12000/v1
- AEROAPI_KEY=${AEROAPI_KEY:? AEROAPI_KEY environment variable is required but not set}
command: ["uv", "run", "python", "src/travel_agents/flight_agent.py"]
extra_hosts:
- "host.docker.internal:host-gateway"
open-web-ui:
image: dyrnq/open-webui:main
restart: always
@ -40,9 +13,8 @@ services:
- ENABLE_TITLE_GENERATION=false
- ENABLE_TAGS_GENERATION=false
- ENABLE_AUTOCOMPLETE_GENERATION=false
depends_on:
- weather-agent
- flight-agent
extra_hosts:
- "host.docker.internal:host-gateway"
jaeger:
build:
context: ../../shared/jaeger

View file

@ -27,18 +27,27 @@ start_demo() {
echo "Starting Plano with config.yaml..."
planoai up config.yaml
# Step 4: Start agents and services
echo "Starting agents using Docker Compose..."
docker compose up -d
# Step 4: Start agents natively
echo "Starting agents..."
bash start_agents.sh &
# Step 5: Optionally start UI services (Open WebUI, Jaeger)
if [ "$1" == "--with-ui" ]; then
echo "Starting UI services (Open WebUI, Jaeger)..."
docker compose up -d
fi
}
# Function to stop the demo
stop_demo() {
# Step 1: Stop Docker Compose services
echo "Stopping Docker Compose services..."
docker compose down
# Stop agents
echo "Stopping agents..."
pkill -f start_agents.sh 2>/dev/null || true
# Step 2: Stop Plano
# Stop Docker Compose services if running
docker compose down 2>/dev/null || true
# Stop Plano
echo "Stopping Plano..."
planoai down
}
@ -47,5 +56,5 @@ stop_demo() {
if [ "$1" == "down" ]; then
stop_demo
else
start_demo
start_demo "$1"
fi

View file

@ -0,0 +1,30 @@
#!/bin/bash
# Launch both travel-demo agents as local processes and keep them running
# until one exits or the script receives INT/TERM (e.g. via run_demo.sh).
# Assumes Plano's LLM gateway is listening natively on localhost:12000.
set -e

PIDS=()

# Timestamped log line to stdout.
log() { echo "$(date '+%F %T') - $*"; }

# Stop every agent we started. Clearing the traps first prevents the EXIT
# trap from re-invoking cleanup after the 'exit' below when a signal fires.
cleanup() {
  trap - EXIT INT TERM
  log "Stopping agents..."
  for PID in "${PIDS[@]}"; do
    kill "$PID" 2>/dev/null && log "Stopped process $PID"
  done
  exit 0
}

trap cleanup EXIT INT TERM

# Agents talk to Plano directly on the host — no Docker networking needed.
export LLM_GATEWAY_ENDPOINT=http://localhost:12000/v1

log "Starting weather_agent on port 10510..."
uv run python src/travel_agents/weather_agent.py &
PIDS+=($!)

log "Starting flight_agent on port 10520..."
uv run python src/travel_agents/flight_agent.py &
PIDS+=($!)

# Block until an agent exits; under 'set -e' a failing agent terminates the
# script, and the EXIT trap then shuts down the remaining one.
for PID in "${PIDS[@]}"; do
  wait "$PID"
done