mirror of
https://github.com/katanemo/plano.git
synced 2026-04-25 08:46:24 +02:00
improve service names (#54)
Service renames in this commit:
- embedding-server => model_server
- public-types => public_types
- chatbot-ui => chatbot_ui
- function-calling => function_calling
This commit is contained in:
parent
215f96e273
commit
060a0d665e
35 changed files with 54 additions and 52 deletions
|
|
@ -1,108 +0,0 @@
|
|||
|
||||
services:
  # Renders the Envoy config from the template + katanemo config, writing the
  # result into ./generated for the `bolt` proxy to consume.
  config-generator:
    build:
      context: ../../
      dockerfile: config_generator/Dockerfile
    volumes:
      - ../../envoyfilter/envoy.template.yaml:/usr/src/app/envoy.template.yaml
      - ./katanemo-config.yaml:/usr/src/app/katanemo-config.yaml
      - ./generated:/usr/src/app/out

  # Envoy-based gateway. Waits for config generation to finish and for the
  # embedding server to report healthy before starting.
  bolt:
    build:
      context: ../../
      dockerfile: envoyfilter/Dockerfile
    hostname: bolt
    ports:
      - "10000:10000"
      - "19901:9901"
    volumes:
      - ./generated/envoy.yaml:/etc/envoy/envoy.yaml
      # NOTE(review): host CA bundle path is macOS-style; confirm it exists on Linux hosts.
      - /etc/ssl/cert.pem:/etc/ssl/cert.pem
    depends_on:
      config-generator:
        condition: service_completed_successfully
      embeddingserver:
        condition: service_healthy
    environment:
      - LOG_LEVEL=debug

  # Embedding model server; HuggingFace cache is shared with the host to avoid
  # re-downloading model weights on every container rebuild.
  embeddingserver:
    build:
      context: ../../embedding-server
      dockerfile: Dockerfile
    ports:
      - "18081:80"
    healthcheck:
      # --fail makes curl exit non-zero on HTTP >= 400; without it the check
      # would pass even if /healthz returned a server error.
      test: ["CMD", "curl", "--fail", "http://localhost:80/healthz"]
      interval: 5s
      retries: 20
    volumes:
      - ~/.cache/huggingface:/root/.cache/huggingface

  # Function-calling resolver; talks to an Ollama endpoint for inference.
  functionresolver:
    build:
      context: ../../function_resolver
      dockerfile: Dockerfile
    ports:
      - "18082:80"
    healthcheck:
      # --fail makes curl exit non-zero on HTTP >= 400 (see embeddingserver).
      test: ["CMD", "curl", "--fail", "http://localhost:80/healthz"]
      interval: 5s
      retries: 20
    volumes:
      - ~/.cache/huggingface:/root/.cache/huggingface
    environment:
      # use ollama endpoint that is hosted by host machine (no virtualization)
      - OLLAMA_ENDPOINT=host.docker.internal
      # uncomment following line to use ollama endpoint that is hosted by docker
      # - OLLAMA_ENDPOINT=ollama

  # Optional local Ollama instance; only started with `--profile manual`.
  ollama:
    image: ollama/ollama
    container_name: ollama
    volumes:
      - ./ollama:/root/.ollama
    restart: unless-stopped
    ports:
      - "11434:11434"
    profiles:
      - manual

  # Demo chat UI pointing at the bolt gateway's chat-completions endpoint.
  chatbot-ui:
    build:
      context: ../../chatbot-ui
      dockerfile: Dockerfile
    ports:
      - "18080:8080"
    environment:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - CHAT_COMPLETION_ENDPOINT=http://bolt:10000/v1/chat/completions

  # Metrics collection; config and data dirs are bind-mounted from the repo.
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    command:
      - "--config.file=/etc/prometheus/prometheus.yaml"
    ports:
      # Quoted to avoid YAML 1.1 implicit-typing surprises on port mappings.
      - "9090:9090"
    restart: unless-stopped
    volumes:
      - ./prometheus:/etc/prometheus
      - ./prom_data:/prometheus

  # Dashboards; datasources and dashboards are provisioned from ./grafana.
  grafana:
    image: grafana/grafana
    container_name: grafana
    ports:
      - "3000:3000"
    restart: unless-stopped
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      # NOTE(review): hard-coded admin password — fine for local dev only;
      # move to an env var / secret before any shared deployment.
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      - ./grafana:/etc/grafana/provisioning/datasources
      - ./grafana/dashboard.yaml:/etc/grafana/provisioning/dashboards/main.yaml
      - ./grafana/dashboards:/var/lib/grafana/dashboards
Loading…
Add table
Add a link
Reference in a new issue