# Docker Compose stack: the webclaw app plus a local Ollama LLM backend.
services:
  webclaw:
    build: .
    ports:
      # Host port is overridable via WEBCLAW_PORT; container always listens on 3000.
      - "${WEBCLAW_PORT:-3000}:3000"
    env_file:
      - .env
    environment:
      # Point the app at the sibling ollama service on the compose network.
      - OLLAMA_HOST=http://ollama:11434
    depends_on:
      - ollama
    restart: unless-stopped
    healthcheck:
      # Exec-form check: succeeds if the webclaw binary runs at all.
      test: ["CMD", "webclaw", "--help"]
      interval: 30s
      timeout: 5s
      retries: 3

  ollama:
    image: ollama/ollama:latest
    volumes:
      # Persist downloaded models across container restarts.
      - ollama_data:/root/.ollama
    restart: unless-stopped
    # CPU-only by default. For GPU, uncomment:
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - capabilities: [gpu]
    #
    # Pre-pull a model after starting:
    #   docker compose exec ollama ollama pull qwen3:1.7b

volumes:
  ollama_data: