From ce1f8c872f8922c4ba9cbac718ec0eceec887f71 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Tue, 24 Feb 2026 22:45:37 +0530
Subject: [PATCH 01/57] feat: add Docker configuration files and installation
script for SurfSense
---
.github/workflows/docker_build.yaml | 146 ++++++++------------
docker-compose.yml | 2 -
docker/.env.example | 194 +++++++++++++++++++++++++++
docker/docker-compose.dev.yml | 129 ++++++++++++++++++
docker/docker-compose.yml | 110 +++++++++++++++
docker/postgresql.conf | 20 +++
docker/scripts/init-electric-user.sh | 38 ++++++
docker/scripts/install.sh | 92 +++++++++++++
surfsense_web/Dockerfile | 23 +++-
surfsense_web/docker-entrypoint.js | 100 ++++++++++++++
surfsense_web/docker-entrypoint.sh | 6 +
11 files changed, 762 insertions(+), 98 deletions(-)
create mode 100644 docker/.env.example
create mode 100644 docker/docker-compose.dev.yml
create mode 100644 docker/docker-compose.yml
create mode 100644 docker/postgresql.conf
create mode 100644 docker/scripts/init-electric-user.sh
create mode 100644 docker/scripts/install.sh
create mode 100644 surfsense_web/docker-entrypoint.js
create mode 100644 surfsense_web/docker-entrypoint.sh
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index ff6c838d1..a33c238ab 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -1,4 +1,4 @@
-name: Build and Push Docker Image
+name: Build and Push Docker Images
on:
workflow_dispatch:
@@ -28,33 +28,28 @@ jobs:
- name: Read app version and calculate next Docker build version
id: tag_version
run: |
- # Read version from pyproject.toml
APP_VERSION=$(grep -E '^version = ' surfsense_backend/pyproject.toml | sed 's/version = "\(.*\)"/\1/')
echo "App version from pyproject.toml: $APP_VERSION"
-
+
if [ -z "$APP_VERSION" ]; then
echo "Error: Could not read version from surfsense_backend/pyproject.toml"
exit 1
fi
-
- # Fetch all tags
+
git fetch --tags
-
- # Find the latest docker build tag for this app version (format: APP_VERSION.BUILD_NUMBER)
- # Tags follow pattern: 0.0.11.1, 0.0.11.2, etc.
+
LATEST_BUILD_TAG=$(git tag --list "${APP_VERSION}.*" --sort='-v:refname' | head -n 1)
-
+
if [ -z "$LATEST_BUILD_TAG" ]; then
echo "No previous Docker build tag found for version ${APP_VERSION}. Starting with ${APP_VERSION}.1"
NEXT_VERSION="${APP_VERSION}.1"
else
echo "Latest Docker build tag found: $LATEST_BUILD_TAG"
- # Extract the build number (4th component)
BUILD_NUMBER=$(echo "$LATEST_BUILD_TAG" | rev | cut -d. -f1 | rev)
NEXT_BUILD=$((BUILD_NUMBER + 1))
NEXT_VERSION="${APP_VERSION}.${NEXT_BUILD}"
fi
-
+
echo "Calculated next Docker version: $NEXT_VERSION"
echo "next_version=$NEXT_VERSION" >> $GITHUB_OUTPUT
@@ -78,67 +73,35 @@ jobs:
git ls-remote --tags origin | grep "refs/tags/${{ steps.tag_version.outputs.next_version }}" || (echo "Tag push verification failed!" && exit 1)
echo "Tag successfully pushed."
- # Build for AMD64 on native x64 runner
- build_amd64:
- runs-on: ubuntu-latest
+ build:
needs: tag_release
+ runs-on: ${{ matrix.os }}
permissions:
packages: write
contents: read
- outputs:
- digest: ${{ steps.build.outputs.digest }}
+ strategy:
+ fail-fast: false
+ matrix:
+ platform: [linux/amd64, linux/arm64]
+ image: [backend, frontend]
+ include:
+ - platform: linux/amd64
+ suffix: amd64
+ os: ubuntu-latest
+ - platform: linux/arm64
+ suffix: arm64
+ os: ubuntu-24.04-arm
+ - image: backend
+ name: surfsense_backend
+ context: ./surfsense_backend
+ file: ./surfsense_backend/Dockerfile
+ - image: frontend
+ name: surfsense_web
+ context: ./surfsense_web
+ file: ./surfsense_web/Dockerfile
env:
- REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/surfsense
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ matrix.name }}
- - name: Set lowercase image name
- id: image
- run: echo "name=${REGISTRY_IMAGE,,}" >> $GITHUB_OUTPUT
-
- - name: Login to GitHub Container Registry
- uses: docker/login-action@v3
- with:
- registry: ghcr.io
- username: ${{ github.repository_owner }}
- password: ${{ secrets.GITHUB_TOKEN }}
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
-
- - name: Free up disk space
- run: |
- sudo rm -rf /usr/share/dotnet
- sudo rm -rf /opt/ghc
- sudo rm -rf /usr/local/share/boost
- sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- docker system prune -af
-
- - name: Build and push AMD64 image
- id: build
- uses: docker/build-push-action@v5
- with:
- context: .
- file: ./Dockerfile.allinone
- push: true
- tags: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}-amd64
- platforms: linux/amd64
- cache-from: type=gha,scope=amd64
- cache-to: type=gha,mode=max,scope=amd64
- provenance: false
-
- # Build for ARM64 on native arm64 runner (no QEMU emulation!)
- build_arm64:
- runs-on: ubuntu-24.04-arm
- needs: tag_release
- permissions:
- packages: write
- contents: read
- outputs:
- digest: ${{ steps.build.outputs.digest }}
- env:
- REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/surfsense
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -165,28 +128,36 @@ jobs:
sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
docker system prune -af
- - name: Build and push ARM64 image
+ - name: Build and push ${{ matrix.name }} (${{ matrix.suffix }})
id: build
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
- context: .
- file: ./Dockerfile.allinone
+ context: ${{ matrix.context }}
+ file: ${{ matrix.file }}
push: true
- tags: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}-arm64
- platforms: linux/arm64
- cache-from: type=gha,scope=arm64
- cache-to: type=gha,mode=max,scope=arm64
+ tags: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}-${{ matrix.suffix }}
+ platforms: ${{ matrix.platform }}
+ cache-from: type=gha,scope=${{ matrix.image }}-${{ matrix.suffix }}
+ cache-to: type=gha,mode=max,scope=${{ matrix.image }}-${{ matrix.suffix }}
provenance: false
+          # NOTE: no build-args are passed. GitHub Actions expression strings do not
+          # process backslash escapes, so format('…{0}…', '\n') would inject a literal
+          # "\n" (two characters) and corrupt the first NEXT_PUBLIC_* value. The frontend
+          # Dockerfile's ARG defaults already bake the __NEXT_PUBLIC_*__ placeholders
+          # that docker-entrypoint.js replaces at container startup, so omitting
+          # build-args here is both safe and correct for backend and frontend alike.
- # Create multi-arch manifest combining both platform images
create_manifest:
runs-on: ubuntu-latest
- needs: [tag_release, build_amd64, build_arm64]
+ needs: [tag_release, build]
permissions:
packages: write
contents: read
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - name: surfsense_backend
+ - name: surfsense_web
env:
- REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/surfsense
+ REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ matrix.name }}
+
steps:
- name: Set lowercase image name
id: image
@@ -203,28 +174,23 @@ jobs:
run: |
VERSION_TAG="${{ needs.tag_release.outputs.new_tag }}"
IMAGE="${{ steps.image.outputs.name }}"
-
- # Create manifest for version tag
+
docker manifest create ${IMAGE}:${VERSION_TAG} \
${IMAGE}:${VERSION_TAG}-amd64 \
${IMAGE}:${VERSION_TAG}-arm64
-
+
docker manifest push ${IMAGE}:${VERSION_TAG}
-
- # Create/update latest tag if on default branch
+
if [[ "${{ github.ref }}" == "refs/heads/${{ github.event.repository.default_branch }}" ]] || [[ "${{ github.event.inputs.branch }}" == "${{ github.event.repository.default_branch }}" ]]; then
docker manifest create ${IMAGE}:latest \
${IMAGE}:${VERSION_TAG}-amd64 \
${IMAGE}:${VERSION_TAG}-arm64
-
+
docker manifest push ${IMAGE}:latest
fi
- - name: Clean up architecture-specific tags (optional)
- continue-on-error: true
+ - name: Summary
run: |
- # Note: GHCR doesn't support tag deletion via API easily
- # The arch-specific tags will remain but users should use the main tags
- echo "Multi-arch manifest created successfully!"
- echo "Users should pull: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}"
- echo "Or for latest: ${{ steps.image.outputs.name }}:latest"
+ echo "Multi-arch manifest created for ${{ matrix.name }}!"
+ echo "Versioned: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}"
+ echo "Latest: ${{ steps.image.outputs.name }}:latest"
diff --git a/docker-compose.yml b/docker-compose.yml
index a94cea2e5..2bf62b883 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,3 @@
-version: "3.8"
-
services:
db:
image: ankane/pgvector:latest
diff --git a/docker/.env.example b/docker/.env.example
new file mode 100644
index 000000000..93830c4dd
--- /dev/null
+++ b/docker/.env.example
@@ -0,0 +1,194 @@
+# ==============================================================================
+# SurfSense Docker Configuration
+# ==============================================================================
+# Only variables YOU need to set are in this file.
+# Database, Redis, and internal service wiring are handled automatically.
+# ==============================================================================
+
+# SurfSense version (pin to a specific version like "0.0.13.1" or use "latest")
+SURFSENSE_VERSION=latest
+
+# ------------------------------------------------------------------------------
+# Core Settings
+# ------------------------------------------------------------------------------
+
+# REQUIRED: Generate a secret key with: openssl rand -base64 32
+SECRET_KEY=replace_me_with_a_random_string
+
+# Auth type: LOCAL (email/password) or GOOGLE (OAuth)
+AUTH_TYPE=LOCAL
+
+# Allow new user registrations (TRUE or FALSE)
+# REGISTRATION_ENABLED=TRUE
+
+# Document parsing service: DOCLING, UNSTRUCTURED, or LLAMACLOUD
+ETL_SERVICE=DOCLING
+
+# Embedding model for vector search
+# Local: sentence-transformers/all-MiniLM-L6-v2
+# OpenAI: openai://text-embedding-ada-002 (set OPENAI_API_KEY below)
+# Cohere: cohere://embed-english-light-v3.0 (set COHERE_API_KEY below)
+EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
+
+# ------------------------------------------------------------------------------
+# Ports (change these to avoid host conflicts — everything auto-derives)
+# ------------------------------------------------------------------------------
+
+# BACKEND_PORT=8000
+# FRONTEND_PORT=3000
+# ELECTRIC_PORT=5133
+
+# Frontend URL used by backend for CORS and OAuth redirects.
+# Auto-derived from FRONTEND_PORT for localhost. Set explicitly for reverse proxy.
+# NEXT_FRONTEND_URL=http://localhost:3000
+
+# Backend URL for OAuth callback redirects (set when behind a reverse proxy)
+# BACKEND_URL=https://api.yourdomain.com
+
+# ------------------------------------------------------------------------------
+# Frontend URL Overrides (reverse proxy / custom domains)
+# ------------------------------------------------------------------------------
+# These are auto-derived from the port settings above for localhost deployments.
+# You only need to set these explicitly when using a reverse proxy with real
+# domains (e.g. Caddy, Nginx, Cloudflare Tunnel).
+#
+# NEXT_PUBLIC_FASTAPI_BACKEND_URL=https://api.yourdomain.com
+# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
+
+# ------------------------------------------------------------------------------
+# Database (defaults work out of the box, change for security)
+# ------------------------------------------------------------------------------
+
+# DB_PASSWORD=surfsense
+
+# ------------------------------------------------------------------------------
+# TTS & STT (Text-to-Speech / Speech-to-Text)
+# ------------------------------------------------------------------------------
+
+# Local Kokoro TTS (default) or LiteLLM provider
+TTS_SERVICE=local/kokoro
+# TTS_SERVICE_API_KEY=
+# TTS_SERVICE_API_BASE=
+
+# Local Faster-Whisper STT: local/MODEL_SIZE (tiny, base, small, medium, large-v3)
+STT_SERVICE=local/base
+# Or use LiteLLM: openai/whisper-1
+# STT_SERVICE_API_KEY=
+# STT_SERVICE_API_BASE=
+
+# ------------------------------------------------------------------------------
+# Rerankers (optional, disabled by default)
+# ------------------------------------------------------------------------------
+
+# RERANKERS_ENABLED=TRUE
+# RERANKERS_MODEL_NAME=ms-marco-MiniLM-L-12-v2
+# RERANKERS_MODEL_TYPE=flashrank
+
+# ------------------------------------------------------------------------------
+# Google OAuth (only if AUTH_TYPE=GOOGLE)
+# ------------------------------------------------------------------------------
+
+# GOOGLE_OAUTH_CLIENT_ID=
+# GOOGLE_OAUTH_CLIENT_SECRET=
+
+# ------------------------------------------------------------------------------
+# Connector OAuth Keys (uncomment connectors you want to use)
+# ------------------------------------------------------------------------------
+
+# -- Google Connectors --
+# GOOGLE_CALENDAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/calendar/connector/callback
+# GOOGLE_GMAIL_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/gmail/connector/callback
+# GOOGLE_DRIVE_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/drive/connector/callback
+
+# -- Notion --
+# NOTION_CLIENT_ID=
+# NOTION_CLIENT_SECRET=
+# NOTION_REDIRECT_URI=http://localhost:8000/api/v1/auth/notion/connector/callback
+
+# -- Slack --
+# SLACK_CLIENT_ID=
+# SLACK_CLIENT_SECRET=
+# SLACK_REDIRECT_URI=http://localhost:8000/api/v1/auth/slack/connector/callback
+
+# -- Discord --
+# DISCORD_CLIENT_ID=
+# DISCORD_CLIENT_SECRET=
+# DISCORD_REDIRECT_URI=http://localhost:8000/api/v1/auth/discord/connector/callback
+# DISCORD_BOT_TOKEN=
+
+# -- Atlassian (Jira & Confluence) --
+# ATLASSIAN_CLIENT_ID=
+# ATLASSIAN_CLIENT_SECRET=
+# JIRA_REDIRECT_URI=http://localhost:8000/api/v1/auth/jira/connector/callback
+# CONFLUENCE_REDIRECT_URI=http://localhost:8000/api/v1/auth/confluence/connector/callback
+
+# -- Linear --
+# LINEAR_CLIENT_ID=
+# LINEAR_CLIENT_SECRET=
+# LINEAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/linear/connector/callback
+
+# -- ClickUp --
+# CLICKUP_CLIENT_ID=
+# CLICKUP_CLIENT_SECRET=
+# CLICKUP_REDIRECT_URI=http://localhost:8000/api/v1/auth/clickup/connector/callback
+
+# -- Airtable --
+# AIRTABLE_CLIENT_ID=
+# AIRTABLE_CLIENT_SECRET=
+# AIRTABLE_REDIRECT_URI=http://localhost:8000/api/v1/auth/airtable/connector/callback
+
+# -- Microsoft Teams --
+# TEAMS_CLIENT_ID=
+# TEAMS_CLIENT_SECRET=
+# TEAMS_REDIRECT_URI=http://localhost:8000/api/v1/auth/teams/connector/callback
+
+# -- Composio --
+# COMPOSIO_API_KEY=
+# COMPOSIO_ENABLED=TRUE
+# COMPOSIO_REDIRECT_URI=http://localhost:8000/api/v1/auth/composio/connector/callback
+
+# ------------------------------------------------------------------------------
+# External API Keys (optional)
+# ------------------------------------------------------------------------------
+
+# Firecrawl (web scraping)
+# FIRECRAWL_API_KEY=
+
+# Unstructured (if ETL_SERVICE=UNSTRUCTURED)
+# UNSTRUCTURED_API_KEY=
+
+# LlamaCloud (if ETL_SERVICE=LLAMACLOUD)
+# LLAMA_CLOUD_API_KEY=
+
+# ------------------------------------------------------------------------------
+# Observability (optional)
+# ------------------------------------------------------------------------------
+
+# LANGSMITH_TRACING=true
+# LANGSMITH_ENDPOINT=https://api.smith.langchain.com
+# LANGSMITH_API_KEY=
+# LANGSMITH_PROJECT=surfsense
+
+# ------------------------------------------------------------------------------
+# Advanced (optional)
+# ------------------------------------------------------------------------------
+
+# Periodic connector sync interval (default: 5m)
+# SCHEDULE_CHECKER_INTERVAL=5m
+
+# JWT token lifetimes
+# ACCESS_TOKEN_LIFETIME_SECONDS=86400
+# REFRESH_TOKEN_LIFETIME_SECONDS=1209600
+
+# Pages limit per user for ETL (default: unlimited)
+# PAGES_LIMIT=500
+
+# Connector indexing lock TTL in seconds (default: 28800 = 8 hours)
+# CONNECTOR_INDEXING_LOCK_TTL_SECONDS=28800
+
+# Residential proxy for web crawling
+# RESIDENTIAL_PROXY_USERNAME=
+# RESIDENTIAL_PROXY_PASSWORD=
+# RESIDENTIAL_PROXY_HOSTNAME=
+# RESIDENTIAL_PROXY_LOCATION=
+# RESIDENTIAL_PROXY_TYPE=1
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
new file mode 100644
index 000000000..9c1a50ebd
--- /dev/null
+++ b/docker/docker-compose.dev.yml
@@ -0,0 +1,129 @@
+# =============================================================================
+# SurfSense — Development Docker Compose
+# =============================================================================
+# Usage (from repo root):
+# docker compose -f docker/docker-compose.dev.yml up --build
+#
+# This file builds from source and includes dev tools like pgAdmin.
+# For production with prebuilt images, use docker/docker-compose.yml instead.
+# =============================================================================
+
+name: surfsense
+
+services:
+ db:
+ image: pgvector/pgvector:pg17
+ ports:
+ - "${POSTGRES_PORT:-5432}:5432"
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+ - ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
+ - ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
+ environment:
+ - POSTGRES_USER=${POSTGRES_USER:-postgres}
+ - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres}
+ - POSTGRES_DB=${POSTGRES_DB:-surfsense}
+ - ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
+ - ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
+ command: postgres -c config_file=/etc/postgresql/postgresql.conf
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-surfsense}"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ pgadmin:
+ image: dpage/pgadmin4
+ ports:
+ - "${PGADMIN_PORT:-5050}:80"
+ environment:
+ - PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL:-admin@surfsense.com}
+ - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD:-surfsense}
+ volumes:
+ - pgadmin_data:/var/lib/pgadmin
+ depends_on:
+ - db
+
+ redis:
+ image: redis:7-alpine
+ ports:
+ - "${REDIS_PORT:-6379}:6379"
+ volumes:
+ - redis_data:/data
+ command: redis-server --appendonly yes
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ backend:
+ build: ../surfsense_backend
+ ports:
+ - "${BACKEND_PORT:-8000}:8000"
+ volumes:
+ - ../surfsense_backend/app:/app/app
+ - shared_temp:/shared_tmp
+ env_file:
+ - ../surfsense_backend/.env
+ environment:
+ - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - CELERY_BROKER_URL=redis://redis:6379/0
+ - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ - REDIS_APP_URL=redis://redis:6379/0
+ - CELERY_TASK_DEFAULT_QUEUE=surfsense
+ - PYTHONPATH=/app
+ - UVICORN_LOOP=asyncio
+ - UNSTRUCTURED_HAS_PATCHED_LOOP=1
+ - LANGCHAIN_TRACING_V2=false
+ - LANGSMITH_TRACING=false
+ - ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
+ - ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
+ - AUTH_TYPE=${AUTH_TYPE:-LOCAL}
+ - NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+
+ electric:
+ image: electricsql/electric:latest
+ ports:
+ - "${ELECTRIC_PORT:-5133}:3000"
+ # depends_on:
+ # - db
+ environment:
+ - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:5432/${POSTGRES_DB:-surfsense}?sslmode=disable}
+ - ELECTRIC_INSECURE=true
+ - ELECTRIC_WRITE_TO_PG_MODE=direct
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ frontend:
+ build:
+ context: ../surfsense_web
+ args:
+ NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:8000}
+ NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE:-LOCAL}
+ NEXT_PUBLIC_ETL_SERVICE: ${NEXT_PUBLIC_ETL_SERVICE:-DOCLING}
+ NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}
+ NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
+ NEXT_PUBLIC_DEPLOYMENT_MODE: ${NEXT_PUBLIC_DEPLOYMENT_MODE:-self-hosted}
+ ports:
+ - "${FRONTEND_PORT:-3000}:3000"
+ env_file:
+ - ../surfsense_web/.env
+ depends_on:
+ - backend
+ - electric
+
+volumes:
+ postgres_data:
+ pgadmin_data:
+ redis_data:
+ shared_temp:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
new file mode 100644
index 000000000..b6a167c1c
--- /dev/null
+++ b/docker/docker-compose.yml
@@ -0,0 +1,110 @@
+# =============================================================================
+# SurfSense — Production Docker Compose
+# Docs: https://docs.surfsense.com/docs/docker-installation
+# =============================================================================
+# Usage:
+# 1. Copy .env.example to .env and edit the required values
+# 2. docker compose up -d
+# =============================================================================
+
+name: surfsense
+
+services:
+ db:
+ image: pgvector/pgvector:pg17
+ volumes:
+ - postgres_data:/var/lib/postgresql/data
+ - ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
+ - ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
+ environment:
+ POSTGRES_USER: surfsense
+ POSTGRES_PASSWORD: ${DB_PASSWORD:-surfsense}
+ POSTGRES_DB: surfsense
+ ELECTRIC_DB_USER: electric
+ ELECTRIC_DB_PASSWORD: electric_password
+ command: postgres -c config_file=/etc/postgresql/postgresql.conf
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U surfsense -d surfsense"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ redis:
+ image: redis:7-alpine
+ volumes:
+ - redis_data:/data
+ command: redis-server --appendonly yes
+ restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ backend:
+ image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ ports:
+ - "${BACKEND_PORT:-8000}:8000"
+ volumes:
+ - shared_temp:/shared_tmp
+ env_file:
+ - .env
+ environment:
+ DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ CELERY_BROKER_URL: redis://redis:6379/0
+ CELERY_RESULT_BACKEND: redis://redis:6379/0
+ REDIS_APP_URL: redis://redis:6379/0
+ CELERY_TASK_DEFAULT_QUEUE: surfsense
+ PYTHONPATH: /app
+ UVICORN_LOOP: asyncio
+ UNSTRUCTURED_HAS_PATCHED_LOOP: "1"
+ ELECTRIC_DB_USER: electric
+ ELECTRIC_DB_PASSWORD: electric_password
+ NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3000}}
+ SERVICE_ROLE: all
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ restart: unless-stopped
+
+ electric:
+ image: electricsql/electric:latest
+ ports:
+ - "${ELECTRIC_PORT:-5133}:3000"
+ environment:
+ DATABASE_URL: postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
+ ELECTRIC_INSECURE: "true"
+ ELECTRIC_WRITE_TO_PG_MODE: direct
+ restart: unless-stopped
+ depends_on:
+ db:
+ condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
+ interval: 10s
+ timeout: 5s
+ retries: 5
+
+ frontend:
+ image: ghcr.io/modsetter/surfsense_web:${SURFSENSE_VERSION:-latest}
+ ports:
+ - "${FRONTEND_PORT:-3000}:3000"
+ environment:
+ NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:${BACKEND_PORT:-8000}}
+ NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:${ELECTRIC_PORT:-5133}}
+ NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${AUTH_TYPE:-LOCAL}
+ NEXT_PUBLIC_ETL_SERVICE: ${ETL_SERVICE:-DOCLING}
+ NEXT_PUBLIC_DEPLOYMENT_MODE: ${DEPLOYMENT_MODE:-self-hosted}
+ NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
+ depends_on:
+ - backend
+ - electric
+ restart: unless-stopped
+
+volumes:
+ postgres_data:
+ redis_data:
+ shared_temp:
diff --git a/docker/postgresql.conf b/docker/postgresql.conf
new file mode 100644
index 000000000..99b29ba30
--- /dev/null
+++ b/docker/postgresql.conf
@@ -0,0 +1,20 @@
+# PostgreSQL configuration for Electric SQL
+# This file is mounted into the PostgreSQL container
+
+listen_addresses = '*'
+max_connections = 200
+shared_buffers = 256MB
+
+# Enable logical replication (required for Electric SQL)
+wal_level = logical
+max_replication_slots = 10
+max_wal_senders = 10
+
+# Performance settings
+checkpoint_timeout = 10min
+max_wal_size = 1GB
+min_wal_size = 80MB
+
+# Logging (optional, for debugging)
+# log_statement = 'all'
+# log_replication_commands = on
diff --git a/docker/scripts/init-electric-user.sh b/docker/scripts/init-electric-user.sh
new file mode 100644
index 000000000..fbd1c361a
--- /dev/null
+++ b/docker/scripts/init-electric-user.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+# Creates the Electric SQL replication user on first DB initialization.
+# Idempotent — safe to run alongside Alembic migration 66.
+
+set -e
+
+ELECTRIC_DB_USER="${ELECTRIC_DB_USER:-electric}"
+ELECTRIC_DB_PASSWORD="${ELECTRIC_DB_PASSWORD:-electric_password}"
+
+echo "Creating Electric SQL replication user: $ELECTRIC_DB_USER"
+
+psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+ DO \$\$
+ BEGIN
+ IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '$ELECTRIC_DB_USER') THEN
+ CREATE USER $ELECTRIC_DB_USER WITH REPLICATION PASSWORD '$ELECTRIC_DB_PASSWORD';
+ END IF;
+ END
+ \$\$;
+
+ GRANT CONNECT ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
+ GRANT CREATE ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
+ GRANT USAGE ON SCHEMA public TO $ELECTRIC_DB_USER;
+ GRANT SELECT ON ALL TABLES IN SCHEMA public TO $ELECTRIC_DB_USER;
+ GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO $ELECTRIC_DB_USER;
+ ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO $ELECTRIC_DB_USER;
+ ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO $ELECTRIC_DB_USER;
+
+ DO \$\$
+ BEGIN
+ IF NOT EXISTS (SELECT FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
+ CREATE PUBLICATION electric_publication_default;
+ END IF;
+ END
+ \$\$;
+EOSQL
+
+echo "Electric SQL user '$ELECTRIC_DB_USER' and publication created successfully"
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
new file mode 100644
index 000000000..2d7308023
--- /dev/null
+++ b/docker/scripts/install.sh
@@ -0,0 +1,92 @@
+#!/usr/bin/env bash
+# =============================================================================
+# SurfSense — One-line Install Script
+# Usage: curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
+# =============================================================================
+
+set -euo pipefail
+
+REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
+INSTALL_DIR="./surfsense"
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+RED='\033[0;31m'
+NC='\033[0m'
+
+info() { printf "${GREEN}[SurfSense]${NC} %s\n" "$1"; }
+warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; }
+error() { printf "${RED}[SurfSense]${NC} %s\n" "$1" >&2; exit 1; }
+
+# ── Pre-flight checks ───────────────────────────────────────────────────────
+
+command -v docker >/dev/null 2>&1 || error "Docker is not installed. Please install Docker first: https://docs.docker.com/get-docker/"
+
+if docker compose version >/dev/null 2>&1; then
+ DC="docker compose"
+elif command -v docker-compose >/dev/null 2>&1; then
+ DC="docker-compose"
+else
+ error "Docker Compose is not installed. Please install Docker Compose: https://docs.docker.com/compose/install/"
+fi
+
+# ── Download files ───────────────────────────────────────────────────────────
+
+info "Creating installation directory: ${INSTALL_DIR}"
+mkdir -p "${INSTALL_DIR}/scripts"
+
+FILES=(
+    "docker/docker-compose.yml:docker-compose.yml"
+    "docker/.env.example:.env.example"
+    "docker/postgresql.conf:postgresql.conf"
+    "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
+)
+
+for entry in "${FILES[@]}"; do
+    src="${entry%%:*}"
+    dest="${entry##*:}"
+    info "Downloading ${dest}..."
+    curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" || error "Failed to download ${src}"
+done
+
+chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
+
+# ── Set up .env ──────────────────────────────────────────────────────────────
+
+if [ ! -f "${INSTALL_DIR}/.env" ]; then
+ cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env"
+
+ SECRET_KEY=$(openssl rand -base64 32 2>/dev/null || head -c 32 /dev/urandom | base64)
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+ sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env"
+ else
+ sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env"
+ fi
+
+ info "Generated random SECRET_KEY in .env"
+else
+ warn ".env already exists — skipping (your existing config is preserved)"
+fi
+
+# ── Start containers ─────────────────────────────────────────────────────────
+
+info "Starting SurfSense..."
+cd "${INSTALL_DIR}"
+${DC} up -d
+
+echo ""
+info "=========================================="
+info " SurfSense is starting up!"
+info "=========================================="
+info ""
+info " Frontend: http://localhost:3000"
+info " Backend: http://localhost:8000"
+info " API Docs: http://localhost:8000/docs"
+info ""
+info " Config: ${INSTALL_DIR}/.env"
+info " Logs: cd ${INSTALL_DIR} && ${DC} logs -f"
+info " Stop: cd ${INSTALL_DIR} && ${DC} down"
+info " Update: cd ${INSTALL_DIR} && ${DC} pull && ${DC} up -d"
+info ""
+warn " First startup may take a few minutes while images are pulled."
+warn " Edit .env to configure OAuth connectors, API keys, etc."
+info "=========================================="
diff --git a/surfsense_web/Dockerfile b/surfsense_web/Dockerfile
index e0ecb1225..51f65da5e 100644
--- a/surfsense_web/Dockerfile
+++ b/surfsense_web/Dockerfile
@@ -29,15 +29,22 @@ WORKDIR /app
# Enable pnpm
RUN corepack enable pnpm
-# Accept build arguments for Next.js public env vars
-ARG NEXT_PUBLIC_FASTAPI_BACKEND_URL
-ARG NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE
-ARG NEXT_PUBLIC_ETL_SERVICE
+# Build with placeholder values for NEXT_PUBLIC_* variables.
+# These are replaced at container startup by docker-entrypoint.js
+# with real values from the container's environment variables.
+ARG NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__
+ARG NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__
+ARG NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__
+ARG NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__
+ARG NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__
+ARG NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__
-# Set them as environment variables for the build
ENV NEXT_PUBLIC_FASTAPI_BACKEND_URL=$NEXT_PUBLIC_FASTAPI_BACKEND_URL
ENV NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=$NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE
ENV NEXT_PUBLIC_ETL_SERVICE=$NEXT_PUBLIC_ETL_SERVICE
+ENV NEXT_PUBLIC_ELECTRIC_URL=$NEXT_PUBLIC_ELECTRIC_URL
+ENV NEXT_PUBLIC_ELECTRIC_AUTH_MODE=$NEXT_PUBLIC_ELECTRIC_AUTH_MODE
+ENV NEXT_PUBLIC_DEPLOYMENT_MODE=$NEXT_PUBLIC_DEPLOYMENT_MODE
COPY --from=deps /app/node_modules ./node_modules
COPY . .
@@ -67,6 +74,10 @@ COPY --from=builder /app/public ./public
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
+# Entrypoint scripts for runtime env var substitution
+COPY --chown=nextjs:nodejs docker-entrypoint.js ./docker-entrypoint.js
+COPY --chown=nextjs:nodejs --chmod=755 docker-entrypoint.sh ./docker-entrypoint.sh
+
USER nextjs
EXPOSE 3000
@@ -76,4 +87,4 @@ ENV PORT=3000
# server.js is created by next build from the standalone output
# https://nextjs.org/docs/pages/api-reference/config/next-config-js/output
ENV HOSTNAME="0.0.0.0"
-CMD ["node", "server.js"]
\ No newline at end of file
+ENTRYPOINT ["/bin/sh", "./docker-entrypoint.sh"]
\ No newline at end of file
diff --git a/surfsense_web/docker-entrypoint.js b/surfsense_web/docker-entrypoint.js
new file mode 100644
index 000000000..7fe697e46
--- /dev/null
+++ b/surfsense_web/docker-entrypoint.js
@@ -0,0 +1,100 @@
+/**
+ * Runtime environment variable substitution for Next.js Docker images.
+ *
+ * Next.js inlines NEXT_PUBLIC_* values at build time. The Docker image is built
+ * with unique placeholder strings (e.g. __NEXT_PUBLIC_FASTAPI_BACKEND_URL__).
+ * This script replaces those placeholders with real values from the container's
+ * environment variables before the server starts.
+ *
+ * Runs once at container startup via docker-entrypoint.sh.
+ */
+
+const fs = require("fs");
+const path = require("path");
+
+const replacements = [
+ [
+ "__NEXT_PUBLIC_FASTAPI_BACKEND_URL__",
+ process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL || "http://localhost:8000",
+ ],
+ [
+ "__NEXT_PUBLIC_ELECTRIC_URL__",
+ process.env.NEXT_PUBLIC_ELECTRIC_URL || "http://localhost:5133",
+ ],
+ [
+ "__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__",
+ process.env.NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE || "LOCAL",
+ ],
+ [
+ "__NEXT_PUBLIC_ETL_SERVICE__",
+ process.env.NEXT_PUBLIC_ETL_SERVICE || "DOCLING",
+ ],
+ [
+ "__NEXT_PUBLIC_DEPLOYMENT_MODE__",
+ process.env.NEXT_PUBLIC_DEPLOYMENT_MODE || "self-hosted",
+ ],
+ [
+ "__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__",
+ process.env.NEXT_PUBLIC_ELECTRIC_AUTH_MODE || "insecure",
+ ],
+];
+
+let filesProcessed = 0;
+let filesModified = 0;
+
+function walk(dir) {
+ let entries;
+ try {
+ entries = fs.readdirSync(dir, { withFileTypes: true });
+ } catch {
+ return;
+ }
+ for (const entry of entries) {
+ const full = path.join(dir, entry.name);
+ if (entry.isDirectory()) {
+ walk(full);
+ } else if (entry.name.endsWith(".js")) {
+ filesProcessed++;
+ let content = fs.readFileSync(full, "utf8");
+ let changed = false;
+ for (const [placeholder, value] of replacements) {
+ if (content.includes(placeholder)) {
+ content = content.replaceAll(placeholder, value);
+ changed = true;
+ }
+ }
+ if (changed) {
+ fs.writeFileSync(full, content);
+ filesModified++;
+ }
+ }
+ }
+}
+
+console.log("[entrypoint] Replacing environment variable placeholders...");
+for (const [placeholder, value] of replacements) {
+ console.log(` ${placeholder} -> ${value}`);
+}
+
+walk(path.join(__dirname, ".next"));
+
+const serverJs = path.join(__dirname, "server.js");
+if (fs.existsSync(serverJs)) {
+ let content = fs.readFileSync(serverJs, "utf8");
+ let changed = false;
+ filesProcessed++;
+ for (const [placeholder, value] of replacements) {
+ if (content.includes(placeholder)) {
+ content = content.replaceAll(placeholder, value);
+ changed = true;
+ }
+ }
+ if (changed) {
+ fs.writeFileSync(serverJs, content);
+ filesModified++;
+ }
+}
+
+console.log(
+ `[entrypoint] Done. Scanned ${filesProcessed} files, modified ${filesModified}.`
+);
diff --git a/surfsense_web/docker-entrypoint.sh b/surfsense_web/docker-entrypoint.sh
new file mode 100644
index 000000000..7f4dfbf25
--- /dev/null
+++ b/surfsense_web/docker-entrypoint.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -e
+
+node /app/docker-entrypoint.js
+
+exec node server.js
From 68be0d867582d11c39e5bff99becaa67c8b36b93 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Tue, 24 Feb 2026 23:01:38 +0530
Subject: [PATCH 02/57] chore: remove legacy Docker configuration files and
initialization scripts for SurfSense
---
Dockerfile.allinone | 285 ------------------------
docker-compose.quickstart.yml | 80 -------
docker-compose.yml | 165 --------------
scripts/docker/entrypoint-allinone.sh | 243 --------------------
scripts/docker/init-electric-user.sh | 56 -----
scripts/docker/init-postgres.sh | 77 -------
scripts/docker/postgresql.conf | 20 --
scripts/docker/supervisor-allinone.conf | 121 ----------
8 files changed, 1047 deletions(-)
delete mode 100644 Dockerfile.allinone
delete mode 100644 docker-compose.quickstart.yml
delete mode 100644 docker-compose.yml
delete mode 100644 scripts/docker/entrypoint-allinone.sh
delete mode 100755 scripts/docker/init-electric-user.sh
delete mode 100644 scripts/docker/init-postgres.sh
delete mode 100644 scripts/docker/postgresql.conf
delete mode 100644 scripts/docker/supervisor-allinone.conf
diff --git a/Dockerfile.allinone b/Dockerfile.allinone
deleted file mode 100644
index e96618adc..000000000
--- a/Dockerfile.allinone
+++ /dev/null
@@ -1,285 +0,0 @@
-# SurfSense All-in-One Docker Image
-# This image bundles PostgreSQL+pgvector, Redis, Electric SQL, Backend, and Frontend
-# Usage: docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense ghcr.io/modsetter/surfsense:latest
-#
-# Included Services (all run locally by default):
-# - PostgreSQL 14 + pgvector (vector database)
-# - Redis (task queue)
-# - Electric SQL (real-time sync)
-# - Docling (document processing, CPU-only, OCR disabled)
-# - Kokoro TTS (local text-to-speech for podcasts)
-# - Faster-Whisper (local speech-to-text for audio files)
-# - Playwright Chromium (web scraping)
-#
-# Note: This is the CPU-only version. A :cuda tagged image with GPU support
-# will be available in the future for faster AI inference.
-
-# ====================
-# Stage 1: Get Electric SQL Binary
-# ====================
-FROM electricsql/electric:latest AS electric-builder
-
-# ====================
-# Stage 2: Build Frontend
-# ====================
-FROM node:20-alpine AS frontend-builder
-
-WORKDIR /app
-
-# Install pnpm
-RUN corepack enable pnpm
-
-# Copy package files
-COPY surfsense_web/package.json surfsense_web/pnpm-lock.yaml* ./
-COPY surfsense_web/source.config.ts ./
-COPY surfsense_web/content ./content
-
-# Install dependencies (skip postinstall which requires all source files)
-RUN pnpm install --frozen-lockfile --ignore-scripts
-
-# Copy source
-COPY surfsense_web/ ./
-
-# Run fumadocs-mdx postinstall now that source files are available
-RUN pnpm fumadocs-mdx
-
-# Build with placeholder values that will be replaced at runtime
-# These unique strings allow runtime substitution via entrypoint script
-ENV NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__
-ENV NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__
-ENV NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__
-ENV NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__
-ENV NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__
-ENV NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__
-
-# Build
-RUN pnpm run build
-
-# ====================
-# Stage 3: Runtime Image
-# ====================
-FROM ubuntu:22.04 AS runtime
-
-# Prevent interactive prompts
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Install system dependencies
-RUN apt-get update && apt-get install -y --no-install-recommends \
- # PostgreSQL
- postgresql-14 \
- postgresql-contrib-14 \
- # Build tools for pgvector
- build-essential \
- postgresql-server-dev-14 \
- git \
- # Redis
- redis-server \
- # Node.js prerequisites
- curl \
- ca-certificates \
- gnupg \
- # Backend dependencies
- gcc \
- wget \
- unzip \
- dos2unix \
- # For PPAs
- software-properties-common \
- # ============================
- # Local TTS (Kokoro) dependencies
- # ============================
- espeak-ng \
- libespeak-ng1 \
- # ============================
- # Local STT (Faster-Whisper) dependencies
- # ============================
- ffmpeg \
- # ============================
- # Audio processing (soundfile)
- # ============================
- libsndfile1 \
- # ============================
- # Image/OpenCV dependencies (for Docling)
- # ============================
- libgl1 \
- libglib2.0-0 \
- libsm6 \
- libxext6 \
- libxrender1 \
- # ============================
- # Playwright browser dependencies
- # ============================
- libnspr4 \
- libnss3 \
- libatk1.0-0 \
- libatk-bridge2.0-0 \
- libcups2 \
- libxkbcommon0 \
- libatspi2.0-0 \
- libxcomposite1 \
- libxdamage1 \
- libxrandr2 \
- libgbm1 \
- libcairo2 \
- libpango-1.0-0 \
- && rm -rf /var/lib/apt/lists/*
-
-# Install Pandoc 3.x from GitHub (apt ships 2.9 which has broken table rendering).
-RUN ARCH=$(dpkg --print-architecture) && \
- wget -qO /tmp/pandoc.deb "https://github.com/jgm/pandoc/releases/download/3.9/pandoc-3.9-1-${ARCH}.deb" && \
- dpkg -i /tmp/pandoc.deb && \
- rm /tmp/pandoc.deb
-
-
-# Install Node.js 20.x (for running frontend)
-RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
- && apt-get install -y nodejs \
- && rm -rf /var/lib/apt/lists/*
-
-# Install Python 3.12 from deadsnakes PPA
-RUN add-apt-repository ppa:deadsnakes/ppa -y \
- && apt-get update \
- && apt-get install -y --no-install-recommends \
- python3.12 \
- python3.12-venv \
- python3.12-dev \
- && rm -rf /var/lib/apt/lists/*
-
-# Set Python 3.12 as default
-RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1 \
- && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1
-
-# Install pip for Python 3.12
-RUN python3.12 -m ensurepip --upgrade \
- && python3.12 -m pip install --upgrade pip
-
-# Install supervisor via pip (system package incompatible with Python 3.12)
-RUN pip install --no-cache-dir supervisor
-
-# Build and install pgvector
-RUN cd /tmp \
- && git clone --branch v0.7.4 https://github.com/pgvector/pgvector.git \
- && cd pgvector \
- && make \
- && make install \
- && rm -rf /tmp/pgvector
-
-# Update certificates
-RUN update-ca-certificates
-
-# Create data directories
-RUN mkdir -p /data/postgres /data/redis /data/surfsense \
- && chown -R postgres:postgres /data/postgres
-
-# ====================
-# Copy Frontend Build
-# ====================
-WORKDIR /app/frontend
-
-# Copy only the standalone build (not node_modules)
-COPY --from=frontend-builder /app/.next/standalone ./
-COPY --from=frontend-builder /app/.next/static ./.next/static
-COPY --from=frontend-builder /app/public ./public
-
-COPY surfsense_web/content/docs /app/surfsense_web/content/docs
-
-# ====================
-# Copy Electric SQL Release
-# ====================
-COPY --from=electric-builder /app /app/electric-release
-
-# ====================
-# Setup Backend
-# ====================
-WORKDIR /app/backend
-
-# Copy backend dependency files
-COPY surfsense_backend/pyproject.toml surfsense_backend/uv.lock ./
-
-# Install PyTorch CPU-only (Docling needs it but OCR is disabled, no GPU needed)
-RUN pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu
-
-# Install python dependencies
-RUN pip install --no-cache-dir certifi pip-system-certs uv \
- && uv pip install --system --no-cache-dir -e .
-
-# Set SSL environment variables
-RUN CERTIFI_PATH=$(python -c "import certifi; print(certifi.where())") \
- && echo "export SSL_CERT_FILE=$CERTIFI_PATH" >> /etc/profile.d/ssl.sh \
- && echo "export REQUESTS_CA_BUNDLE=$CERTIFI_PATH" >> /etc/profile.d/ssl.sh
-
-# Note: EasyOCR models NOT downloaded - OCR is disabled in docling_service.py
-# GPU support will be added in a future :cuda tagged image
-
-# Install Playwright browsers
-RUN pip install --no-cache-dir playwright \
- && playwright install chromium \
- && rm -rf /root/.cache/ms-playwright/ffmpeg*
-
-# Copy backend source
-COPY surfsense_backend/ ./
-
-# ====================
-# Configuration
-# ====================
-WORKDIR /app
-
-# Copy supervisor configuration
-COPY scripts/docker/supervisor-allinone.conf /etc/supervisor/conf.d/surfsense.conf
-
-# Copy entrypoint script
-COPY scripts/docker/entrypoint-allinone.sh /app/entrypoint.sh
-RUN dos2unix /app/entrypoint.sh && chmod +x /app/entrypoint.sh
-
-# PostgreSQL initialization script
-COPY scripts/docker/init-postgres.sh /app/init-postgres.sh
-RUN dos2unix /app/init-postgres.sh && chmod +x /app/init-postgres.sh
-
-# Clean up build dependencies to reduce image size
-RUN apt-get purge -y build-essential postgresql-server-dev-14 \
- && apt-get autoremove -y \
- && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
-
-# Environment variables with defaults
-ENV POSTGRES_USER=surfsense
-ENV POSTGRES_PASSWORD=surfsense
-ENV POSTGRES_DB=surfsense
-ENV DATABASE_URL=postgresql+asyncpg://surfsense:surfsense@localhost:5432/surfsense
-ENV CELERY_BROKER_URL=redis://localhost:6379/0
-ENV CELERY_RESULT_BACKEND=redis://localhost:6379/0
-ENV CELERY_TASK_DEFAULT_QUEUE=surfsense
-ENV PYTHONPATH=/app/backend
-ENV NEXT_FRONTEND_URL=http://localhost:3000
-ENV AUTH_TYPE=LOCAL
-ENV ETL_SERVICE=DOCLING
-ENV EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
-
-# Frontend configuration (can be overridden at runtime)
-# These are injected into the Next.js build at container startup
-ENV NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000
-ENV NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
-ENV NEXT_PUBLIC_ETL_SERVICE=DOCLING
-
-# Electric SQL configuration (ELECTRIC_DATABASE_URL is built dynamically by entrypoint from these values)
-ENV ELECTRIC_DB_USER=electric
-ENV ELECTRIC_DB_PASSWORD=electric_password
-# Note: ELECTRIC_DATABASE_URL is NOT set here - entrypoint builds it dynamically from ELECTRIC_DB_USER/PASSWORD
-ENV ELECTRIC_INSECURE=true
-ENV ELECTRIC_WRITE_TO_PG_MODE=direct
-ENV ELECTRIC_PORT=5133
-ENV PORT=5133
-ENV NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
-ENV NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
-
-# Data volume
-VOLUME ["/data"]
-
-# Expose ports (Frontend: 3000, Backend: 8000, Electric: 5133)
-EXPOSE 3000 8000 5133
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=10s --start-period=120s --retries=3 \
- CMD curl -f http://localhost:3000 || exit 1
-
-# Run entrypoint
-CMD ["/app/entrypoint.sh"]
diff --git a/docker-compose.quickstart.yml b/docker-compose.quickstart.yml
deleted file mode 100644
index ff72618b7..000000000
--- a/docker-compose.quickstart.yml
+++ /dev/null
@@ -1,80 +0,0 @@
-# SurfSense Quick Start Docker Compose
-#
-# This is a simplified docker-compose for quick local deployment using pre-built images.
-# For production or customized deployments, use the main docker-compose.yml
-#
-# Usage:
-# 1. (Optional) Create a .env file with your configuration
-# 2. Run: docker compose -f docker-compose.quickstart.yml up -d
-# 3. Access SurfSense at http://localhost:3000
-#
-# All Environment Variables are Optional:
-# - SECRET_KEY: JWT secret key (auto-generated and persisted if not set)
-# - EMBEDDING_MODEL: Embedding model to use (default: sentence-transformers/all-MiniLM-L6-v2)
-# - ETL_SERVICE: Document parsing service - DOCLING, UNSTRUCTURED, or LLAMACLOUD (default: DOCLING)
-# - TTS_SERVICE: Text-to-speech service for podcasts (default: local/kokoro)
-# - STT_SERVICE: Speech-to-text service with model size (default: local/base)
-# - FIRECRAWL_API_KEY: For web crawling features
-
-version: "3.8"
-
-services:
- # All-in-one SurfSense container
- surfsense:
- image: ghcr.io/modsetter/surfsense:latest
- container_name: surfsense
- ports:
- - "${FRONTEND_PORT:-3000}:3000"
- - "${BACKEND_PORT:-8000}:8000"
- volumes:
- - surfsense-data:/data
- environment:
- # Authentication (auto-generated if not set)
- - SECRET_KEY=${SECRET_KEY:-}
-
- # Auth Configuration
- - AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID:-}
- - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET:-}
-
- # AI/ML Configuration
- - EMBEDDING_MODEL=${EMBEDDING_MODEL:-sentence-transformers/all-MiniLM-L6-v2}
- - RERANKERS_ENABLED=${RERANKERS_ENABLED:-FALSE}
- - RERANKERS_MODEL_NAME=${RERANKERS_MODEL_NAME:-}
- - RERANKERS_MODEL_TYPE=${RERANKERS_MODEL_TYPE:-}
-
- # Document Processing
- - ETL_SERVICE=${ETL_SERVICE:-DOCLING}
- - UNSTRUCTURED_API_KEY=${UNSTRUCTURED_API_KEY:-}
- - LLAMA_CLOUD_API_KEY=${LLAMA_CLOUD_API_KEY:-}
-
- # Audio Services
- - TTS_SERVICE=${TTS_SERVICE:-local/kokoro}
- - TTS_SERVICE_API_KEY=${TTS_SERVICE_API_KEY:-}
- - STT_SERVICE=${STT_SERVICE:-local/base}
- - STT_SERVICE_API_KEY=${STT_SERVICE_API_KEY:-}
-
- # Web Crawling
- - FIRECRAWL_API_KEY=${FIRECRAWL_API_KEY:-}
-
- # Optional Features
- - REGISTRATION_ENABLED=${REGISTRATION_ENABLED:-TRUE}
- - SCHEDULE_CHECKER_INTERVAL=${SCHEDULE_CHECKER_INTERVAL:-1m}
-
- # LangSmith Observability (optional)
- - LANGSMITH_TRACING=${LANGSMITH_TRACING:-false}
- - LANGSMITH_ENDPOINT=${LANGSMITH_ENDPOINT:-}
- - LANGSMITH_API_KEY=${LANGSMITH_API_KEY:-}
- - LANGSMITH_PROJECT=${LANGSMITH_PROJECT:-}
- restart: unless-stopped
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:3000", "&&", "curl", "-f", "http://localhost:8000/docs"]
- interval: 30s
- timeout: 10s
- retries: 3
- start_period: 120s
-
-volumes:
- surfsense-data:
- name: surfsense-data
-
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 2bf62b883..000000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,165 +0,0 @@
-services:
- db:
- image: ankane/pgvector:latest
- ports:
- - "${POSTGRES_PORT:-5432}:5432"
- volumes:
- - postgres_data:/var/lib/postgresql/data
- - ./scripts/docker/postgresql.conf:/etc/postgresql/postgresql.conf:ro
- - ./scripts/docker/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
- environment:
- - POSTGRES_USER=${POSTGRES_USER:-postgres}
- - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres}
- - POSTGRES_DB=${POSTGRES_DB:-surfsense}
- - ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- - ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- command: postgres -c config_file=/etc/postgresql/postgresql.conf
-
- pgadmin:
- image: dpage/pgadmin4
- ports:
- - "${PGADMIN_PORT:-5050}:80"
- environment:
- - PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL:-admin@surfsense.com}
- - PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD:-surfsense}
- volumes:
- - pgadmin_data:/var/lib/pgadmin
- depends_on:
- - db
-
- redis:
- image: redis:7-alpine
- ports:
- - "${REDIS_PORT:-6379}:6379"
- volumes:
- - redis_data:/data
- command: redis-server --appendonly yes
-
- backend:
- build: ./surfsense_backend
- # image: ghcr.io/modsetter/surfsense_backend:latest
- ports:
- - "${BACKEND_PORT:-8000}:8000"
- volumes:
- - ./surfsense_backend/app:/app/app
- - shared_temp:/tmp
- # Uncomment and edit the line below to enable Obsidian vault indexing
- # - /path/to/your/obsidian/vault:/obsidian-vault:ro
- env_file:
- - ./surfsense_backend/.env
- environment:
- - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
- - CELERY_BROKER_URL=redis://redis:${REDIS_PORT:-6379}/0
- - CELERY_RESULT_BACKEND=redis://redis:${REDIS_PORT:-6379}/0
- - REDIS_APP_URL=redis://redis:${REDIS_PORT:-6379}/0
- # Queue name isolation - prevents task collision if Redis is shared with other apps
- - CELERY_TASK_DEFAULT_QUEUE=surfsense
- - PYTHONPATH=/app
- - UVICORN_LOOP=asyncio
- - UNSTRUCTURED_HAS_PATCHED_LOOP=1
- - LANGCHAIN_TRACING_V2=false
- - LANGSMITH_TRACING=false
- - ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- - ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- - AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- - NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
- depends_on:
- - db
- - redis
-
- # Run these services separately in production
- # celery_worker:
- # build: ./surfsense_backend
- # # image: ghcr.io/modsetter/surfsense_backend:latest
- # command: celery -A app.celery_app worker --loglevel=info --concurrency=1 --pool=solo
- # volumes:
- # - ./surfsense_backend:/app
- # - shared_temp:/tmp
- # env_file:
- # - ./surfsense_backend/.env
- # environment:
- # - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
- # - CELERY_BROKER_URL=redis://redis:${REDIS_PORT:-6379}/0
- # - CELERY_RESULT_BACKEND=redis://redis:${REDIS_PORT:-6379}/0
- # - PYTHONPATH=/app
- # depends_on:
- # - db
- # - redis
- # - backend
-
- # celery_beat:
- # build: ./surfsense_backend
- # # image: ghcr.io/modsetter/surfsense_backend:latest
- # command: celery -A app.celery_app beat --loglevel=info
- # volumes:
- # - ./surfsense_backend:/app
- # - shared_temp:/tmp
- # env_file:
- # - ./surfsense_backend/.env
- # environment:
- # - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
- # - CELERY_BROKER_URL=redis://redis:${REDIS_PORT:-6379}/0
- # - CELERY_RESULT_BACKEND=redis://redis:${REDIS_PORT:-6379}/0
- # - PYTHONPATH=/app
- # depends_on:
- # - db
- # - redis
- # - celery_worker
-
- # flower:
- # build: ./surfsense_backend
- # # image: ghcr.io/modsetter/surfsense_backend:latest
- # command: celery -A app.celery_app flower --port=5555
- # ports:
- # - "${FLOWER_PORT:-5555}:5555"
- # env_file:
- # - ./surfsense_backend/.env
- # environment:
- # - CELERY_BROKER_URL=redis://redis:${REDIS_PORT:-6379}/0
- # - CELERY_RESULT_BACKEND=redis://redis:${REDIS_PORT:-6379}/0
- # - PYTHONPATH=/app
- # depends_on:
- # - redis
- # - celery_worker
-
- electric:
- image: electricsql/electric:latest
- ports:
- - "${ELECTRIC_PORT:-5133}:3000"
- environment:
- - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-surfsense}?sslmode=disable}
- - ELECTRIC_INSECURE=true
- - ELECTRIC_WRITE_TO_PG_MODE=direct
- restart: unless-stopped
- # depends_on:
- # - db
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
- interval: 10s
- timeout: 5s
- retries: 5
-
- frontend:
- build:
- context: ./surfsense_web
- # image: ghcr.io/modsetter/surfsense_ui:latest
- args:
- NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:8000}
- NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE:-LOCAL}
- NEXT_PUBLIC_ETL_SERVICE: ${NEXT_PUBLIC_ETL_SERVICE:-DOCLING}
- ports:
- - "${FRONTEND_PORT:-3000}:3000"
- env_file:
- - ./surfsense_web/.env
- environment:
- - NEXT_PUBLIC_ELECTRIC_URL=${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}
- - NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
- depends_on:
- - backend
- - electric
-
-volumes:
- postgres_data:
- pgadmin_data:
- redis_data:
- shared_temp:
diff --git a/scripts/docker/entrypoint-allinone.sh b/scripts/docker/entrypoint-allinone.sh
deleted file mode 100644
index 4f88b3382..000000000
--- a/scripts/docker/entrypoint-allinone.sh
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/bin/bash
-set -e
-
-echo "==========================================="
-echo " 🏄 SurfSense All-in-One Container"
-echo "==========================================="
-
-# Create log directory
-mkdir -p /var/log/supervisor
-
-# ================================================
-# Ensure data directory exists
-# ================================================
-mkdir -p /data
-
-# ================================================
-# Generate SECRET_KEY if not provided
-# ================================================
-if [ -z "$SECRET_KEY" ]; then
- # Generate a random secret key and persist it
- if [ -f /data/.secret_key ]; then
- export SECRET_KEY=$(cat /data/.secret_key)
- echo "✅ Using existing SECRET_KEY from persistent storage"
- else
- export SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_urlsafe(32))")
- echo "$SECRET_KEY" > /data/.secret_key
- chmod 600 /data/.secret_key
- echo "✅ Generated new SECRET_KEY (saved for persistence)"
- fi
-fi
-
-# ================================================
-# Set default TTS/STT services if not provided
-# ================================================
-if [ -z "$TTS_SERVICE" ]; then
- export TTS_SERVICE="local/kokoro"
- echo "✅ Using default TTS_SERVICE: local/kokoro"
-fi
-
-if [ -z "$STT_SERVICE" ]; then
- export STT_SERVICE="local/base"
- echo "✅ Using default STT_SERVICE: local/base"
-fi
-
-# ================================================
-# Set Electric SQL configuration
-# ================================================
-export ELECTRIC_DB_USER="${ELECTRIC_DB_USER:-electric}"
-export ELECTRIC_DB_PASSWORD="${ELECTRIC_DB_PASSWORD:-electric_password}"
-if [ -z "$ELECTRIC_DATABASE_URL" ]; then
- export ELECTRIC_DATABASE_URL="postgresql://${ELECTRIC_DB_USER}:${ELECTRIC_DB_PASSWORD}@localhost:5432/${POSTGRES_DB:-surfsense}?sslmode=disable"
- echo "✅ Electric SQL URL configured dynamically"
-else
- # Ensure sslmode=disable is in the URL if not already present
- if [[ "$ELECTRIC_DATABASE_URL" != *"sslmode="* ]]; then
- # Add sslmode=disable (handle both cases: with or without existing query params)
- if [[ "$ELECTRIC_DATABASE_URL" == *"?"* ]]; then
- export ELECTRIC_DATABASE_URL="${ELECTRIC_DATABASE_URL}&sslmode=disable"
- else
- export ELECTRIC_DATABASE_URL="${ELECTRIC_DATABASE_URL}?sslmode=disable"
- fi
- fi
- echo "✅ Electric SQL URL configured from environment"
-fi
-
-# Set Electric SQL port
-export ELECTRIC_PORT="${ELECTRIC_PORT:-5133}"
-export PORT="${ELECTRIC_PORT}"
-
-# ================================================
-# Initialize PostgreSQL if needed
-# ================================================
-if [ ! -f /data/postgres/PG_VERSION ]; then
- echo "📦 Initializing PostgreSQL database..."
-
- # Initialize PostgreSQL data directory
- chown -R postgres:postgres /data/postgres
- chmod 700 /data/postgres
-
- # Initialize with UTF8 encoding (required for proper text handling)
- su - postgres -c "/usr/lib/postgresql/14/bin/initdb -D /data/postgres --encoding=UTF8 --locale=C.UTF-8"
-
- # Configure PostgreSQL for connections
- echo "host all all 0.0.0.0/0 md5" >> /data/postgres/pg_hba.conf
- echo "local all all trust" >> /data/postgres/pg_hba.conf
- echo "listen_addresses='*'" >> /data/postgres/postgresql.conf
-
- # Enable logical replication for Electric SQL
- echo "wal_level = logical" >> /data/postgres/postgresql.conf
- echo "max_replication_slots = 10" >> /data/postgres/postgresql.conf
- echo "max_wal_senders = 10" >> /data/postgres/postgresql.conf
-
- # Start PostgreSQL temporarily to create database and user
- su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /data/postgres -l /tmp/postgres_init.log start"
-
- # Wait for PostgreSQL to be ready
- sleep 5
-
- # Create user and database
- su - postgres -c "psql -c \"CREATE USER ${POSTGRES_USER:-surfsense} WITH PASSWORD '${POSTGRES_PASSWORD:-surfsense}' SUPERUSER;\""
- su - postgres -c "psql -c \"CREATE DATABASE ${POSTGRES_DB:-surfsense} OWNER ${POSTGRES_USER:-surfsense};\""
-
- # Enable pgvector extension
- su - postgres -c "psql -d ${POSTGRES_DB:-surfsense} -c 'CREATE EXTENSION IF NOT EXISTS vector;'"
-
- # Create Electric SQL replication user (idempotent - uses IF NOT EXISTS)
- echo "📡 Creating Electric SQL replication user..."
- su - postgres -c "psql -d ${POSTGRES_DB:-surfsense} <<-EOSQL
- DO \\\$\\\$
- BEGIN
- IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '${ELECTRIC_DB_USER}') THEN
- CREATE USER ${ELECTRIC_DB_USER} WITH REPLICATION PASSWORD '${ELECTRIC_DB_PASSWORD}';
- END IF;
- END
- \\\$\\\$;
-
- GRANT CONNECT ON DATABASE ${POSTGRES_DB:-surfsense} TO ${ELECTRIC_DB_USER};
- GRANT USAGE ON SCHEMA public TO ${ELECTRIC_DB_USER};
- GRANT SELECT ON ALL TABLES IN SCHEMA public TO ${ELECTRIC_DB_USER};
- GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO ${ELECTRIC_DB_USER};
- ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO ${ELECTRIC_DB_USER};
- ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO ${ELECTRIC_DB_USER};
-
- -- Create the publication for Electric SQL (if not exists)
- DO \\\$\\\$
- BEGIN
- IF NOT EXISTS (SELECT FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
- CREATE PUBLICATION electric_publication_default;
- END IF;
- END
- \\\$\\\$;
-EOSQL"
- echo "✅ Electric SQL user '${ELECTRIC_DB_USER}' created"
-
- # Stop temporary PostgreSQL
- su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /data/postgres stop"
-
- echo "✅ PostgreSQL initialized successfully"
-else
- echo "✅ PostgreSQL data directory already exists"
-fi
-
-# ================================================
-# Initialize Redis data directory
-# ================================================
-mkdir -p /data/redis
-chmod 755 /data/redis
-echo "✅ Redis data directory ready"
-
-# ================================================
-# Copy frontend build to runtime location
-# ================================================
-if [ -d /app/frontend/.next/standalone ]; then
- cp -r /app/frontend/.next/standalone/* /app/frontend/ 2>/dev/null || true
- cp -r /app/frontend/.next/static /app/frontend/.next/static 2>/dev/null || true
-fi
-
-# ================================================
-# Runtime Environment Variable Replacement
-# ================================================
-# Next.js NEXT_PUBLIC_* vars are baked in at build time.
-# This replaces placeholder values with actual runtime env vars.
-echo "🔧 Applying runtime environment configuration..."
-
-# Set defaults if not provided
-NEXT_PUBLIC_FASTAPI_BACKEND_URL="${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:8000}"
-NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE="${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE:-LOCAL}"
-NEXT_PUBLIC_ETL_SERVICE="${NEXT_PUBLIC_ETL_SERVICE:-DOCLING}"
-NEXT_PUBLIC_ELECTRIC_URL="${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}"
-NEXT_PUBLIC_ELECTRIC_AUTH_MODE="${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}"
-NEXT_PUBLIC_DEPLOYMENT_MODE="${NEXT_PUBLIC_DEPLOYMENT_MODE:-self-hosted}"
-
-# Replace placeholders in all JS files
-find /app/frontend -type f \( -name "*.js" -o -name "*.json" \) -exec sed -i \
- -e "s|__NEXT_PUBLIC_FASTAPI_BACKEND_URL__|${NEXT_PUBLIC_FASTAPI_BACKEND_URL}|g" \
- -e "s|__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__|${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE}|g" \
- -e "s|__NEXT_PUBLIC_ETL_SERVICE__|${NEXT_PUBLIC_ETL_SERVICE}|g" \
- -e "s|__NEXT_PUBLIC_ELECTRIC_URL__|${NEXT_PUBLIC_ELECTRIC_URL}|g" \
- -e "s|__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__|${NEXT_PUBLIC_ELECTRIC_AUTH_MODE}|g" \
- -e "s|__NEXT_PUBLIC_DEPLOYMENT_MODE__|${NEXT_PUBLIC_DEPLOYMENT_MODE}|g" \
- {} +
-
-echo "✅ Environment configuration applied"
-echo " Backend URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL}"
-echo " Auth Type: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE}"
-echo " ETL Service: ${NEXT_PUBLIC_ETL_SERVICE}"
-echo " Electric URL: ${NEXT_PUBLIC_ELECTRIC_URL}"
-echo " Deployment Mode: ${NEXT_PUBLIC_DEPLOYMENT_MODE}"
-
-# ================================================
-# Run database migrations
-# ================================================
-run_migrations() {
- echo "🔄 Running database migrations..."
-
- # Start PostgreSQL temporarily for migrations
- su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /data/postgres -l /tmp/postgres_migrate.log start"
- sleep 5
-
- # Start Redis temporarily for migrations (some might need it)
- redis-server --dir /data/redis --daemonize yes
- sleep 2
-
- # Run alembic migrations
- cd /app/backend
- alembic upgrade head || echo "⚠️ Migrations may have already been applied"
-
- # Stop temporary services
- redis-cli shutdown || true
- su - postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /data/postgres stop"
-
- echo "✅ Database migrations complete"
-}
-
-# Always run migrations on startup - alembic upgrade head is safe to run
-# every time. It only applies pending migrations (never re-runs applied ones,
-# never calls downgrade). This ensures updates are applied automatically.
-run_migrations
-
-# ================================================
-# Environment Variables Info
-# ================================================
-echo ""
-echo "==========================================="
-echo " 📋 Configuration"
-echo "==========================================="
-echo " Frontend URL: http://localhost:3000"
-echo " Backend API: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL}"
-echo " API Docs: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL}/docs"
-echo " Electric URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}"
-echo " Auth Type: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE}"
-echo " ETL Service: ${NEXT_PUBLIC_ETL_SERVICE}"
-echo " TTS Service: ${TTS_SERVICE}"
-echo " STT Service: ${STT_SERVICE}"
-echo "==========================================="
-echo ""
-
-# ================================================
-# Start Supervisor (manages all services)
-# ================================================
-echo "🚀 Starting all services..."
-exec /usr/local/bin/supervisord -c /etc/supervisor/conf.d/surfsense.conf
-
diff --git a/scripts/docker/init-electric-user.sh b/scripts/docker/init-electric-user.sh
deleted file mode 100755
index b3856c573..000000000
--- a/scripts/docker/init-electric-user.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-# ============================================================================
-# Electric SQL User Initialization Script (docker-compose only)
-# ============================================================================
-# This script is ONLY used when running via docker-compose.
-#
-# How it works:
-# - docker-compose.yml mounts this script into the PostgreSQL container's
-# /docker-entrypoint-initdb.d/ directory
-# - PostgreSQL automatically executes scripts in that directory on first
-# container initialization
-#
-# For local PostgreSQL users (non-Docker), this script is NOT used.
-# Instead, the Electric user is created by Alembic migration 66
-# (66_add_notifications_table_and_electric_replication.py).
-#
-# Both approaches are idempotent (use IF NOT EXISTS), so running both
-# will not cause conflicts.
-# ============================================================================
-
-set -e
-
-# Use environment variables with defaults
-ELECTRIC_DB_USER="${ELECTRIC_DB_USER:-electric}"
-ELECTRIC_DB_PASSWORD="${ELECTRIC_DB_PASSWORD:-electric_password}"
-
-echo "Creating Electric SQL replication user: $ELECTRIC_DB_USER"
-
-psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
- DO \$\$
- BEGIN
- IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '$ELECTRIC_DB_USER') THEN
- CREATE USER $ELECTRIC_DB_USER WITH REPLICATION PASSWORD '$ELECTRIC_DB_PASSWORD';
- END IF;
- END
- \$\$;
-
- GRANT CONNECT ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
- GRANT CREATE ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
- GRANT USAGE ON SCHEMA public TO $ELECTRIC_DB_USER;
- GRANT SELECT ON ALL TABLES IN SCHEMA public TO $ELECTRIC_DB_USER;
- GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO $ELECTRIC_DB_USER;
- ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO $ELECTRIC_DB_USER;
- ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO $ELECTRIC_DB_USER;
-
- -- Create the publication for Electric SQL (if not exists)
- DO \$\$
- BEGIN
- IF NOT EXISTS (SELECT FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
- CREATE PUBLICATION electric_publication_default;
- END IF;
- END
- \$\$;
-EOSQL
-
-echo "Electric SQL user '$ELECTRIC_DB_USER' and publication created successfully"
diff --git a/scripts/docker/init-postgres.sh b/scripts/docker/init-postgres.sh
deleted file mode 100644
index b6ddb6a50..000000000
--- a/scripts/docker/init-postgres.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-# PostgreSQL initialization script for SurfSense
-# This script is called during container startup if the database needs initialization
-
-set -e
-
-PGDATA=${PGDATA:-/data/postgres}
-POSTGRES_USER=${POSTGRES_USER:-surfsense}
-POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-surfsense}
-POSTGRES_DB=${POSTGRES_DB:-surfsense}
-
-# Electric SQL user credentials (configurable)
-ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
-ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
-
-echo "Initializing PostgreSQL..."
-
-# Check if PostgreSQL is already initialized
-if [ -f "$PGDATA/PG_VERSION" ]; then
- echo "PostgreSQL data directory already exists. Skipping initialization."
- exit 0
-fi
-
-# Initialize the database cluster
-/usr/lib/postgresql/14/bin/initdb -D "$PGDATA" --username=postgres
-
-# Configure PostgreSQL
-cat >> "$PGDATA/postgresql.conf" << EOF
-listen_addresses = '*'
-max_connections = 200
-shared_buffers = 256MB
-
-# Enable logical replication (required for Electric SQL)
-wal_level = logical
-max_replication_slots = 10
-max_wal_senders = 10
-
-# Performance settings
-checkpoint_timeout = 10min
-max_wal_size = 1GB
-min_wal_size = 80MB
-EOF
-
-cat >> "$PGDATA/pg_hba.conf" << EOF
-# Allow connections from anywhere with password
-host all all 0.0.0.0/0 md5
-host all all ::0/0 md5
-EOF
-
-# Start PostgreSQL temporarily
-/usr/lib/postgresql/14/bin/pg_ctl -D "$PGDATA" -l /tmp/postgres_init.log start
-
-# Wait for PostgreSQL to start
-sleep 3
-
-# Create user and database
-psql -U postgres << EOF
-CREATE USER $POSTGRES_USER WITH PASSWORD '$POSTGRES_PASSWORD' SUPERUSER;
-CREATE DATABASE $POSTGRES_DB OWNER $POSTGRES_USER;
-\c $POSTGRES_DB
-CREATE EXTENSION IF NOT EXISTS vector;
-
--- Create Electric SQL replication user
-CREATE USER $ELECTRIC_DB_USER WITH REPLICATION PASSWORD '$ELECTRIC_DB_PASSWORD';
-GRANT CONNECT ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
-GRANT USAGE ON SCHEMA public TO $ELECTRIC_DB_USER;
-GRANT SELECT ON ALL TABLES IN SCHEMA public TO $ELECTRIC_DB_USER;
-GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO $ELECTRIC_DB_USER;
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO $ELECTRIC_DB_USER;
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO $ELECTRIC_DB_USER;
-EOF
-
-echo "PostgreSQL initialized successfully."
-
-# Stop PostgreSQL (supervisor will start it)
-/usr/lib/postgresql/14/bin/pg_ctl -D "$PGDATA" stop
-
diff --git a/scripts/docker/postgresql.conf b/scripts/docker/postgresql.conf
deleted file mode 100644
index 99b29ba30..000000000
--- a/scripts/docker/postgresql.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-# PostgreSQL configuration for Electric SQL
-# This file is mounted into the PostgreSQL container
-
-listen_addresses = '*'
-max_connections = 200
-shared_buffers = 256MB
-
-# Enable logical replication (required for Electric SQL)
-wal_level = logical
-max_replication_slots = 10
-max_wal_senders = 10
-
-# Performance settings
-checkpoint_timeout = 10min
-max_wal_size = 1GB
-min_wal_size = 80MB
-
-# Logging (optional, for debugging)
-# log_statement = 'all'
-# log_replication_commands = on
diff --git a/scripts/docker/supervisor-allinone.conf b/scripts/docker/supervisor-allinone.conf
deleted file mode 100644
index 1a21fcc04..000000000
--- a/scripts/docker/supervisor-allinone.conf
+++ /dev/null
@@ -1,121 +0,0 @@
-[supervisord]
-nodaemon=true
-logfile=/dev/stdout
-logfile_maxbytes=0
-pidfile=/var/run/supervisord.pid
-loglevel=info
-user=root
-
-[unix_http_server]
-file=/var/run/supervisor.sock
-chmod=0700
-
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl=unix:///var/run/supervisor.sock
-
-# PostgreSQL
-[program:postgresql]
-command=/usr/lib/postgresql/14/bin/postgres -D /data/postgres
-user=postgres
-autostart=true
-autorestart=true
-priority=10
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=PGDATA="/data/postgres"
-
-# Redis
-[program:redis]
-command=/usr/bin/redis-server --dir /data/redis --appendonly yes
-autostart=true
-autorestart=true
-priority=20
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-
-# Backend API
-[program:backend]
-command=python main.py
-directory=/app/backend
-autostart=true
-autorestart=true
-priority=30
-startsecs=10
-startretries=3
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=PYTHONPATH="/app/backend",UVICORN_LOOP="asyncio",UNSTRUCTURED_HAS_PATCHED_LOOP="1"
-
-# Celery Worker
-[program:celery-worker]
-command=celery -A app.celery_app worker --loglevel=info --concurrency=2 --pool=solo --queues=surfsense,surfsense.connectors
-directory=/app/backend
-autostart=true
-autorestart=true
-priority=40
-startsecs=15
-startretries=3
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=PYTHONPATH="/app/backend"
-
-# Celery Beat (scheduler)
-[program:celery-beat]
-command=celery -A app.celery_app beat --loglevel=info
-directory=/app/backend
-autostart=true
-autorestart=true
-priority=50
-startsecs=20
-startretries=3
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=PYTHONPATH="/app/backend"
-
-# Electric SQL (real-time sync)
-[program:electric]
-command=/app/electric-release/bin/entrypoint start
-autostart=true
-autorestart=true
-priority=25
-startsecs=10
-startretries=3
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=DATABASE_URL="%(ENV_ELECTRIC_DATABASE_URL)s",ELECTRIC_INSECURE="%(ENV_ELECTRIC_INSECURE)s",ELECTRIC_WRITE_TO_PG_MODE="%(ENV_ELECTRIC_WRITE_TO_PG_MODE)s",RELEASE_COOKIE="surfsense_electric_cookie",PORT="%(ENV_ELECTRIC_PORT)s"
-
-# Frontend
-[program:frontend]
-command=node server.js
-directory=/app/frontend
-autostart=true
-autorestart=true
-priority=60
-startsecs=5
-startretries=3
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
-stderr_logfile=/dev/stderr
-stderr_logfile_maxbytes=0
-environment=NODE_ENV="production",PORT="3000",HOSTNAME="0.0.0.0"
-
-# Process Groups
-[group:surfsense]
-programs=postgresql,redis,electric,backend,celery-worker,celery-beat,frontend
-priority=999
-
From 1ce446ed2752236b3d8a1c0ed03ed25f29f2de16 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Tue, 24 Feb 2026 23:07:07 +0530
Subject: [PATCH 03/57] feat: add Celery worker and beat services to Docker
configuration, update service roles
---
docker/.env.example | 1 +
docker/docker-compose.dev.yml | 60 +++++++++++++++++++++++++++++++++
docker/docker-compose.yml | 63 ++++++++++++++++++++++++++++++++++-
3 files changed, 123 insertions(+), 1 deletion(-)
diff --git a/docker/.env.example b/docker/.env.example
index 93830c4dd..d08288129 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -37,6 +37,7 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# BACKEND_PORT=8000
# FRONTEND_PORT=3000
# ELECTRIC_PORT=5133
+# FLOWER_PORT=5555
# Frontend URL used by backend for CORS and OAuth redirects.
# Auto-derived from FRONTEND_PORT for localhost. Set explicitly for reverse proxy.
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 9c1a50ebd..a2bec64d9 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -81,12 +81,72 @@ services:
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
+ - SERVICE_ROLE=api
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
+ celery_worker:
+ build: ../surfsense_backend
+ volumes:
+ - ../surfsense_backend/app:/app/app
+ - shared_temp:/shared_tmp
+ env_file:
+ - ../surfsense_backend/.env
+ environment:
+ - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - CELERY_BROKER_URL=redis://redis:6379/0
+ - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ - REDIS_APP_URL=redis://redis:6379/0
+ - CELERY_TASK_DEFAULT_QUEUE=surfsense
+ - PYTHONPATH=/app
+ - ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
+ - ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
+ - SERVICE_ROLE=worker
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ backend:
+ condition: service_started
+
+ celery_beat:
+ build: ../surfsense_backend
+ env_file:
+ - ../surfsense_backend/.env
+ environment:
+ - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - CELERY_BROKER_URL=redis://redis:6379/0
+ - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ - CELERY_TASK_DEFAULT_QUEUE=surfsense
+ - PYTHONPATH=/app
+ - SERVICE_ROLE=beat
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ celery_worker:
+ condition: service_started
+
+ # flower:
+ # build: ../surfsense_backend
+ # ports:
+ # - "${FLOWER_PORT:-5555}:5555"
+ # env_file:
+ # - ../surfsense_backend/.env
+ # environment:
+ # - CELERY_BROKER_URL=redis://redis:6379/0
+ # - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ # - PYTHONPATH=/app
+ # command: celery -A app.celery_app flower --port=5555
+ # depends_on:
+ # - redis
+ # - celery_worker
+
electric:
image: electricsql/electric:latest
ports:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index b6a167c1c..aa1e712ad 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -62,7 +62,7 @@ services:
ELECTRIC_DB_USER: electric
ELECTRIC_DB_PASSWORD: electric_password
NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3000}}
- SERVICE_ROLE: all
+ SERVICE_ROLE: api
depends_on:
db:
condition: service_healthy
@@ -70,6 +70,67 @@ services:
condition: service_healthy
restart: unless-stopped
+ celery_worker:
+ image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ volumes:
+ - shared_temp:/shared_tmp
+ env_file:
+ - .env
+ environment:
+ DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ CELERY_BROKER_URL: redis://redis:6379/0
+ CELERY_RESULT_BACKEND: redis://redis:6379/0
+ REDIS_APP_URL: redis://redis:6379/0
+ CELERY_TASK_DEFAULT_QUEUE: surfsense
+ PYTHONPATH: /app
+ ELECTRIC_DB_USER: electric
+ ELECTRIC_DB_PASSWORD: electric_password
+ SERVICE_ROLE: worker
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ backend:
+ condition: service_started
+ restart: unless-stopped
+
+ celery_beat:
+ image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ env_file:
+ - .env
+ environment:
+ DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ CELERY_BROKER_URL: redis://redis:6379/0
+ CELERY_RESULT_BACKEND: redis://redis:6379/0
+ CELERY_TASK_DEFAULT_QUEUE: surfsense
+ PYTHONPATH: /app
+ SERVICE_ROLE: beat
+ depends_on:
+ db:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ celery_worker:
+ condition: service_started
+ restart: unless-stopped
+
+ # flower:
+ # image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ # ports:
+ # - "${FLOWER_PORT:-5555}:5555"
+ # env_file:
+ # - .env
+ # environment:
+ # CELERY_BROKER_URL: redis://redis:6379/0
+ # CELERY_RESULT_BACKEND: redis://redis:6379/0
+ # PYTHONPATH: /app
+ # command: celery -A app.celery_app flower --port=5555
+ # depends_on:
+ # - redis
+ # - celery_worker
+ # restart: unless-stopped
+
electric:
image: electricsql/electric:latest
ports:
From 211309f3ac6e1ea0dc701a18f1c2d97e70395adc Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Tue, 24 Feb 2026 23:41:22 +0530
Subject: [PATCH 04/57] chore: update Docker environment variables for database
configuration and improve security defaults
---
docker/.env.example | 13 +++++++++++++
docker/docker-compose.dev.yml | 16 ++++++++--------
docker/docker-compose.yml | 26 +++++++++++++-------------
3 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index d08288129..4f71a3132 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -60,7 +60,20 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# Database (defaults work out of the box, change for security)
# ------------------------------------------------------------------------------
+# DB_USER=surfsense
# DB_PASSWORD=surfsense
+# DB_NAME=surfsense
+
+# ------------------------------------------------------------------------------
+# Electric SQL (real-time sync credentials)
+# ------------------------------------------------------------------------------
+# These must match on the db, backend, and electric services.
+# Change for security; defaults work out of the box.
+
+# ELECTRIC_DB_USER=electric
+# ELECTRIC_DB_PASSWORD=electric_password
+# Full override for pointing Electric at an external database:
+# ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
# ------------------------------------------------------------------------------
# TTS & STT (Text-to-Speech / Speech-to-Text)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index a2bec64d9..310349a4e 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -20,14 +20,14 @@ services:
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
- - POSTGRES_USER=${POSTGRES_USER:-postgres}
- - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-postgres}
- - POSTGRES_DB=${POSTGRES_DB:-surfsense}
+ - POSTGRES_USER=${DB_USER:-postgres}
+ - POSTGRES_PASSWORD=${DB_PASSWORD:-postgres}
+ - POSTGRES_DB=${DB_NAME:-surfsense}
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
healthcheck:
- test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres} -d ${POSTGRES_DB:-surfsense}"]
+ test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-surfsense}"]
interval: 10s
timeout: 5s
retries: 5
@@ -67,7 +67,7 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- REDIS_APP_URL=redis://redis:6379/0
@@ -96,7 +96,7 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- REDIS_APP_URL=redis://redis:6379/0
@@ -118,7 +118,7 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL=redis://redis:6379/0
- CELERY_RESULT_BACKEND=redis://redis:6379/0
- CELERY_TASK_DEFAULT_QUEUE=surfsense
@@ -154,7 +154,7 @@ services:
# depends_on:
# - db
environment:
- - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:5432/${POSTGRES_DB:-surfsense}?sslmode=disable}
+ - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:5432/${DB_NAME:-surfsense}?sslmode=disable}
- ELECTRIC_INSECURE=true
- ELECTRIC_WRITE_TO_PG_MODE=direct
restart: unless-stopped
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index aa1e712ad..66d8db2e0 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -17,15 +17,15 @@ services:
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
- POSTGRES_USER: surfsense
+ POSTGRES_USER: ${DB_USER:-surfsense}
POSTGRES_PASSWORD: ${DB_PASSWORD:-surfsense}
- POSTGRES_DB: surfsense
- ELECTRIC_DB_USER: electric
- ELECTRIC_DB_PASSWORD: electric_password
+ POSTGRES_DB: ${DB_NAME:-surfsense}
+ ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
+ ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
restart: unless-stopped
healthcheck:
- test: ["CMD-SHELL", "pg_isready -U surfsense -d surfsense"]
+ test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-surfsense} -d ${DB_NAME:-surfsense}"]
interval: 10s
timeout: 5s
retries: 5
@@ -51,7 +51,7 @@ services:
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
CELERY_BROKER_URL: redis://redis:6379/0
CELERY_RESULT_BACKEND: redis://redis:6379/0
REDIS_APP_URL: redis://redis:6379/0
@@ -59,8 +59,8 @@ services:
PYTHONPATH: /app
UVICORN_LOOP: asyncio
UNSTRUCTURED_HAS_PATCHED_LOOP: "1"
- ELECTRIC_DB_USER: electric
- ELECTRIC_DB_PASSWORD: electric_password
+ ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
+ ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3000}}
SERVICE_ROLE: api
depends_on:
@@ -77,14 +77,14 @@ services:
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
CELERY_BROKER_URL: redis://redis:6379/0
CELERY_RESULT_BACKEND: redis://redis:6379/0
REDIS_APP_URL: redis://redis:6379/0
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
- ELECTRIC_DB_USER: electric
- ELECTRIC_DB_PASSWORD: electric_password
+ ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
+ ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
SERVICE_ROLE: worker
depends_on:
db:
@@ -100,7 +100,7 @@ services:
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://surfsense:${DB_PASSWORD:-surfsense}@db:5432/surfsense
+ DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
CELERY_BROKER_URL: redis://redis:6379/0
CELERY_RESULT_BACKEND: redis://redis:6379/0
CELERY_TASK_DEFAULT_QUEUE: surfsense
@@ -136,7 +136,7 @@ services:
ports:
- "${ELECTRIC_PORT:-5133}:3000"
environment:
- DATABASE_URL: postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
+ DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@db:5432/${DB_NAME:-surfsense}?sslmode=disable}
ELECTRIC_INSECURE: "true"
ELECTRIC_WRITE_TO_PG_MODE: direct
restart: unless-stopped
From 2958d1c06a9b6f99f264c46d6d109b3a6f161acc Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 00:15:29 +0530
Subject: [PATCH 05/57] chore: update Docker configuration to rename frontend
to web, adjust environment variable references for database and Redis
connections
---
.github/workflows/docker_build.yaml | 14 +++++------
docker/.env.example | 19 +++++++++++++++
docker/docker-compose.dev.yml | 28 ++++++++++-----------
docker/docker-compose.yml | 38 ++++++++++++++---------------
4 files changed, 59 insertions(+), 40 deletions(-)
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index a33c238ab..81108567b 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -83,7 +83,7 @@ jobs:
fail-fast: false
matrix:
platform: [linux/amd64, linux/arm64]
- image: [backend, frontend]
+ image: [backend, web]
include:
- platform: linux/amd64
suffix: amd64
@@ -92,11 +92,11 @@ jobs:
suffix: arm64
os: ubuntu-24.04-arm
- image: backend
- name: surfsense_backend
+ name: surfsense-backend
context: ./surfsense_backend
file: ./surfsense_backend/Dockerfile
- - image: frontend
- name: surfsense_web
+ - image: web
+ name: surfsense-web
context: ./surfsense_web
file: ./surfsense_web/Dockerfile
env:
@@ -141,7 +141,7 @@ jobs:
cache-to: type=gha,mode=max,scope=${{ matrix.image }}-${{ matrix.suffix }}
provenance: false
build-args: |
- ${{ matrix.image == 'frontend' && format('NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__{0}NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__{0}NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__{0}NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__{0}NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__{0}NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__', '\n') || '' }}
+ ${{ matrix.image == 'web' && format('NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__{0}NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__{0}NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__{0}NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__{0}NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__{0}NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__', '\n') || '' }}
create_manifest:
runs-on: ubuntu-latest
@@ -153,8 +153,8 @@ jobs:
fail-fast: false
matrix:
include:
- - name: surfsense_backend
- - name: surfsense_web
+ - name: surfsense-backend
+ - name: surfsense-web
env:
REGISTRY_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ matrix.name }}
diff --git a/docker/.env.example b/docker/.env.example
index 4f71a3132..6d7d8b6ce 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -63,6 +63,25 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# DB_USER=surfsense
# DB_PASSWORD=surfsense
# DB_NAME=surfsense
+# DB_HOST=db
+# DB_PORT=5432
+
+# SSL mode for database connections: disable, require, verify-ca, verify-full
+# DB_SSLMODE=disable
+
+# Full DATABASE_URL override — when set, takes precedence over the individual
+# DB_USER / DB_PASSWORD / DB_NAME / DB_HOST / DB_PORT settings above.
+# Use this for managed databases (AWS RDS, GCP Cloud SQL, Supabase, etc.)
+# DATABASE_URL=postgresql+asyncpg://user:password@your-rds-host:5432/surfsense?sslmode=require
+
+# ------------------------------------------------------------------------------
+# Redis (defaults work out of the box)
+# ------------------------------------------------------------------------------
+# Full Redis URL override for Celery broker, result backend, and app cache.
+# Use this for managed Redis (AWS ElastiCache, Redis Cloud, etc.)
+# Supports auth: redis://:password@host:port/0
+# Supports TLS: rediss://:password@host:6380/0
+# REDIS_URL=redis://redis:6379/0
# ------------------------------------------------------------------------------
# Electric SQL (real-time sync credentials)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 310349a4e..12c1ba53c 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -67,10 +67,10 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- - CELERY_BROKER_URL=redis://redis:6379/0
- - CELERY_RESULT_BACKEND=redis://redis:6379/0
- - REDIS_APP_URL=redis://redis:6379/0
+ - DATABASE_URL=${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ - CELERY_BROKER_URL=${REDIS_URL:-redis://redis:6379/0}
+ - CELERY_RESULT_BACKEND=${REDIS_URL:-redis://redis:6379/0}
+ - REDIS_APP_URL=${REDIS_URL:-redis://redis:6379/0}
- CELERY_TASK_DEFAULT_QUEUE=surfsense
- PYTHONPATH=/app
- UVICORN_LOOP=asyncio
@@ -96,10 +96,10 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- - CELERY_BROKER_URL=redis://redis:6379/0
- - CELERY_RESULT_BACKEND=redis://redis:6379/0
- - REDIS_APP_URL=redis://redis:6379/0
+ - DATABASE_URL=${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ - CELERY_BROKER_URL=${REDIS_URL:-redis://redis:6379/0}
+ - CELERY_RESULT_BACKEND=${REDIS_URL:-redis://redis:6379/0}
+ - REDIS_APP_URL=${REDIS_URL:-redis://redis:6379/0}
- CELERY_TASK_DEFAULT_QUEUE=surfsense
- PYTHONPATH=/app
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
@@ -118,9 +118,9 @@ services:
env_file:
- ../surfsense_backend/.env
environment:
- - DATABASE_URL=postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@db:5432/${DB_NAME:-surfsense}
- - CELERY_BROKER_URL=redis://redis:6379/0
- - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ - DATABASE_URL=${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-postgres}:${DB_PASSWORD:-postgres}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ - CELERY_BROKER_URL=${REDIS_URL:-redis://redis:6379/0}
+ - CELERY_RESULT_BACKEND=${REDIS_URL:-redis://redis:6379/0}
- CELERY_TASK_DEFAULT_QUEUE=surfsense
- PYTHONPATH=/app
- SERVICE_ROLE=beat
@@ -139,8 +139,8 @@ services:
# env_file:
# - ../surfsense_backend/.env
# environment:
- # - CELERY_BROKER_URL=redis://redis:6379/0
- # - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ # - CELERY_BROKER_URL=${REDIS_URL:-redis://redis:6379/0}
+ # - CELERY_RESULT_BACKEND=${REDIS_URL:-redis://redis:6379/0}
# - PYTHONPATH=/app
# command: celery -A app.celery_app flower --port=5555
# depends_on:
@@ -154,7 +154,7 @@ services:
# depends_on:
# - db
environment:
- - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:5432/${DB_NAME:-surfsense}?sslmode=disable}
+ - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ELECTRIC_INSECURE=true
- ELECTRIC_WRITE_TO_PG_MODE=direct
restart: unless-stopped
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 66d8db2e0..eacfc0806 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -51,10 +51,10 @@ services:
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL: redis://redis:6379/0
- CELERY_RESULT_BACKEND: redis://redis:6379/0
- REDIS_APP_URL: redis://redis:6379/0
+ DATABASE_URL: ${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ CELERY_BROKER_URL: ${REDIS_URL:-redis://redis:6379/0}
+ CELERY_RESULT_BACKEND: ${REDIS_URL:-redis://redis:6379/0}
+ REDIS_APP_URL: ${REDIS_URL:-redis://redis:6379/0}
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
UVICORN_LOOP: asyncio
@@ -71,16 +71,16 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL: redis://redis:6379/0
- CELERY_RESULT_BACKEND: redis://redis:6379/0
- REDIS_APP_URL: redis://redis:6379/0
+ DATABASE_URL: ${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ CELERY_BROKER_URL: ${REDIS_URL:-redis://redis:6379/0}
+ CELERY_RESULT_BACKEND: ${REDIS_URL:-redis://redis:6379/0}
+ REDIS_APP_URL: ${REDIS_URL:-redis://redis:6379/0}
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
@@ -96,13 +96,13 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
- DATABASE_URL: postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@db:5432/${DB_NAME:-surfsense}
- CELERY_BROKER_URL: redis://redis:6379/0
- CELERY_RESULT_BACKEND: redis://redis:6379/0
+ DATABASE_URL: ${DATABASE_URL:-postgresql+asyncpg://${DB_USER:-surfsense}:${DB_PASSWORD:-surfsense}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}}
+ CELERY_BROKER_URL: ${REDIS_URL:-redis://redis:6379/0}
+ CELERY_RESULT_BACKEND: ${REDIS_URL:-redis://redis:6379/0}
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
SERVICE_ROLE: beat
@@ -116,14 +116,14 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense_backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
# - .env
# environment:
- # CELERY_BROKER_URL: redis://redis:6379/0
- # CELERY_RESULT_BACKEND: redis://redis:6379/0
+ # CELERY_BROKER_URL: ${REDIS_URL:-redis://redis:6379/0}
+ # CELERY_RESULT_BACKEND: ${REDIS_URL:-redis://redis:6379/0}
# PYTHONPATH: /app
# command: celery -A app.celery_app flower --port=5555
# depends_on:
@@ -136,7 +136,7 @@ services:
ports:
- "${ELECTRIC_PORT:-5133}:3000"
environment:
- DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@db:5432/${DB_NAME:-surfsense}?sslmode=disable}
+ DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ELECTRIC_INSECURE: "true"
ELECTRIC_WRITE_TO_PG_MODE: direct
restart: unless-stopped
@@ -150,7 +150,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense_web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
From 0a231ff5ad29df3b0e429abd20148759b2740af2 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 00:26:00 +0530
Subject: [PATCH 06/57] chore: update installation script to create a nested
scripts directory and adjust file paths for initialization script
---
docker/scripts/install.sh | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 2d7308023..21d214de3 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -32,13 +32,13 @@ fi
# ── Download files ───────────────────────────────────────────────────────────
info "Creating installation directory: ${INSTALL_DIR}"
-mkdir -p "${INSTALL_DIR}"
+mkdir -p "${INSTALL_DIR}/scripts"
FILES=(
"docker/docker-compose.yml:docker-compose.yml"
"docker/.env.example:.env.example"
"docker/postgresql.conf:postgresql.conf"
- "docker/scripts/init-electric-user.sh:init-electric-user.sh"
+ "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
)
for entry in "${FILES[@]}"; do
@@ -48,7 +48,7 @@ for entry in "${FILES[@]}"; do
curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" || error "Failed to download ${src}"
done
-chmod +x "${INSTALL_DIR}/init-electric-user.sh"
+chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
# ── Set up .env ──────────────────────────────────────────────────────────────
From d5be5c65b882189de587cddb4cfc401017d6e769 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 02:07:53 +0530
Subject: [PATCH 07/57] chore: update docker compose
---
docker/docker-compose.yml | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index eacfc0806..6b04b5657 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -71,7 +71,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -96,7 +96,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -116,7 +116,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -150,7 +150,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/MODSetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
From d39951e4eacc6f93e539eff44f5b4bc89bef75d0 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 03:28:53 +0530
Subject: [PATCH 08/57] chore: update docker-compose image;
 `init-electric-user.sh` now has its executable bit set in Git
---
docker/docker-compose.yml | 10 +++++-----
docker/scripts/init-electric-user.sh | 0
2 files changed, 5 insertions(+), 5 deletions(-)
mode change 100644 => 100755 docker/scripts/init-electric-user.sh
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 6b04b5657..eacfc0806 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -71,7 +71,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -96,7 +96,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -116,7 +116,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/MODSetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -150,7 +150,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/MODSetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/init-electric-user.sh b/docker/scripts/init-electric-user.sh
old mode 100644
new mode 100755
From c216c5bb7a314d5668b867f57ac8cd87fe66359b Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 03:51:48 +0530
Subject: [PATCH 09/57] chore: refactor Docker build arguments for web image to
separate environment variable definitions
---
.github/workflows/docker_build.yaml | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index 81108567b..f55419200 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -141,7 +141,12 @@ jobs:
cache-to: type=gha,mode=max,scope=${{ matrix.image }}-${{ matrix.suffix }}
provenance: false
build-args: |
- ${{ matrix.image == 'web' && format('NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__{0}NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__{0}NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__{0}NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__{0}NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__{0}NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__', '\n') || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__' || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__' || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__' || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__' || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__' || '' }}
+ ${{ matrix.image == 'web' && 'NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__' || '' }}
create_manifest:
runs-on: ubuntu-latest
From 2e0f75203ecfaa206d6f1506ea46dfd84834f2d8 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 03:56:32 +0530
Subject: [PATCH 10/57] chore: correct file path for Electric SQL user
initialization script in Docker deployment documentation
---
.../66_add_notifications_table_and_electric_replication.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/surfsense_backend/alembic/versions/66_add_notifications_table_and_electric_replication.py b/surfsense_backend/alembic/versions/66_add_notifications_table_and_electric_replication.py
index 182bf981c..35418c6ae 100644
--- a/surfsense_backend/alembic/versions/66_add_notifications_table_and_electric_replication.py
+++ b/surfsense_backend/alembic/versions/66_add_notifications_table_and_electric_replication.py
@@ -8,7 +8,7 @@ Creates notifications table and sets up Electric SQL replication
search_source_connectors, and documents tables.
NOTE: Electric SQL user creation is idempotent (uses IF NOT EXISTS).
-- Docker deployments: user is pre-created by scripts/docker/init-electric-user.sh
+- Docker deployments: user is pre-created by docker/scripts/init-electric-user.sh
- Local PostgreSQL: user is created here during migration
Both approaches are safe to run together without conflicts as this migraiton is idempotent
"""
From d7a548622d916d45c60615a571d3de786bf4f27a Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Wed, 25 Feb 2026 13:00:05 +0530
Subject: [PATCH 11/57] chore: pinning version for electricsql in both
docker-compose files; edit .env.example
---
docker/.env.example | 2 +-
docker/docker-compose.dev.yml | 2 +-
docker/docker-compose.yml | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 6d7d8b6ce..201b60e8c 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -40,7 +40,7 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# FLOWER_PORT=5555
# Frontend URL used by backend for CORS and OAuth redirects.
-# Auto-derived from FRONTEND_PORT for localhost. Set explicitly for reverse proxy.
+# Auto-derived from FRONTEND_PORT for localhost. (Set explicitly for reverse proxy)
# NEXT_FRONTEND_URL=http://localhost:3000
# Backend URL for OAuth callback redirects (set when behind a reverse proxy)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 12c1ba53c..591a83ee0 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -148,7 +148,7 @@ services:
# - celery_worker
electric:
- image: electricsql/electric:latest
+ image: electricsql/electric:1.4.6
ports:
- "${ELECTRIC_PORT:-5133}:3000"
# depends_on:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index eacfc0806..7f983ef9c 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -132,7 +132,7 @@ services:
# restart: unless-stopped
electric:
- image: electricsql/electric:latest
+ image: electricsql/electric:1.4.6
ports:
- "${ELECTRIC_PORT:-5133}:3000"
environment:
From 4ce883b615c30d4f40c90f4862ba0aba2a348e44 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 03:49:32 +0530
Subject: [PATCH 12/57] chore: update Docker images to use repo for testing
---
docker/docker-compose.yml | 10 +++++-----
docker/scripts/install.sh | 2 +-
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 7f983ef9c..7436796ce 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -71,7 +71,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -96,7 +96,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -116,7 +116,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -150,7 +150,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 21d214de3..d32aaba9a 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -6,7 +6,7 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
+REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
INSTALL_DIR="./surfsense"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
From bbbde07db7f6a37830f4de8f32c69b7374d7ab0f Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 03:58:06 +0530
Subject: [PATCH 13/57] chore: set specific version for testing
---
docker/.env.example | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/.env.example b/docker/.env.example
index 201b60e8c..a79b51442 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -6,7 +6,7 @@
# ==============================================================================
# SurfSense version (pin to a specific version like "0.0.13.1" or use "latest")
-SURFSENSE_VERSION=latest
+SURFSENSE_VERSION=0.0.13.6
# ------------------------------------------------------------------------------
# Core Settings
From 85eabff246cc59c7ea8bd2e42f3e200234397e93 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 04:45:59 +0530
Subject: [PATCH 14/57] chore: update Docker configuration to use new
repository and set SURFSENSE_VERSION to latest
---
docker/.env.example | 2 +-
docker/docker-compose.yml | 10 +++++-----
docker/scripts/install.sh | 28 +++++++++++++++++++---------
3 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index a79b51442..201b60e8c 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -6,7 +6,7 @@
# ==============================================================================
# SurfSense version (pin to a specific version like "0.0.13.1" or use "latest")
-SURFSENSE_VERSION=0.0.13.6
+SURFSENSE_VERSION=latest
# ------------------------------------------------------------------------------
# Core Settings
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 7436796ce..7f983ef9c 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -71,7 +71,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -96,7 +96,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -116,7 +116,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -150,7 +150,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index d32aaba9a..7a12c591a 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -6,14 +6,14 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
+REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
INSTALL_DIR="./surfsense"
-GREEN='\033[0;32m'
+CYAN='\033[1;36m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
-info() { printf "${GREEN}[SurfSense]${NC} %s\n" "$1"; }
+info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; }
warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; }
error() { printf "${RED}[SurfSense]${NC} %s\n" "$1" >&2; exit 1; }
@@ -74,10 +74,21 @@ cd "${INSTALL_DIR}"
${DC} up -d
echo ""
-info "=========================================="
-info " SurfSense is starting up!"
-info "=========================================="
-info ""
+printf '\033[1;37m'
+cat << 'EOF'
+
+ .d8888b. .d888 .d8888b.
+d88P Y88b d88P" d88P Y88b
+Y88b. 888 Y88b.
+ "Y888b. 888 888 888d888 888888 "Y888b. .d88b. 88888b. .d8888b .d88b.
+ "Y88b. 888 888 888P" 888 "Y88b. d8P Y8b 888 "88b 88K d8P Y8b
+ "888 888 888 888 888 "888 88888888 888 888 "Y8888b. 88888888
+Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
+ "Y8888P" "Y88888 888 888 "Y8888P" "Y8888 888 888 88888P' "Y8888
+
+EOF
+printf " Your personal AI-powered search engine ${YELLOW}v${SURFSENSE_VERSION:-latest}${NC}\n"
+printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
info " Frontend: http://localhost:3000"
info " Backend: http://localhost:8000"
info " API Docs: http://localhost:8000/docs"
@@ -87,6 +98,5 @@ info " Logs: cd ${INSTALL_DIR} && ${DC} logs -f"
info " Stop: cd ${INSTALL_DIR} && ${DC} down"
info " Update: cd ${INSTALL_DIR} && ${DC} pull && ${DC} up -d"
info ""
-warn " First startup may take a few minutes while images are pulled."
+warn " First startup may take some time."
warn " Edit .env to configure OAuth connectors, API keys, etc."
-info "=========================================="
From 176dfdaeed8cf15400e6c6ba6b8ca4bfd26d773a Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 04:59:53 +0530
Subject: [PATCH 15/57] chore: enhance .env.example with restructuring
configuration options for ports and reverse proxy settings
---
docker/.env.example | 49 ++++++++++++++++++++++-----------------------
1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 201b60e8c..58549acba 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -15,6 +15,30 @@ SURFSENSE_VERSION=latest
# REQUIRED: Generate a secret key with: openssl rand -base64 32
SECRET_KEY=replace_me_with_a_random_string
+
+# ------------------------------------------------------------------------------
+# Ports (change to avoid conflicts with other services on your machine)
+# ------------------------------------------------------------------------------
+
+# BACKEND_PORT=8000
+# FRONTEND_PORT=3000
+# ELECTRIC_PORT=5133
+# FLOWER_PORT=5555
+
+# ------------------------------------------------------------------------------
+# Custom Domain / Reverse Proxy
+# ------------------------------------------------------------------------------
+# ONLY set these if you are serving SurfSense on a real domain via a reverse
+# proxy (e.g. Caddy, Nginx, Cloudflare Tunnel).
+# For standard localhost deployments, leave all of these commented out —
+# they are automatically derived from the port settings above.
+#
+# NEXT_FRONTEND_URL=https://app.yourdomain.com
+# BACKEND_URL=https://api.yourdomain.com
+# NEXT_PUBLIC_FASTAPI_BACKEND_URL=https://api.yourdomain.com
+# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
+
+
# Auth type: LOCAL (email/password) or GOOGLE (OAuth)
AUTH_TYPE=LOCAL
@@ -30,31 +54,6 @@ ETL_SERVICE=DOCLING
# Cohere: cohere://embed-english-light-v3.0 (set COHERE_API_KEY below)
EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
-# ------------------------------------------------------------------------------
-# Ports (change these to avoid host conflicts — everything auto-derives)
-# ------------------------------------------------------------------------------
-
-# BACKEND_PORT=8000
-# FRONTEND_PORT=3000
-# ELECTRIC_PORT=5133
-# FLOWER_PORT=5555
-
-# Frontend URL used by backend for CORS and OAuth redirects.
-# Auto-derived from FRONTEND_PORT for localhost. (Set explicitly for reverse proxy)
-# NEXT_FRONTEND_URL=http://localhost:3000
-
-# Backend URL for OAuth callback redirects (set when behind a reverse proxy)
-# BACKEND_URL=https://api.yourdomain.com
-
-# ------------------------------------------------------------------------------
-# Frontend URL Overrides (reverse proxy / custom domains)
-# ------------------------------------------------------------------------------
-# These are auto-derived from the port settings above for localhost deployments.
-# You only need to set these explicitly when using a reverse proxy with real
-# domains (e.g. Caddy, Nginx, Cloudflare Tunnel).
-#
-# NEXT_PUBLIC_FASTAPI_BACKEND_URL=https://api.yourdomain.com
-# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
# ------------------------------------------------------------------------------
# Database (defaults work out of the box, change for security)
From 95c41565f1b47095bb52adfc2f3e7bf298bf0c28 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 05:09:56 +0530
Subject: [PATCH 16/57] chore: update .env.example
---
docker/.env.example | 30 ++++++++++++++----------------
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 58549acba..f2e9dda0a 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -15,6 +15,20 @@ SURFSENSE_VERSION=latest
# REQUIRED: Generate a secret key with: openssl rand -base64 32
SECRET_KEY=replace_me_with_a_random_string
+# Auth type: LOCAL (email/password) or GOOGLE (OAuth)
+AUTH_TYPE=LOCAL
+
+# Allow new user registrations (TRUE or FALSE)
+# REGISTRATION_ENABLED=TRUE
+
+# Document parsing service: DOCLING, UNSTRUCTURED, or LLAMACLOUD
+ETL_SERVICE=DOCLING
+
+# Embedding model for vector search
+# Local: sentence-transformers/all-MiniLM-L6-v2
+# OpenAI: openai://text-embedding-ada-002 (set OPENAI_API_KEY below)
+# Cohere: cohere://embed-english-light-v3.0 (set COHERE_API_KEY below)
+EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# ------------------------------------------------------------------------------
# Ports (change to avoid conflicts with other services on your machine)
@@ -39,22 +53,6 @@ SECRET_KEY=replace_me_with_a_random_string
# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
-# Auth type: LOCAL (email/password) or GOOGLE (OAuth)
-AUTH_TYPE=LOCAL
-
-# Allow new user registrations (TRUE or FALSE)
-# REGISTRATION_ENABLED=TRUE
-
-# Document parsing service: DOCLING, UNSTRUCTURED, or LLAMACLOUD
-ETL_SERVICE=DOCLING
-
-# Embedding model for vector search
-# Local: sentence-transformers/all-MiniLM-L6-v2
-# OpenAI: openai://text-embedding-ada-002 (set OPENAI_API_KEY below)
-# Cohere: cohere://embed-english-light-v3.0 (set COHERE_API_KEY below)
-EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
-
-
# ------------------------------------------------------------------------------
# Database (defaults work out of the box, change for security)
# ------------------------------------------------------------------------------
From 9ae589b6baed1b6b5e212a714e9ed5d49ef4f027 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 05:26:38 +0530
Subject: [PATCH 17/57] chore: update documentation for Electric SQL setup and
enhance .env.example with connection instructions
---
docker/.env.example | 4 +-
.../content/docs/how-to/electric-sql.mdx | 159 ++++++------------
2 files changed, 57 insertions(+), 106 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index f2e9dda0a..438e97941 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -88,7 +88,9 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# ELECTRIC_DB_USER=electric
# ELECTRIC_DB_PASSWORD=electric_password
-# Full override for pointing Electric at an external database:
+# Full override for the Electric → Postgres connection URL.
+# Leave commented out to use the Docker-managed `db` container (default).
+# Uncomment it and replace `db` with `host.docker.internal` when pointing Electric at a local Postgres instance (e.g. Postgres.app on macOS):
# ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
# ------------------------------------------------------------------------------
diff --git a/surfsense_web/content/docs/how-to/electric-sql.mdx b/surfsense_web/content/docs/how-to/electric-sql.mdx
index 288745850..fb2cf941a 100644
--- a/surfsense_web/content/docs/how-to/electric-sql.mdx
+++ b/surfsense_web/content/docs/how-to/electric-sql.mdx
@@ -3,8 +3,6 @@ title: Electric SQL
description: Setting up Electric SQL for real-time data synchronization in SurfSense
---
-# Electric SQL
-
[Electric SQL](https://electric-sql.com/) enables real-time data synchronization in SurfSense, providing instant updates for inbox items, document indexing status, and connector sync progress without manual refresh. The frontend uses [PGlite](https://pglite.dev/) (a lightweight PostgreSQL in the browser) to maintain a local database that syncs with the backend via Electric SQL.
## What Does Electric SQL Do?
@@ -25,74 +23,29 @@ This means:
## Docker Setup
-### All-in-One Quickstart
-
-The simplest way to run SurfSense with Electric SQL is using the all-in-one Docker image. This bundles everything into a single container:
-
-- PostgreSQL + pgvector (vector database)
-- Redis (task queue)
-- Electric SQL (real-time sync)
-- Backend API
-- Frontend
+The `docker-compose.yml` includes the Electric SQL service. It is pre-configured to connect to the Docker-managed `db` container out of the box.
```bash
-docker run -d \
- -p 3000:3000 \
- -p 8000:8000 \
- -p 5133:5133 \
- -v surfsense-data:/data \
- --name surfsense \
- ghcr.io/modsetter/surfsense:latest
+docker compose up -d
```
-**With custom Electric SQL credentials:**
-
-```bash
-docker run -d \
- -p 3000:3000 \
- -p 8000:8000 \
- -p 5133:5133 \
- -v surfsense-data:/data \
- -e ELECTRIC_DB_USER=your_electric_user \
- -e ELECTRIC_DB_PASSWORD=your_electric_password \
- --name surfsense \
- ghcr.io/modsetter/surfsense:latest
-```
-
-Access SurfSense at `http://localhost:3000`. Electric SQL is automatically configured and running on port 5133.
-
-### Docker Compose
-
-For more control over individual services, use Docker Compose.
-
-**Quickstart (all-in-one image):**
-
-```bash
-docker compose -f docker-compose.quickstart.yml up -d
-```
-
-**Standard setup (separate services):**
-
-The `docker-compose.yml` includes the Electric SQL service configuration:
+The Electric SQL service configuration in `docker-compose.yml`:
```yaml
electric:
- image: electricsql/electric:latest
+ image: electricsql/electric:1.4.6
ports:
- "${ELECTRIC_PORT:-5133}:3000"
environment:
- - DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${POSTGRES_HOST:-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-surfsense}?sslmode=disable}
- - ELECTRIC_INSECURE=true
- - ELECTRIC_WRITE_TO_PG_MODE=direct
- restart: unless-stopped
- healthcheck:
- test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
- interval: 10s
- timeout: 5s
- retries: 5
+ DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
+ ELECTRIC_INSECURE: "true"
+ ELECTRIC_WRITE_TO_PG_MODE: direct
+ depends_on:
+ db:
+ condition: service_healthy
```
-No additional configuration is required - Electric SQL is pre-configured to work with the Docker PostgreSQL instance.
+No additional configuration is required — Electric SQL is pre-configured to work with the Docker PostgreSQL instance.
## Manual Setup
@@ -102,19 +55,16 @@ Follow the steps below based on your PostgreSQL setup.
Ensure your environment files are configured. If you haven't set up SurfSense yet, follow the [Manual Installation Guide](/docs/manual-installation) first.
-For Electric SQL, verify these variables are set:
-
-**Root `.env`:**
+For Electric SQL, verify these variables are set in `docker/.env`:
```bash
ELECTRIC_PORT=5133
-POSTGRES_HOST=host.docker.internal # Use 'db' for Docker PostgreSQL instance
ELECTRIC_DB_USER=electric
ELECTRIC_DB_PASSWORD=electric_password
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
```
-**Frontend `.env` (`surfsense_web/.env`):**
+**Frontend (`surfsense_web/.env`):**
```bash
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
@@ -125,32 +75,17 @@ NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
### Option A: Using Docker PostgreSQL
-If you're using the Docker-managed PostgreSQL instance, follow these steps:
-
-**1. Update environment variable:**
-
-In your root `.env` file, set:
+If you're using the Docker-managed PostgreSQL instance, no extra configuration is needed. Just start the services:
```bash
-POSTGRES_HOST=db
+docker compose up -d db electric
```
-**2. Start PostgreSQL and Electric SQL:**
-
-```bash
-docker-compose up -d db electric
-```
-
-**3. Run database migration:**
+Then run the database migration and start the backend:
```bash
cd surfsense_backend
uv run alembic upgrade head
-```
-
-**4. Start the backend:**
-
-```bash
uv run main.py
```
@@ -160,17 +95,17 @@ Electric SQL is now configured and connected to your Docker PostgreSQL database.
### Option B: Using Local PostgreSQL
-If you're using a local PostgreSQL installation, follow these steps:
+If you're using a local PostgreSQL installation (e.g. Postgres.app on macOS), follow these steps:
**1. Enable logical replication in PostgreSQL:**
-Open your `postgresql.conf` file using vim (or your preferred editor):
+Open your `postgresql.conf` file:
```bash
# Common locations:
-# macOS (Homebrew): /opt/homebrew/var/postgresql@15/postgresql.conf
-# Linux: /etc/postgresql/15/main/postgresql.conf
-# Windows: C:\Program Files\PostgreSQL\15\data\postgresql.conf
+# macOS (Postgres.app): ~/Library/Application Support/Postgres/var-17/postgresql.conf
+# macOS (Homebrew): /opt/homebrew/var/postgresql@17/postgresql.conf
+# Linux: /etc/postgresql/17/main/postgresql.conf
sudo vim /path/to/postgresql.conf
```
@@ -178,38 +113,51 @@ sudo vim /path/to/postgresql.conf
Add the following settings:
```ini
-# Enable logical replication (required for Electric SQL)
+# Required for Electric SQL
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10
```
-After saving the changes (`:wq` in vim), restart your PostgreSQL server for the configuration to take effect.
+After saving, restart PostgreSQL for the settings to take effect.
-**2. Update environment variable:**
+**2. Create the Electric replication user:**
-In your root `.env` file, set:
+Connect to your local database as a superuser and run:
-```bash
-POSTGRES_HOST=host.docker.internal
+```sql
+CREATE USER electric WITH REPLICATION PASSWORD 'electric_password';
+GRANT CONNECT ON DATABASE surfsense TO electric;
+GRANT CREATE ON DATABASE surfsense TO electric;
+GRANT USAGE ON SCHEMA public TO electric;
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO electric;
+GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO electric;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO electric;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO electric;
+CREATE PUBLICATION electric_publication_default;
```
-**3. Start Electric SQL:**
+**3. Set `ELECTRIC_DATABASE_URL` in `docker/.env`:**
+
+Uncomment and update this line to point Electric at your local Postgres via `host.docker.internal` (the hostname Docker containers use to reach the host machine):
```bash
-docker-compose up -d electric
+ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@host.docker.internal:5432/surfsense?sslmode=disable
```
-**4. Run database migration:**
+**4. Start Electric SQL only (skip the Docker `db` container):**
+
+```bash
+docker compose up -d --no-deps electric
+```
+
+The `--no-deps` flag starts only the `electric` service without starting the Docker-managed `db` container.
+
+**5. Run database migration and start the backend:**
```bash
cd surfsense_backend
uv run alembic upgrade head
-```
-
-**5. Start the backend:**
-
-```bash
uv run main.py
```
@@ -219,12 +167,13 @@ Electric SQL is now configured and connected to your local PostgreSQL database.
| Variable | Location | Description | Default |
|----------|----------|-------------|---------|
-| `ELECTRIC_PORT` | Root `.env` | Port to expose Electric SQL | `5133` |
-| `POSTGRES_HOST` | Root `.env` | PostgreSQL host (`db` for Docker, `host.docker.internal` for local) | `host.docker.internal` |
-| `ELECTRIC_DB_USER` | Root `.env` | Database user for Electric | `electric` |
-| `ELECTRIC_DB_PASSWORD` | Root `.env` | Database password for Electric | `electric_password` |
+| `ELECTRIC_PORT` | `docker/.env` | Port to expose Electric SQL | `5133` |
+| `ELECTRIC_DB_USER` | `docker/.env` | Database user for Electric replication | `electric` |
+| `ELECTRIC_DB_PASSWORD` | `docker/.env` | Database password for Electric replication | `electric_password` |
+| `ELECTRIC_DATABASE_URL` | `docker/.env` | Full connection URL override for Electric. Set to use `host.docker.internal` when pointing at a local Postgres instance | *(built from above defaults)* |
| `NEXT_PUBLIC_ELECTRIC_URL` | Frontend `.env` | Electric SQL server URL (PGlite connects to this) | `http://localhost:5133` |
| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | Frontend `.env` | Authentication mode (`insecure` for dev, `secure` for production) | `insecure` |
+
## Verify Setup
To verify Electric SQL is running correctly:
@@ -262,7 +211,7 @@ You should receive:
### Data Not Syncing
-- Check Electric SQL logs: `docker logs electric`
+- Check Electric SQL logs: `docker compose logs electric`
- Verify PostgreSQL replication is working
- Ensure the Electric user has proper table permissions
From f311a34bf3a46c4839234d54a06c305b47ec74ad Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 05:38:15 +0530
Subject: [PATCH 18/57] chore: update README and installation documentation to
streamline Docker setup and clarify update instructions
---
README.md | 19 +-
.../content/docs/docker-installation.mdx | 648 +++++-------------
2 files changed, 193 insertions(+), 474 deletions(-)
diff --git a/README.md b/README.md
index 9056c27f2..c839e9c99 100644
--- a/README.md
+++ b/README.md
@@ -81,21 +81,26 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Run SurfSense on your own infrastructure for full data control and privacy.
-**Quick Start (Docker one-liner):**
-
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense --restart unless-stopped ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-After starting, open [http://localhost:3000](http://localhost:3000) in your browser.
+For Docker Compose and other deployment options, see the [Docker Installation docs](https://www.surfsense.com/docs/docker-installation).
-**Update (Automatic updates with Watchtower):**
+**Update (recommended — Watchtower):**
```bash
-docker run --rm -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --run-once surfsense
+docker run --rm -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --run-once --label-filter "com.docker.compose.project=surfsense"
```
-For Docker Compose, manual installation, and other deployment options, check the [docs](https://www.surfsense.com/docs/).
+**Update (manual):**
+
+```bash
+cd surfsense # or SurfSense/docker if you used Option 2
+docker compose pull && docker compose up -d
+```
+
+For manual installation and other deployment options, check the [docs](https://www.surfsense.com/docs/).
### How to Realtime Collaborate (Beta)
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 767240206..4ca525d7c 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -3,511 +3,225 @@ title: Docker Installation
description: Setting up SurfSense using Docker
---
+This guide explains how to run SurfSense using Docker, with options ranging from a single-command install to a fully manual setup.
-This guide explains how to run SurfSense using Docker, with options ranging from quick single-command deployment to full production setups.
+## Quick Start
-## Quick Start with Docker 🐳
+### Option 1 — Install Script (recommended)
-Get SurfSense running in seconds with a single command:
-
-
-The all-in-one Docker image bundles PostgreSQL (with pgvector), Redis, and all SurfSense services. Perfect for quick evaluation and development.
-
-
-
-Make sure to include the `-v surfsense-data:/data` in your Docker command. This ensures your database and files are properly persisted.
-
-
-### One-Line Installation
-
-**Linux/macOS:**
+Downloads the compose files, generates a `SECRET_KEY`, and starts all services automatically:
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 \
- -v surfsense-data:/data \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-**Windows (PowerShell):**
+This creates a `./surfsense/` directory with `docker-compose.yml` and `.env`, then runs `docker compose up -d`.
-```powershell
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 `
- -v surfsense-data:/data `
- --name surfsense `
- --restart unless-stopped `
- ghcr.io/modsetter/surfsense:latest
-```
-
-> **Note:** A secure `SECRET_KEY` is automatically generated and persisted in the data volume on first run.
-
-### With Custom Configuration
-
-You can pass any [environment variable](/docs/manual-installation#backend-environment-variables) using `-e` flags:
+### Option 2 — Manual Docker Compose
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 \
- -v surfsense-data:/data \
- -e EMBEDDING_MODEL=openai://text-embedding-ada-002 \
- -e OPENAI_API_KEY=your_openai_api_key \
- -e AUTH_TYPE=GOOGLE \
- -e GOOGLE_OAUTH_CLIENT_ID=your_google_client_id \
- -e GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret \
- -e ETL_SERVICE=LLAMACLOUD \
- -e LLAMA_CLOUD_API_KEY=your_llama_cloud_key \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
-```
-
-
-- For Google OAuth, create credentials in the [Google Cloud Console](https://console.cloud.google.com/apis/credentials)
-- For Airtable connector, create an OAuth integration in the [Airtable Developer Hub](https://airtable.com/create/oauth)
-- If deploying behind a reverse proxy with HTTPS, add `-e BACKEND_URL=https://api.yourdomain.com`
-
-
-### Quick Start with Docker Compose
-
-For easier management with environment files:
-
-```bash
-# Download the quick start compose file
-curl -o docker-compose.yml https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker-compose.quickstart.yml
-
-# Create .env file (optional - for custom configuration)
-cat > .env << EOF
-# EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
-# ETL_SERVICE=DOCLING
-# SECRET_KEY=your_custom_secret_key # Auto-generated if not set
-EOF
-
-# Start SurfSense
+git clone https://github.com/MODSetter/SurfSense.git
+cd SurfSense/docker
+cp .env.example .env
+# Edit .env — at minimum set SECRET_KEY
docker compose up -d
```
After starting, access SurfSense at:
+
- **Frontend**: [http://localhost:3000](http://localhost:3000)
- **Backend API**: [http://localhost:8000](http://localhost:8000)
- **API Docs**: [http://localhost:8000/docs](http://localhost:8000/docs)
-- **Electric-SQL**: [http://localhost:5133](http://localhost:5133)
+- **Electric SQL**: [http://localhost:5133](http://localhost:5133)
-### Quick Start Environment Variables
+---
+
+## Configuration
+
+All configuration lives in a single `docker/.env` file (or `surfsense/.env` if you used the install script). Copy `.env.example` to `.env` and edit the values you need.
+
+### Required
+
+| Variable | Description |
+|----------|-------------|
+| `SECRET_KEY` | JWT secret key. Generate with: `openssl rand -base64 32`. Auto-generated by the install script. |
+
+### Core Settings
| Variable | Description | Default |
|----------|-------------|---------|
-| SECRET_KEY | JWT secret key (auto-generated if not set) | Auto-generated |
-| AUTH_TYPE | Authentication: `LOCAL` or `GOOGLE` | LOCAL |
-| EMBEDDING_MODEL | Model for embeddings | sentence-transformers/all-MiniLM-L6-v2 |
-| ETL_SERVICE | Document parser: `DOCLING`, `UNSTRUCTURED`, `LLAMACLOUD` | DOCLING |
-| TTS_SERVICE | Text-to-speech for podcasts | local/kokoro |
-| STT_SERVICE | Speech-to-text for audio (model size: tiny, base, small, medium, large) | local/base |
-| REGISTRATION_ENABLED | Allow new user registration | TRUE |
+| `SURFSENSE_VERSION` | Image tag to deploy. Pin to a version (e.g. `0.0.13.1`) or use `latest` | `latest` |
+| `AUTH_TYPE` | Authentication method: `LOCAL` (email/password) or `GOOGLE` (OAuth) | `LOCAL` |
+| `ETL_SERVICE` | Document parsing: `DOCLING` (local), `UNSTRUCTURED`, or `LLAMACLOUD` | `DOCLING` |
+| `EMBEDDING_MODEL` | Embedding model for vector search | `sentence-transformers/all-MiniLM-L6-v2` |
+| `TTS_SERVICE` | Text-to-speech provider for podcasts | `local/kokoro` |
+| `STT_SERVICE` | Speech-to-text provider for audio files | `local/base` |
+| `REGISTRATION_ENABLED` | Allow new user registrations | `TRUE` |
-### Useful Commands
+### Ports
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `FRONTEND_PORT` | Frontend service port | `3000` |
+| `BACKEND_PORT` | Backend API service port | `8000` |
+| `ELECTRIC_PORT` | Electric SQL service port | `5133` |
+
+### Custom Domain / Reverse Proxy
+
+Only set these if serving SurfSense on a real domain via a reverse proxy (Caddy, Nginx, Cloudflare Tunnel, etc.). Leave commented out for standard localhost deployments.
+
+| Variable | Description |
+|----------|-------------|
+| `NEXT_FRONTEND_URL` | Public frontend URL (e.g. `https://app.yourdomain.com`) |
+| `BACKEND_URL` | Public backend URL for OAuth callbacks (e.g. `https://api.yourdomain.com`) |
+| `NEXT_PUBLIC_FASTAPI_BACKEND_URL` | Backend URL used by the frontend (e.g. `https://api.yourdomain.com`) |
+| `NEXT_PUBLIC_ELECTRIC_URL` | Electric SQL URL used by the frontend (e.g. `https://electric.yourdomain.com`) |
+
+### Database
+
+Defaults work out of the box. Change for security in production.
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `DB_USER` | PostgreSQL username | `surfsense` |
+| `DB_PASSWORD` | PostgreSQL password | `surfsense` |
+| `DB_NAME` | PostgreSQL database name | `surfsense` |
+| `DB_HOST` | PostgreSQL host | `db` |
+| `DB_PORT` | PostgreSQL port | `5432` |
+| `DB_SSLMODE` | SSL mode: `disable`, `require`, `verify-ca`, `verify-full` | `disable` |
+| `DATABASE_URL` | Full connection URL override. Use for managed databases (RDS, Supabase, etc.) | *(built from above)* |
+
+### Electric SQL
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `ELECTRIC_DB_USER` | Replication user for Electric SQL | `electric` |
+| `ELECTRIC_DB_PASSWORD` | Replication password for Electric SQL | `electric_password` |
+| `ELECTRIC_DATABASE_URL` | Full connection URL override for Electric. Set to `host.docker.internal` when pointing at a local Postgres instance | *(built from above)* |
+
+### Authentication
+
+| Variable | Description |
+|----------|-------------|
+| `GOOGLE_OAUTH_CLIENT_ID` | Google OAuth client ID (required if `AUTH_TYPE=GOOGLE`) |
+| `GOOGLE_OAUTH_CLIENT_SECRET` | Google OAuth client secret (required if `AUTH_TYPE=GOOGLE`) |
+
+Create credentials at the [Google Cloud Console](https://console.cloud.google.com/apis/credentials).
+
+### External API Keys
+
+| Variable | Description |
+|----------|-------------|
+| `FIRECRAWL_API_KEY` | Firecrawl API key for web crawling |
+| `UNSTRUCTURED_API_KEY` | Unstructured.io API key (required if `ETL_SERVICE=UNSTRUCTURED`) |
+| `LLAMA_CLOUD_API_KEY` | LlamaCloud API key (required if `ETL_SERVICE=LLAMACLOUD`) |
+
+### Connector OAuth Keys
+
+Uncomment the connectors you want to use. Redirect URIs follow the pattern `http://localhost:8000/api/v1/auth/<provider>/connector/callback`.
+
+| Connector | Variables |
+|-----------|-----------|
+| Google Drive / Gmail / Calendar | `GOOGLE_DRIVE_REDIRECT_URI`, `GOOGLE_GMAIL_REDIRECT_URI`, `GOOGLE_CALENDAR_REDIRECT_URI` |
+| Notion | `NOTION_CLIENT_ID`, `NOTION_CLIENT_SECRET`, `NOTION_REDIRECT_URI` |
+| Slack | `SLACK_CLIENT_ID`, `SLACK_CLIENT_SECRET`, `SLACK_REDIRECT_URI` |
+| Discord | `DISCORD_CLIENT_ID`, `DISCORD_CLIENT_SECRET`, `DISCORD_BOT_TOKEN`, `DISCORD_REDIRECT_URI` |
+| Jira & Confluence | `ATLASSIAN_CLIENT_ID`, `ATLASSIAN_CLIENT_SECRET`, `JIRA_REDIRECT_URI`, `CONFLUENCE_REDIRECT_URI` |
+| Linear | `LINEAR_CLIENT_ID`, `LINEAR_CLIENT_SECRET`, `LINEAR_REDIRECT_URI` |
+| ClickUp | `CLICKUP_CLIENT_ID`, `CLICKUP_CLIENT_SECRET`, `CLICKUP_REDIRECT_URI` |
+| Airtable | `AIRTABLE_CLIENT_ID`, `AIRTABLE_CLIENT_SECRET`, `AIRTABLE_REDIRECT_URI` |
+| Microsoft Teams | `TEAMS_CLIENT_ID`, `TEAMS_CLIENT_SECRET`, `TEAMS_REDIRECT_URI` |
+
+For Airtable, create an OAuth integration at the [Airtable Developer Hub](https://airtable.com/create/oauth).
+
+### Observability (optional)
+
+| Variable | Description |
+|----------|-------------|
+| `LANGSMITH_TRACING` | Enable LangSmith tracing (`true` / `false`) |
+| `LANGSMITH_ENDPOINT` | LangSmith API endpoint |
+| `LANGSMITH_API_KEY` | LangSmith API key |
+| `LANGSMITH_PROJECT` | LangSmith project name |
+
+### Advanced (optional)
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `SCHEDULE_CHECKER_INTERVAL` | How often to check for scheduled connector tasks (e.g. `5m`, `1h`) | `5m` |
+| `RERANKERS_ENABLED` | Enable document reranking for improved search | `FALSE` |
+| `RERANKERS_MODEL_NAME` | Reranker model name (e.g. `ms-marco-MiniLM-L-12-v2`) | |
+| `RERANKERS_MODEL_TYPE` | Reranker model type (e.g. `flashrank`) | |
+| `PAGES_LIMIT` | Max pages per user for ETL services | unlimited |
+
+---
+
+## Docker Services
+
+| Service | Description |
+|---------|-------------|
+| `db` | PostgreSQL with pgvector extension |
+| `redis` | Message broker for Celery |
+| `backend` | FastAPI application server |
+| `celery_worker` | Background task processing (document indexing, etc.) |
+| `celery_beat` | Periodic task scheduler (connector sync) |
+| `electric` | Electric SQL — real-time sync for the frontend |
+| `frontend` | Next.js web application |
+
+All services start automatically with `docker compose up -d`.
+
+---
+
+## Updating
+
+**Option 1 — Watchtower (recommended):**
```bash
-# View logs
-docker logs -f surfsense
-
-# Stop SurfSense
-docker stop surfsense
-
-# Start SurfSense
-docker start surfsense
-
-# Remove container (data preserved in volume)
-docker rm surfsense
-
-# Remove container AND data
-docker rm surfsense && docker volume rm surfsense-data
-```
-
-### Updating
-
-To update SurfSense to the latest version, you can use either of the following methods:
-
-
-Your data is safe! The `surfsense-data` volume persists across updates, and database migrations are applied automatically on every startup.
-
-
-**Option 1: Using Watchtower (one-time auto-update)**
-
-[Watchtower](https://github.com/nicholas-fedor/watchtower) can automatically pull the latest image, stop the old container, and restart it with the same options:
-
-```bash
-docker run --rm \
- -v /var/run/docker.sock:/var/run/docker.sock \
- nickfedor/watchtower \
- --run-once surfsense
+docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
+ nickfedor/watchtower --run-once \
+ --label-filter "com.docker.compose.project=surfsense"
```
-Use the `nickfedor/watchtower` fork. The original `containrrr/watchtower` is no longer maintained and may fail with newer Docker versions.
+Use `nickfedor/watchtower`. The original `containrrr/watchtower` is no longer maintained and may fail with newer Docker versions.
-**Option 2: Manual Update**
+**Option 2 — Manual:**
```bash
-# Stop and remove the current container
-docker rm -f surfsense
-
-# Pull the latest image
-docker pull ghcr.io/modsetter/surfsense:latest
-
-# Start with the new image
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 \
- -v surfsense-data:/data \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+cd surfsense # or SurfSense/docker if you cloned manually
+docker compose pull && docker compose up -d
```
-If you used Docker Compose for the quick start, updating is simpler:
+Database migrations are applied automatically on every startup.
+
+---
+
+## Useful Commands
```bash
-docker compose -f docker-compose.quickstart.yml pull
-docker compose -f docker-compose.quickstart.yml up -d
+# View logs (all services)
+docker compose logs -f
+
+# View logs for a specific service
+docker compose logs -f backend
+docker compose logs -f electric
+
+# Stop all services
+docker compose down
+
+# Restart a specific service
+docker compose restart backend
+
+# Stop and remove all containers + volumes (destructive!)
+docker compose down -v
```
---
-## Full Docker Compose Setup (Production)
-
-For production deployments with separate services and more control, use the full Docker Compose setup below.
-
-## Prerequisites
-
-Before you begin, ensure you have:
-
-- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) installed on your machine
-- [Git](https://git-scm.com/downloads) (to clone the repository)
-- Completed all the [prerequisite setup steps](/docs) including:
- - Auth setup
- - **File Processing ETL Service** (choose one):
- - Unstructured.io API key (Supports 34+ formats)
- - LlamaIndex API key (enhanced parsing, supports 50+ formats)
- - Docling (local processing, no API key required, supports PDF, Office docs, images, HTML, CSV)
- - Other required API keys
-
-## Installation Steps
-
-1. **Configure Environment Variables**
- Set up the necessary environment variables:
-
- **Linux/macOS:**
-
- ```bash
- # Copy example environment files
- cp surfsense_backend/.env.example surfsense_backend/.env
- cp surfsense_web/.env.example surfsense_web/.env
- cp .env.example .env # For Docker-specific settings
- ```
-
- **Windows (Command Prompt):**
-
- ```cmd
- copy surfsense_backend\.env.example surfsense_backend\.env
- copy surfsense_web\.env.example surfsense_web\.env
- copy .env.example .env
- ```
-
- **Windows (PowerShell):**
-
- ```powershell
- Copy-Item -Path surfsense_backend\.env.example -Destination surfsense_backend\.env
- Copy-Item -Path surfsense_web\.env.example -Destination surfsense_web\.env
- Copy-Item -Path .env.example -Destination .env
- ```
-
- Edit all `.env` files and fill in the required values:
-
-### Docker-Specific Environment Variables (Optional)
-
-| ENV VARIABLE | DESCRIPTION | DEFAULT VALUE |
-|----------------------------|-----------------------------------------------------------------------------|---------------------|
-| FRONTEND_PORT | Port for the frontend service | 3000 |
-| BACKEND_PORT | Port for the backend API service | 8000 |
-| POSTGRES_PORT | Port for the PostgreSQL database | 5432 |
-| PGADMIN_PORT | Port for pgAdmin web interface | 5050 |
-| REDIS_PORT | Port for Redis (used by Celery) | 6379 |
-| FLOWER_PORT | Port for Flower (Celery monitoring tool) | 5555 |
-| POSTGRES_USER | PostgreSQL username | postgres |
-| POSTGRES_PASSWORD | PostgreSQL password | postgres |
-| POSTGRES_DB | PostgreSQL database name | surfsense |
-| PGADMIN_DEFAULT_EMAIL | Email for pgAdmin login | admin@surfsense.com |
-| PGADMIN_DEFAULT_PASSWORD | Password for pgAdmin login | surfsense |
-| NEXT_PUBLIC_FASTAPI_BACKEND_URL | URL of the backend API (used by frontend during build and runtime) | http://localhost:8000 |
-| NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE | Authentication method for frontend: `LOCAL` or `GOOGLE` | LOCAL |
-| NEXT_PUBLIC_ETL_SERVICE | Document parsing service for frontend UI: `UNSTRUCTURED`, `LLAMACLOUD`, or `DOCLING` | DOCLING |
-| ELECTRIC_PORT | Port for Electric-SQL service | 5133 |
-| POSTGRES_HOST | PostgreSQL host for Electric connection (`db` for Docker PostgreSQL, `host.docker.internal` for local PostgreSQL) | db |
-| ELECTRIC_DB_USER | PostgreSQL username for Electric connection | electric |
-| ELECTRIC_DB_PASSWORD | PostgreSQL password for Electric connection | electric_password |
-| NEXT_PUBLIC_ELECTRIC_URL | URL for Electric-SQL service (used by frontend) | http://localhost:5133 |
-
-**Note:** Frontend environment variables with the `NEXT_PUBLIC_` prefix are embedded into the Next.js production build at build time. Since the frontend now runs as a production build in Docker, these variables must be set in the root `.env` file (Docker-specific configuration) and will be passed as build arguments during the Docker build process.
-
-**Backend Environment Variables:**
-
-| ENV VARIABLE | DESCRIPTION |
-| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| DATABASE_URL | PostgreSQL connection string (e.g., `postgresql+asyncpg://postgres:postgres@localhost:5432/surfsense`) |
-| SECRET_KEY | JWT Secret key for authentication (should be a secure random string) |
-| NEXT_FRONTEND_URL | URL where your frontend application is hosted (e.g., `http://localhost:3000`) |
-| BACKEND_URL | (Optional) Public URL of the backend for OAuth callbacks (e.g., `https://api.yourdomain.com`). Required when running behind a reverse proxy with HTTPS. Used to set correct OAuth redirect URLs and secure cookies. |
-| AUTH_TYPE | Authentication method: `GOOGLE` for OAuth with Google, `LOCAL` for email/password authentication |
-| GOOGLE_OAUTH_CLIENT_ID | (Optional) Client ID from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
-| GOOGLE_OAUTH_CLIENT_SECRET | (Optional) Client secret from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
-| ELECTRIC_DB_USER | (Optional) PostgreSQL username for Electric-SQL connection (default: `electric`) |
-| ELECTRIC_DB_PASSWORD | (Optional) PostgreSQL password for Electric-SQL connection (default: `electric_password`) |
-| EMBEDDING_MODEL | Name of the embedding model (e.g., `sentence-transformers/all-MiniLM-L6-v2`, `openai://text-embedding-ada-002`) |
-| RERANKERS_ENABLED | (Optional) Enable or disable document reranking for improved search results (e.g., `TRUE` or `FALSE`, default: `FALSE`) |
-| RERANKERS_MODEL_NAME | Name of the reranker model (e.g., `ms-marco-MiniLM-L-12-v2`) (required if RERANKERS_ENABLED=TRUE) |
-| RERANKERS_MODEL_TYPE | Type of reranker model (e.g., `flashrank`) (required if RERANKERS_ENABLED=TRUE) |
-| TTS_SERVICE | Text-to-Speech API provider for Podcasts (e.g., `local/kokoro`, `openai/tts-1`). See [supported providers](https://docs.litellm.ai/docs/text_to_speech#supported-providers) |
-| TTS_SERVICE_API_KEY | (Optional if local) API key for the Text-to-Speech service |
-| TTS_SERVICE_API_BASE | (Optional) Custom API base URL for the Text-to-Speech service |
-| STT_SERVICE | Speech-to-Text API provider for Audio Files (e.g., `local/base`, `openai/whisper-1`). See [supported providers](https://docs.litellm.ai/docs/audio_transcription#supported-providers) |
-| STT_SERVICE_API_KEY | (Optional if local) API key for the Speech-to-Text service |
-| STT_SERVICE_API_BASE | (Optional) Custom API base URL for the Speech-to-Text service |
-| FIRECRAWL_API_KEY | API key for Firecrawl service for web crawling |
-| ETL_SERVICE | Document parsing service: `UNSTRUCTURED` (supports 34+ formats), `LLAMACLOUD` (supports 50+ formats including legacy document types), or `DOCLING` (local processing, supports PDF, Office docs, images, HTML, CSV) |
-| UNSTRUCTURED_API_KEY | API key for Unstructured.io service for document parsing (required if ETL_SERVICE=UNSTRUCTURED) |
-| LLAMA_CLOUD_API_KEY | API key for LlamaCloud service for document parsing (required if ETL_SERVICE=LLAMACLOUD) |
-| CELERY_BROKER_URL | Redis connection URL for Celery broker (e.g., `redis://localhost:6379/0`) |
-| CELERY_RESULT_BACKEND | Redis connection URL for Celery result backend (e.g., `redis://localhost:6379/0`) |
-| SCHEDULE_CHECKER_INTERVAL | (Optional) How often to check for scheduled connector tasks. Format: `` where unit is `m` (minutes) or `h` (hours). Examples: `1m`, `5m`, `1h`, `2h` (default: `1m`) |
-| REGISTRATION_ENABLED | (Optional) Enable or disable new user registration (e.g., `TRUE` or `FALSE`, default: `TRUE`) |
-| PAGES_LIMIT | (Optional) Maximum pages limit per user for ETL services (default: `999999999` for unlimited in OSS version) |
-
-**Google Connector OAuth Configuration:**
-| ENV VARIABLE | DESCRIPTION |
-| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| GOOGLE_CALENDAR_REDIRECT_URI | (Optional) Redirect URI for Google Calendar connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/google/calendar/connector/callback`) |
-| GOOGLE_GMAIL_REDIRECT_URI | (Optional) Redirect URI for Gmail connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/google/gmail/connector/callback`) |
-| GOOGLE_DRIVE_REDIRECT_URI | (Optional) Redirect URI for Google Drive connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/google/drive/connector/callback`) |
-
-**Connector OAuth Configurations (Optional):**
-
-| ENV VARIABLE | DESCRIPTION |
-| -------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| AIRTABLE_CLIENT_ID | (Optional) Airtable OAuth client ID from [Airtable Developer Hub](https://airtable.com/create/oauth) |
-| AIRTABLE_CLIENT_SECRET | (Optional) Airtable OAuth client secret |
-| AIRTABLE_REDIRECT_URI | (Optional) Redirect URI for Airtable connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/airtable/connector/callback`) |
-| CLICKUP_CLIENT_ID | (Optional) ClickUp OAuth client ID |
-| CLICKUP_CLIENT_SECRET | (Optional) ClickUp OAuth client secret |
-| CLICKUP_REDIRECT_URI | (Optional) Redirect URI for ClickUp connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/clickup/connector/callback`) |
-| DISCORD_CLIENT_ID | (Optional) Discord OAuth client ID |
-| DISCORD_CLIENT_SECRET | (Optional) Discord OAuth client secret |
-| DISCORD_REDIRECT_URI | (Optional) Redirect URI for Discord connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/discord/connector/callback`) |
-| DISCORD_BOT_TOKEN | (Optional) Discord bot token from Developer Portal |
-| ATLASSIAN_CLIENT_ID | (Optional) Atlassian OAuth client ID (for Jira and Confluence) |
-| ATLASSIAN_CLIENT_SECRET | (Optional) Atlassian OAuth client secret |
-| JIRA_REDIRECT_URI | (Optional) Redirect URI for Jira connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/jira/connector/callback`) |
-| CONFLUENCE_REDIRECT_URI | (Optional) Redirect URI for Confluence connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/confluence/connector/callback`) |
-| LINEAR_CLIENT_ID | (Optional) Linear OAuth client ID |
-| LINEAR_CLIENT_SECRET | (Optional) Linear OAuth client secret |
-| LINEAR_REDIRECT_URI | (Optional) Redirect URI for Linear connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/linear/connector/callback`) |
-| NOTION_CLIENT_ID | (Optional) Notion OAuth client ID |
-| NOTION_CLIENT_SECRET | (Optional) Notion OAuth client secret |
-| NOTION_REDIRECT_URI | (Optional) Redirect URI for Notion connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/notion/connector/callback`) |
-| SLACK_CLIENT_ID | (Optional) Slack OAuth client ID |
-| SLACK_CLIENT_SECRET | (Optional) Slack OAuth client secret |
-| SLACK_REDIRECT_URI | (Optional) Redirect URI for Slack connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/slack/connector/callback`) |
-| TEAMS_CLIENT_ID | (Optional) Microsoft Teams OAuth client ID |
-| TEAMS_CLIENT_SECRET | (Optional) Microsoft Teams OAuth client secret |
-| TEAMS_REDIRECT_URI | (Optional) Redirect URI for Teams connector OAuth callback (e.g., `http://localhost:8000/api/v1/auth/teams/connector/callback`) |
-
-
-**Optional Backend LangSmith Observability:**
-| ENV VARIABLE | DESCRIPTION |
-|--------------|-------------|
-| LANGSMITH_TRACING | Enable LangSmith tracing (e.g., `true`) |
-| LANGSMITH_ENDPOINT | LangSmith API endpoint (e.g., `https://api.smith.langchain.com`) |
-| LANGSMITH_API_KEY | Your LangSmith API key |
-| LANGSMITH_PROJECT | LangSmith project name (e.g., `surfsense`) |
-
-**Backend Uvicorn Server Configuration:**
-| ENV VARIABLE | DESCRIPTION | DEFAULT VALUE |
-|------------------------------|---------------------------------------------|---------------|
-| UVICORN_HOST | Host address to bind the server | 0.0.0.0 |
-| UVICORN_PORT | Port to run the backend API | 8000 |
-| UVICORN_LOG_LEVEL | Logging level (e.g., info, debug, warning) | info |
-| UVICORN_PROXY_HEADERS | Enable/disable proxy headers | false |
-| UVICORN_FORWARDED_ALLOW_IPS | Comma-separated list of allowed IPs | 127.0.0.1 |
-| UVICORN_WORKERS | Number of worker processes | 1 |
-| UVICORN_ACCESS_LOG | Enable/disable access log (true/false) | true |
-| UVICORN_LOOP | Event loop implementation | auto |
-| UVICORN_HTTP | HTTP protocol implementation | auto |
-| UVICORN_WS | WebSocket protocol implementation | auto |
-| UVICORN_LIFESPAN | Lifespan implementation | auto |
-| UVICORN_LOG_CONFIG | Path to logging config file or empty string | |
-| UVICORN_SERVER_HEADER | Enable/disable Server header | true |
-| UVICORN_DATE_HEADER | Enable/disable Date header | true |
-| UVICORN_LIMIT_CONCURRENCY | Max concurrent connections | |
-| UVICORN_LIMIT_MAX_REQUESTS | Max requests before worker restart | |
-| UVICORN_TIMEOUT_KEEP_ALIVE | Keep-alive timeout (seconds) | 5 |
-| UVICORN_TIMEOUT_NOTIFY | Worker shutdown notification timeout (sec) | 30 |
-| UVICORN_SSL_KEYFILE | Path to SSL key file | |
-| UVICORN_SSL_CERTFILE | Path to SSL certificate file | |
-| UVICORN_SSL_KEYFILE_PASSWORD | Password for SSL key file | |
-| UVICORN_SSL_VERSION | SSL version | |
-| UVICORN_SSL_CERT_REQS | SSL certificate requirements | |
-| UVICORN_SSL_CA_CERTS | Path to CA certificates file | |
-| UVICORN_SSL_CIPHERS | SSL ciphers | |
-| UVICORN_HEADERS | Comma-separated list of headers | |
-| UVICORN_USE_COLORS | Enable/disable colored logs | true |
-| UVICORN_UDS | Unix domain socket path | |
-| UVICORN_FD | File descriptor to bind to | |
-| UVICORN_ROOT_PATH | Root path for the application | |
-
-For more details, see the [Uvicorn documentation](https://www.uvicorn.org/#command-line-options).
-
-### Frontend Environment Variables
-
-**Important:** Frontend environment variables are now configured in the **Docker-Specific Environment Variables** section above since the Next.js application runs as a production build in Docker. The following `NEXT_PUBLIC_*` variables should be set in your root `.env` file:
-
-- `NEXT_PUBLIC_FASTAPI_BACKEND_URL` - URL of the backend service
-- `NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE` - Authentication method (`LOCAL` or `GOOGLE`)
-- `NEXT_PUBLIC_ETL_SERVICE` - Document parsing service (should match backend `ETL_SERVICE`)
-- `NEXT_PUBLIC_ELECTRIC_URL` - URL for Electric-SQL service (default: `http://localhost:5133`)
-- `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` - Electric-SQL authentication mode (default: `insecure`)
-
-These variables are embedded into the application during the Docker build process and affect the frontend's behavior and available features.
-
-2. **Build and Start Containers**
-
- Start the Docker containers:
-
- **Linux/macOS/Windows:**
-
- ```bash
- docker compose up --build
- ```
-
- To run in detached mode (in the background):
-
- **Linux/macOS/Windows:**
-
- ```bash
- docker compose up -d
- ```
-
-   **Note for Windows users:** If you're using older Docker Desktop versions, you might need to use `docker-compose` (with a hyphen) instead of `docker compose`.
-
-3. **Access the Applications**
-
- Once the containers are running, you can access:
-
- - Frontend: [http://localhost:3000](http://localhost:3000)
- - Backend API: [http://localhost:8000](http://localhost:8000)
- - API Documentation: [http://localhost:8000/docs](http://localhost:8000/docs)
- - Electric-SQL: [http://localhost:5133](http://localhost:5133)
- - pgAdmin: [http://localhost:5050](http://localhost:5050)
-
-## Docker Services Overview
-
-The Docker setup includes several services that work together:
-
-- **Backend**: FastAPI application server
-- **Frontend**: Next.js web application
-- **PostgreSQL (db)**: Database with pgvector extension
-- **Redis**: Message broker for Celery
-- **Electric-SQL**: Real-time sync service for database operations
-- **Celery Worker**: Handles background tasks (document processing, indexing, etc.)
-- **Celery Beat**: Scheduler for periodic tasks (enables scheduled connector indexing)
- - The schedule interval can be configured using the `SCHEDULE_CHECKER_INTERVAL` environment variable in your backend `.env` file
- - Default: checks every minute for connectors that need indexing
-- **pgAdmin**: Database management interface
-
-All services start automatically with `docker compose up`. The Celery Beat service ensures that periodic indexing functionality works out of the box.
-
-## Using pgAdmin
-
-pgAdmin is included in the Docker setup to help manage your PostgreSQL database. To connect:
-
-1. Open pgAdmin at [http://localhost:5050](http://localhost:5050)
-2. Login with the credentials from your `.env` file (default: admin@surfsense.com / surfsense)
-3. Right-click "Servers" > "Create" > "Server"
-4. In the "General" tab, name your connection (e.g., "SurfSense DB")
-5. In the "Connection" tab:
- - Host: `db`
- - Port: `5432`
- - Maintenance database: `surfsense`
- - Username: `postgres` (or your custom POSTGRES_USER)
- - Password: `postgres` (or your custom POSTGRES_PASSWORD)
-6. Click "Save" to connect
-
-## Updating (Full Docker Compose)
-
-To update the full Docker Compose production setup to the latest version:
-
-```bash
-# Pull latest changes
-git pull
-
-# Rebuild and restart containers
-docker compose up --build -d
-```
-
-Database migrations are applied automatically on startup.
-
-## Useful Docker Commands
-
-### Container Management
-
-- **Stop containers:**
-
- **Linux/macOS/Windows:**
-
- ```bash
- docker compose down
- ```
-
-- **View logs:**
-
- **Linux/macOS/Windows:**
-
- ```bash
- # All services
- docker compose logs -f
-
- # Specific service
- docker compose logs -f backend
- docker compose logs -f frontend
- docker compose logs -f db
- ```
-
-- **Restart a specific service:**
-
- **Linux/macOS/Windows:**
-
- ```bash
- docker compose restart backend
- ```
-
-- **Execute commands in a running container:**
-
- **Linux/macOS/Windows:**
-
- ```bash
- # Backend
- docker compose exec backend python -m pytest
-
- # Frontend
- docker compose exec frontend pnpm lint
- ```
-
## Troubleshooting
-- **Linux/macOS:** If you encounter permission errors, you may need to run the docker commands with `sudo`.
-- **Windows:** If you see access denied errors, make sure you're running Command Prompt or PowerShell as Administrator.
-- If ports are already in use, modify the port mappings in the `docker-compose.yml` file.
-- For backend dependency issues, check the `Dockerfile` in the backend directory.
-- For frontend dependency issues, check the `Dockerfile` in the frontend directory.
-- **Windows-specific:** If you encounter line ending issues (CRLF vs LF), configure Git to handle line endings properly with `git config --global core.autocrlf true` before cloning the repository.
-
-## Next Steps
-
-Once your installation is complete, you can start using SurfSense! Navigate to the frontend URL and log in using your Google account.
+- **Ports already in use** — Change the relevant `*_PORT` variable in `.env` and restart.
+- **Permission errors on Linux** — You may need to prefix `docker` commands with `sudo`.
+- **Electric SQL not connecting** — Check `docker compose logs electric`. If it shows `domain does not exist: db`, ensure `ELECTRIC_DATABASE_URL` is not set to a stale value in `.env`.
+- **Real-time updates not working in browser** — Open DevTools → Console and look for `[Electric]` errors. Check that `NEXT_PUBLIC_ELECTRIC_URL` matches the running Electric SQL address.
+- **Line ending issues on Windows** — Run `git config --global core.autocrlf true` before cloning.
From 512640354caf25df317ee980f6ab2966528c6d0d Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 10:23:38 +0530
Subject: [PATCH 19/57] chore: update Docker Compose files to specify names for
volume configurations
---
docker/docker-compose.dev.yml | 4 ++++
docker/docker-compose.yml | 3 +++
2 files changed, 7 insertions(+)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 591a83ee0..16e2479f4 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -184,6 +184,10 @@ services:
volumes:
postgres_data:
+ name: surfsense-postgres
pgadmin_data:
+ name: surfsense-pgadmin
redis_data:
+ name: surfsense-redis
shared_temp:
+ name: surfsense-shared-temp
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 7f983ef9c..a3723cd1c 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -167,5 +167,8 @@ services:
volumes:
postgres_data:
+ name: surfsense-postgres
redis_data:
+ name: surfsense-redis
shared_temp:
+ name: surfsense-shared-temp
From b06c70a61dc80fab2c43f9bee719f853610232c7 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 11:17:34 +0530
Subject: [PATCH 20/57] chore: update GitHub Actions workflow to trigger on
main branch pushes and add concurrency settings
---
.github/workflows/docker_build.yaml | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index f55419200..73ae1882c 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -1,6 +1,12 @@
name: Build and Push Docker Images
on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'surfsense_backend/**'
+ - 'surfsense_web/**'
workflow_dispatch:
inputs:
branch:
@@ -8,6 +14,10 @@ on:
required: false
default: ''
+concurrency:
+ group: docker-build
+ cancel-in-progress: false
+
permissions:
contents: write
packages: write
From 6e2a0556692baed55e1be2667cb78ddd173d1e42 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 11:19:51 +0530
Subject: [PATCH 21/57] chore: enhance .env.example with additional
configuration options for pgAdmin and frontend build arguments
---
docker/.env.example | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/docker/.env.example b/docker/.env.example
index 438e97941..25538e579 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -38,6 +38,29 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# FRONTEND_PORT=3000
# ELECTRIC_PORT=5133
# FLOWER_PORT=5555
+# REDIS_PORT=6379
+
+# ------------------------------------------------------------------------------
+# pgAdmin (dev compose only — docker-compose.dev.yml)
+# ------------------------------------------------------------------------------
+# PGADMIN_PORT=5050
+# PGADMIN_DEFAULT_EMAIL=admin@surfsense.com
+# PGADMIN_DEFAULT_PASSWORD=surfsense
+
+# ------------------------------------------------------------------------------
+# Frontend Build Args (dev compose only — docker-compose.dev.yml)
+# ------------------------------------------------------------------------------
+# In dev, the frontend is built from source and these are passed as build args.
+# In prod, they are derived from AUTH_TYPE, ETL_SERVICE, and the port settings.
+
+# NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
+# NEXT_PUBLIC_ETL_SERVICE=DOCLING
+
+# Deployment mode (self-hosted or cloud)
+# NEXT_PUBLIC_DEPLOYMENT_MODE=self-hosted
+
+# Electric auth mode (insecure or secure)
+# NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
# ------------------------------------------------------------------------------
# Custom Domain / Reverse Proxy
From f4f659e19008d2b2b647746252ab71e4e29b05df Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 14:37:08 +0530
Subject: [PATCH 22/57] chore: add Daytona Sandbox configuration options to
.env.example and Docker Compose files for cloud code execution
---
docker/.env.example | 10 ++++++++++
docker/docker-compose.dev.yml | 5 +++++
docker/docker-compose.yml | 5 +++++
3 files changed, 20 insertions(+)
diff --git a/docker/.env.example b/docker/.env.example
index 25538e579..20272c697 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -202,6 +202,16 @@ STT_SERVICE=local/base
# COMPOSIO_ENABLED=TRUE
# COMPOSIO_REDIRECT_URI=http://localhost:8000/api/v1/auth/composio/connector/callback
+# ------------------------------------------------------------------------------
+# Daytona Sandbox (optional — cloud code execution for the deep agent)
+# ------------------------------------------------------------------------------
+# Set DAYTONA_SANDBOX_ENABLED=TRUE and provide credentials to give the agent
+# an isolated code execution environment via the Daytona cloud API.
+# DAYTONA_SANDBOX_ENABLED=FALSE
+# DAYTONA_API_KEY=
+# DAYTONA_API_URL=https://app.daytona.io/api
+# DAYTONA_TARGET=us
+
# ------------------------------------------------------------------------------
# External API Keys (optional)
# ------------------------------------------------------------------------------
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 16e2479f4..94811b2aa 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -81,6 +81,11 @@ services:
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
+ # Daytona Sandbox – uncomment and set credentials to enable cloud code execution
+ # - DAYTONA_SANDBOX_ENABLED=TRUE
+ # - DAYTONA_API_KEY=${DAYTONA_API_KEY:-}
+ # - DAYTONA_API_URL=${DAYTONA_API_URL:-https://app.daytona.io/api}
+ # - DAYTONA_TARGET=${DAYTONA_TARGET:-us}
- SERVICE_ROLE=api
depends_on:
db:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index a3723cd1c..e1352ea32 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -62,6 +62,11 @@ services:
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3000}}
+ # Daytona Sandbox – uncomment and set credentials to enable cloud code execution
+ # DAYTONA_SANDBOX_ENABLED: "TRUE"
+ # DAYTONA_API_KEY: ${DAYTONA_API_KEY:-}
+ # DAYTONA_API_URL: ${DAYTONA_API_URL:-https://app.daytona.io/api}
+ # DAYTONA_TARGET: ${DAYTONA_TARGET:-us}
SERVICE_ROLE: api
depends_on:
db:
From cbff5bda65e273ea6d86e478e90f67c543826f5a Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 15:01:34 +0530
Subject: [PATCH 23/57] chore: remove unnecessary comment from .env.example to
streamline configuration clarity
---
docker/.env.example | 1 -
1 file changed, 1 deletion(-)
diff --git a/docker/.env.example b/docker/.env.example
index 20272c697..10c00ec67 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -1,7 +1,6 @@
# ==============================================================================
# SurfSense Docker Configuration
# ==============================================================================
-# Only variables YOU need to set are in this file.
# Database, Redis, and internal service wiring are handled automatically.
# ==============================================================================
From 91c30db3e41758fdb76c5223be4781c4ea98f58c Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 16:04:47 +0530
Subject: [PATCH 24/57] chore: reorganize comments in .env.example for improved
clarity and consistency in development configuration
---
docker/.env.example | 24 +++++++++++-------------
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 10c00ec67..470037ee3 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -37,28 +37,26 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# FRONTEND_PORT=3000
# ELECTRIC_PORT=5133
# FLOWER_PORT=5555
-# REDIS_PORT=6379
-# ------------------------------------------------------------------------------
-# pgAdmin (dev compose only — docker-compose.dev.yml)
-# ------------------------------------------------------------------------------
+# ==============================================================================
+# DEV COMPOSE ONLY (docker-compose.dev.yml)
+# You only need these if you are running `docker-compose.dev.yml`.
+# ==============================================================================
+
+# -- pgAdmin (database GUI) --
# PGADMIN_PORT=5050
# PGADMIN_DEFAULT_EMAIL=admin@surfsense.com
# PGADMIN_DEFAULT_PASSWORD=surfsense
-# ------------------------------------------------------------------------------
-# Frontend Build Args (dev compose only — docker-compose.dev.yml)
-# ------------------------------------------------------------------------------
-# In dev, the frontend is built from source and these are passed as build args.
-# In prod, they are derived from AUTH_TYPE, ETL_SERVICE, and the port settings.
+# -- Redis exposed port (dev only; Redis is internal-only in prod) --
+# REDIS_PORT=6379
+# -- Frontend Build Args --
+# In dev, the frontend is built from source and these are passed as build args.
+# In prod, they are automatically derived from AUTH_TYPE, ETL_SERVICE, and the port settings above.
# NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
# NEXT_PUBLIC_ETL_SERVICE=DOCLING
-
-# Deployment mode (self-hosted or cloud)
# NEXT_PUBLIC_DEPLOYMENT_MODE=self-hosted
-
-# Electric auth mode (insecure or secure)
# NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
# ------------------------------------------------------------------------------
From f051c19aca5d3979265dbe4894f59c8a46ae7428 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 16:15:47 +0530
Subject: [PATCH 25/57] feat: add database migration script and update
installation instructions for legacy all-in-one users
---
docker/scripts/install.sh | 14 +
docker/scripts/migrate-database.sh | 433 ++++++++++++++++++
.../content/docs/docker-installation.mdx | 10 +
surfsense_web/content/docs/how-to/meta.json | 2 +-
.../docs/how-to/migrate-from-allinone.mdx | 226 +++++++++
5 files changed, 684 insertions(+), 1 deletion(-)
create mode 100755 docker/scripts/migrate-database.sh
create mode 100644 surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 7a12c591a..89008a7d9 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -21,6 +21,20 @@ error() { printf "${RED}[SurfSense]${NC} %s\n" "$1" >&2; exit 1; }
command -v docker >/dev/null 2>&1 || error "Docker is not installed. Please install Docker first: https://docs.docker.com/get-docker/"
+# Detect legacy all-in-one volume — must migrate before installing
+if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q '^surfsense-data$'; then
+ printf "${RED}[SurfSense]${NC} Legacy volume 'surfsense-data' detected.\n" >&2
+ printf "${YELLOW}[SurfSense]${NC} You appear to be upgrading from the old all-in-one SurfSense container.\n" >&2
+ printf "${YELLOW}[SurfSense]${NC} The database has been upgraded from PostgreSQL 14 to 17 and your data\n" >&2
+ printf "${YELLOW}[SurfSense]${NC} must be migrated before running the new stack.\n" >&2
+ printf "\n" >&2
+ printf "${YELLOW}[SurfSense]${NC} Run the migration script first:\n" >&2
+ printf "${CYAN}[SurfSense]${NC} curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash\n" >&2
+ printf "\n" >&2
+ printf "${YELLOW}[SurfSense]${NC} See the full guide at: https://surfsense.net/docs/how-to/migrate-from-allinone\n" >&2
+ exit 1
+fi
+
if docker compose version >/dev/null 2>&1; then
DC="docker compose"
elif command -v docker-compose >/dev/null 2>&1; then
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
new file mode 100755
index 000000000..a3421c83c
--- /dev/null
+++ b/docker/scripts/migrate-database.sh
@@ -0,0 +1,433 @@
+#!/usr/bin/env bash
+# =============================================================================
+# SurfSense — Database Migration Script
+#
+# Migrates data from the legacy all-in-one surfsense-data volume (PostgreSQL 14)
+# to the new multi-container surfsense-postgres volume (PostgreSQL 17) using
+# a logical pg_dump / psql restore — safe across major PG versions.
+#
+# Usage:
+# bash migrate-database.sh [options]
+#
+# Options:
+# --db-user USER Old PostgreSQL username (default: surfsense)
+# --db-password PASS Old PostgreSQL password (default: surfsense)
+# --db-name NAME Old PostgreSQL database (default: surfsense)
+# --install-dir DIR New installation directory (default: ./surfsense)
+# --yes / -y Skip all confirmation prompts
+# --help / -h Show this help
+#
+# Prerequisites:
+# - Docker and Docker Compose installed and running
+# - The legacy surfsense-data volume must exist
+# - ~500 MB free disk space for the dump file
+#
+# What this script does NOT do:
+# - Delete the original surfsense-data volume (you must do this manually
+# after verifying the migration succeeded)
+# =============================================================================
+
+set -euo pipefail
+
+# ── Colours ──────────────────────────────────────────────────────────────────
+CYAN='\033[1;36m'
+YELLOW='\033[1;33m'
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+BOLD='\033[1m'
+NC='\033[0m'
+
+# ── Logging — tee everything to a log file ───────────────────────────────────
+LOG_FILE="./surfsense-migration.log"
+exec > >(tee -a "${LOG_FILE}") 2>&1
+
+# ── Output helpers ────────────────────────────────────────────────────────────
+info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; }
+success() { printf "${GREEN}[SurfSense]${NC} %s\n" "$1"; }
+warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; }
+error() { printf "${RED}[SurfSense]${NC} ERROR: %s\n" "$1" >&2; exit 1; }
+step() { printf "\n${BOLD}${CYAN}── Step %s: %s${NC}\n" "$1" "$2"; }
+
+# ── Constants ─────────────────────────────────────────────────────────────────
+REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
+OLD_VOLUME="surfsense-data"
+NEW_PG_VOLUME="surfsense-postgres"
+TEMP_CONTAINER="surfsense-pg14-migration"
+DUMP_FILE="./surfsense_migration_backup.sql"
+PG14_IMAGE="postgres:14"
+
+# ── Defaults ──────────────────────────────────────────────────────────────────
+OLD_DB_USER="surfsense"
+OLD_DB_PASSWORD="surfsense"
+OLD_DB_NAME="surfsense"
+INSTALL_DIR="./surfsense"
+AUTO_YES=false
+
+# ── Argument parsing ──────────────────────────────────────────────────────────
+while [[ $# -gt 0 ]]; do
+ case "$1" in
+ --db-user) OLD_DB_USER="$2"; shift 2 ;;
+ --db-password) OLD_DB_PASSWORD="$2"; shift 2 ;;
+ --db-name) OLD_DB_NAME="$2"; shift 2 ;;
+ --install-dir) INSTALL_DIR="$2"; shift 2 ;;
+ --yes|-y) AUTO_YES=true; shift ;;
+ --help|-h)
+ grep '^#' "$0" | grep -v '^#!/' | sed 's/^# \{0,1\}//'
+ exit 0
+ ;;
+ *) error "Unknown option: $1 — run with --help for usage." ;;
+ esac
+done
+
+# ── Confirmation helper ───────────────────────────────────────────────────────
+confirm() {
+ if $AUTO_YES; then return 0; fi
+ printf "${YELLOW}[SurfSense]${NC} %s [y/N] " "$1"
+ read -r reply
+ [[ "$reply" =~ ^[Yy]$ ]] || { warn "Aborted."; exit 0; }
+}
+
+# ── Cleanup trap — always remove the temp container ──────────────────────────
+cleanup() {
+ local exit_code=$?
+ if docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${TEMP_CONTAINER}$"; then
+ info "Cleaning up temporary container '${TEMP_CONTAINER}'..."
+ docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+ docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+ fi
+ if [[ $exit_code -ne 0 ]]; then
+ printf "\n${RED}[SurfSense]${NC} Migration failed (exit code %s).\n" "${exit_code}" >&2
+ printf "${RED}[SurfSense]${NC} Full log: %s\n" "${LOG_FILE}" >&2
+ printf "${YELLOW}[SurfSense]${NC} Your original data in '${OLD_VOLUME}' is untouched.\n" >&2
+ fi
+}
+trap cleanup EXIT
+
+# ── Wait-for-postgres helper ──────────────────────────────────────────────────
+# $1 = container name/id $2 = db user $3 = label for messages
+wait_for_pg() {
+ local container="$1"
+ local user="$2"
+ local label="${3:-PostgreSQL}"
+ local max_attempts=45
+ local attempt=0
+
+ info "Waiting for ${label} to accept connections..."
+ until docker exec "${container}" pg_isready -U "${user}" -q 2>/dev/null; do
+ attempt=$((attempt + 1))
+ if [[ $attempt -ge $max_attempts ]]; then
+ error "${label} did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: docker logs ${container}"
+ fi
+ printf "."
+ sleep 2
+ done
+ printf "\n"
+ success "${label} is ready."
+}
+
+# ── Banner ────────────────────────────────────────────────────────────────────
+printf "\n${BOLD}${CYAN}"
+cat << 'EOF'
+ ____ __ ____
+/ ___| _ _ _ __ / _|/ ___| ___ _ __ ___ ___
+\___ \| | | | '__| |_\___ \ / _ \ '_ \/ __|/ _ \
+ ___) | |_| | | | _|___) | __/ | | \__ \ __/
+|____/ \__,_|_| |_| |____/ \___|_| |_|___/\___|
+
+EOF
+printf "${NC}"
+printf "${CYAN} Database Migration: All-in-One → Multi-Container (PG 14 → 17)${NC}\n"
+printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
+
+# ── Step 0: Pre-flight checks ─────────────────────────────────────────────────
+step "0" "Pre-flight checks"
+
+# Docker CLI
+command -v docker >/dev/null 2>&1 \
+ || error "Docker is not installed. Install it at: https://docs.docker.com/get-docker/"
+
+# Docker daemon
+docker info >/dev/null 2>&1 \
+ || error "Docker daemon is not running. Please start Docker and try again."
+
+# Docker Compose
+if docker compose version >/dev/null 2>&1; then
+ DC="docker compose"
+elif command -v docker-compose >/dev/null 2>&1; then
+ DC="docker-compose"
+else
+ error "Docker Compose not found. Install it at: https://docs.docker.com/compose/install/"
+fi
+info "Docker Compose: ${DC}"
+
+# OS detection (needed for sed -i portability)
+case "$(uname -s)" in
+ Darwin*) OS_TYPE="darwin" ;;
+ Linux*) OS_TYPE="linux" ;;
+ CYGWIN*|MINGW*|MSYS*) OS_TYPE="windows" ;;
+ *) OS_TYPE="unknown" ;;
+esac
+info "OS: ${OS_TYPE}"
+
+# Old volume must exist
+docker volume ls --format '{{.Name}}' | grep -q "^${OLD_VOLUME}$" \
+ || error "Legacy volume '${OLD_VOLUME}' not found.\n Are you sure you ran the old all-in-one SurfSense container?"
+success "Found legacy volume: ${OLD_VOLUME}"
+
+# New PG volume must NOT already exist
+if docker volume ls --format '{{.Name}}' | grep -q "^${NEW_PG_VOLUME}$"; then
+ warn "Volume '${NEW_PG_VOLUME}' already exists."
+ warn "If migration already succeeded, you do not need to run this script again."
+ warn "If a previous run failed partway, remove the partial volume first:"
+ warn " docker volume rm ${NEW_PG_VOLUME}"
+ error "Aborting to avoid overwriting existing data."
+fi
+success "Target volume '${NEW_PG_VOLUME}' does not yet exist — safe to proceed."
+
+# Clean up any stale temp container from a previous failed run
+if docker ps -a --format '{{.Names}}' | grep -q "^${TEMP_CONTAINER}$"; then
+ warn "Stale migration container '${TEMP_CONTAINER}' found — removing it."
+ docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+ docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+fi
+
+# Disk space (warn if < 500 MB free)
+if command -v df >/dev/null 2>&1; then
+ FREE_KB=$(df -k . | awk 'NR==2 {print $4}')
+ FREE_MB=$(( FREE_KB / 1024 ))
+ if [[ $FREE_MB -lt 500 ]]; then
+ warn "Low disk space: ${FREE_MB} MB free. At least 500 MB recommended for the dump."
+ confirm "Continue anyway?"
+ else
+ success "Disk space: ${FREE_MB} MB free."
+ fi
+fi
+
+success "All pre-flight checks passed."
+
+# ── Confirmation prompt ───────────────────────────────────────────────────────
+printf "\n${BOLD}Migration plan:${NC}\n"
+printf " Source volume : ${YELLOW}%s${NC} (PG14 data at /data/postgres)\n" "${OLD_VOLUME}"
+printf " Target volume : ${YELLOW}%s${NC} (PG17 multi-container stack)\n" "${NEW_PG_VOLUME}"
+printf " Old credentials : user=${YELLOW}%s${NC} db=${YELLOW}%s${NC}\n" "${OLD_DB_USER}" "${OLD_DB_NAME}"
+printf " Install dir : ${YELLOW}%s${NC}\n" "${INSTALL_DIR}"
+printf " Dump saved to : ${YELLOW}%s${NC}\n" "${DUMP_FILE}"
+printf " Log file : ${YELLOW}%s${NC}\n\n" "${LOG_FILE}"
+confirm "Start migration? (Your original data will not be deleted.)"
+
+# ── Step 1: Start temporary PostgreSQL 14 container ──────────────────────────
+step "1" "Starting temporary PostgreSQL 14 container"
+
+info "Pulling ${PG14_IMAGE}..."
+docker pull "${PG14_IMAGE}" >/dev/null 2>&1 \
+ || warn "Could not pull ${PG14_IMAGE} — using cached image if available."
+
+docker run -d \
+ --name "${TEMP_CONTAINER}" \
+ -v "${OLD_VOLUME}:/data" \
+ -e PGDATA=/data/postgres \
+ -e POSTGRES_USER="${OLD_DB_USER}" \
+ -e POSTGRES_PASSWORD="${OLD_DB_PASSWORD}" \
+ -e POSTGRES_DB="${OLD_DB_NAME}" \
+ "${PG14_IMAGE}" >/dev/null
+
+success "Temporary container '${TEMP_CONTAINER}' started."
+wait_for_pg "${TEMP_CONTAINER}" "${OLD_DB_USER}" "PostgreSQL 14"
+
+# ── Step 2: Dump the database ─────────────────────────────────────────────────
+step "2" "Dumping PostgreSQL 14 database"
+
+info "Running pg_dump — this may take a while for large databases..."
+
+# Run pg_dump and capture stderr separately to detect real failures
+if ! docker exec \
+ -e PGPASSWORD="${OLD_DB_PASSWORD}" \
+ "${TEMP_CONTAINER}" \
+ pg_dump -U "${OLD_DB_USER}" --no-password "${OLD_DB_NAME}" \
+ > "${DUMP_FILE}" 2>/tmp/pg_dump_err; then
+ cat /tmp/pg_dump_err >&2
+ error "pg_dump failed. See above for details."
+fi
+
+# Validate: non-empty file
+[[ -s "${DUMP_FILE}" ]] \
+ || error "Dump file '${DUMP_FILE}' is empty. Something went wrong with pg_dump."
+
+# Validate: looks like a real PG dump
+grep -q "PostgreSQL database dump" "${DUMP_FILE}" \
+ || error "Dump file does not contain a valid PostgreSQL dump header — the file may be corrupt."
+
+# Validate: sanity-check line count
+DUMP_LINES=$(wc -l < "${DUMP_FILE}" | tr -d ' ')
+[[ $DUMP_LINES -ge 10 ]] \
+ || error "Dump has only ${DUMP_LINES} lines — suspiciously small. Aborting."
+
+DUMP_SIZE=$(du -sh "${DUMP_FILE}" 2>/dev/null | cut -f1)
+success "Dump complete: ${DUMP_SIZE} (${DUMP_LINES} lines) → ${DUMP_FILE}"
+
+# Stop the temp container now (trap will also handle it on unexpected exit)
+info "Stopping temporary PostgreSQL 14 container..."
+docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+success "Temporary container removed."
+
+# ── Step 3: Recover SECRET_KEY ────────────────────────────────────────────────
+step "3" "Recovering SECRET_KEY"
+
+RECOVERED_KEY=""
+
+if docker run --rm -v "${OLD_VOLUME}:/data" alpine \
+ sh -c 'test -f /data/.secret_key && cat /data/.secret_key' \
+ 2>/dev/null | grep -q .; then
+ RECOVERED_KEY=$(
+ docker run --rm -v "${OLD_VOLUME}:/data" alpine \
+ cat /data/.secret_key 2>/dev/null | tr -d '[:space:]'
+ )
+ success "Recovered SECRET_KEY from '${OLD_VOLUME}'."
+else
+ warn "No SECRET_KEY file found at /data/.secret_key in '${OLD_VOLUME}'."
+ warn "This means the all-in-one was launched with SECRET_KEY set as an explicit environment variable."
+ printf "${YELLOW}[SurfSense]${NC} Enter the SECRET_KEY from your old container's environment\n"
+ printf "${YELLOW}[SurfSense]${NC} (press Enter to generate a new one — existing sessions will be invalidated): "
+ read -r RECOVERED_KEY
+ if [[ -z "${RECOVERED_KEY}" ]]; then
+ RECOVERED_KEY=$(openssl rand -base64 32 2>/dev/null \
+ || head -c 32 /dev/urandom | base64 | tr -d '\n')
+ warn "Generated a new SECRET_KEY. All active browser sessions will be logged out after migration."
+ fi
+fi
+
+# ── Step 4: Set up the new installation ───────────────────────────────────────
+step "4" "Setting up new SurfSense installation"
+
+if [[ -f "${INSTALL_DIR}/docker-compose.yml" ]]; then
+ warn "Directory '${INSTALL_DIR}' already exists — skipping file download."
+else
+ info "Creating installation directory: ${INSTALL_DIR}"
+ mkdir -p "${INSTALL_DIR}/scripts"
+
+ FILES=(
+ "docker/docker-compose.yml:docker-compose.yml"
+ "docker/.env.example:.env.example"
+ "docker/postgresql.conf:postgresql.conf"
+ "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
+ )
+
+ for entry in "${FILES[@]}"; do
+ src="${entry%%:*}"
+ dest="${entry##*:}"
+ info "Downloading ${dest}..."
+ curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" \
+ || error "Failed to download ${src}. Check your internet connection."
+ done
+
+ chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
+ success "Compose files downloaded to ${INSTALL_DIR}/"
+fi
+
+# Create .env from example if it does not exist
+if [[ ! -f "${INSTALL_DIR}/.env" ]]; then
+ cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env"
+ info "Created ${INSTALL_DIR}/.env from .env.example"
+fi
+
+# Write the recovered SECRET_KEY into .env (handles both placeholder and pre-set values)
+if [[ "${OS_TYPE}" == "darwin" ]]; then
+ sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
+ sed -i '' "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
+else
+ sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
+ sed -i "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
+fi
+success "SECRET_KEY written to ${INSTALL_DIR}/.env"
+
+# ── Step 5: Start PostgreSQL 17 (new stack) ───────────────────────────────────
+step "5" "Starting PostgreSQL 17"
+
+(cd "${INSTALL_DIR}" && ${DC} up -d db)
+
+# Resolve the running container name for direct docker exec calls
+PG17_CONTAINER=$(cd "${INSTALL_DIR}" && ${DC} ps -q db 2>/dev/null | head -n1 || true)
+if [[ -z "${PG17_CONTAINER}" ]]; then
+ # Fallback to the predictable compose container name
+ PG17_CONTAINER="surfsense-db-1"
+fi
+info "PostgreSQL 17 container: ${PG17_CONTAINER}"
+
+wait_for_pg "${PG17_CONTAINER}" "${OLD_DB_USER}" "PostgreSQL 17"
+
+# ── Step 6: Restore the dump ──────────────────────────────────────────────────
+step "6" "Restoring database into PostgreSQL 17"
+
+info "Running psql restore — this may take a while for large databases..."
+
+RESTORE_ERR_FILE="/tmp/surfsense_restore_err.log"
+
+docker exec -i \
+ -e PGPASSWORD="${OLD_DB_PASSWORD}" \
+ "${PG17_CONTAINER}" \
+ psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" \
+ < "${DUMP_FILE}" \
+ 2>"${RESTORE_ERR_FILE}" || true # psql exits non-zero on warnings; check below
+
+# Surface any real (non-benign) errors
+FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR_FILE}" \
+ | grep -iv "already exists" \
+ | grep -iv "multiple primary keys" \
+ || true)
+
+if [[ -n "${FATAL_ERRORS}" ]]; then
+ warn "Restore completed with the following errors:"
+ printf "%s\n" "${FATAL_ERRORS}"
+ confirm "These may be harmless (e.g. pre-existing system objects). Continue?"
+else
+ success "Restore completed with no fatal errors."
+fi
+
+# Smoke test — verify tables exist in the restored database
+TABLE_COUNT=$(
+ docker exec \
+ -e PGPASSWORD="${OLD_DB_PASSWORD}" \
+ "${PG17_CONTAINER}" \
+ psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" -t \
+ -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \
+ 2>/dev/null | tr -d ' \n' || echo "0"
+)
+
+if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then
+ warn "Smoke test: no tables found in the restored database."
+ warn "The restore may have failed silently. Inspect the dump and restore manually:"
+ warn " docker exec -i ${PG17_CONTAINER} psql -U ${OLD_DB_USER} -d ${OLD_DB_NAME} < ${DUMP_FILE}"
+ confirm "Continue starting the rest of the stack anyway?"
+else
+ success "Smoke test passed: ${TABLE_COUNT} table(s) found in the restored database."
+fi
+
+# ── Step 7: Start all remaining services ──────────────────────────────────────
+step "7" "Starting all SurfSense services"
+
+(cd "${INSTALL_DIR}" && ${DC} up -d)
+success "All services started."
+
+# ── Done ──────────────────────────────────────────────────────────────────────
+printf "\n${GREEN}${BOLD}"
+printf "══════════════════════════════════════════════════════════════\n"
+printf " Migration complete!\n"
+printf "══════════════════════════════════════════════════════════════\n"
+printf "${NC}\n"
+
+success " Frontend : http://localhost:3000"
+success " Backend : http://localhost:8000"
+success " API Docs : http://localhost:8000/docs"
+printf "\n"
+info " Config : ${INSTALL_DIR}/.env"
+info " Logs : cd ${INSTALL_DIR} && ${DC} logs -f"
+printf "\n"
+warn "Next steps:"
+warn " 1. Open http://localhost:3000 and verify your data is intact."
+warn " 2. Once satisfied, remove the legacy volume (IRREVERSIBLE):"
+warn " docker volume rm ${OLD_VOLUME}"
+warn " 3. Delete the dump file once you no longer need it as a backup:"
+warn " rm ${DUMP_FILE}"
+warn " Full migration log saved to: ${LOG_FILE}"
+printf "\n"
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 4ca525d7c..486f79b6a 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -171,6 +171,16 @@ All services start automatically with `docker compose up -d`.
---
+## Migrating from the All-in-One Container
+
+
+If you were previously using `docker-compose.quickstart.yml` (the legacy all-in-one `surfsense` container), your data lives in a `surfsense-data` volume and requires a **one-time migration** before switching to the current setup. PostgreSQL has been upgraded from version 14 to 17, so a simple volume swap will not work.
+
+See the full step-by-step guide: [Migrate from the All-in-One Container](/docs/how-to/migrate-from-allinone).
+
+
+---
+
## Updating
**Option 1 — Watchtower (recommended):**
diff --git a/surfsense_web/content/docs/how-to/meta.json b/surfsense_web/content/docs/how-to/meta.json
index 9051b0585..97ea22261 100644
--- a/surfsense_web/content/docs/how-to/meta.json
+++ b/surfsense_web/content/docs/how-to/meta.json
@@ -1,5 +1,5 @@
{
"title": "How to",
- "pages": ["electric-sql", "realtime-collaboration"],
+ "pages": ["electric-sql", "realtime-collaboration", "migrate-from-allinone"],
"defaultOpen": false
}
diff --git a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
new file mode 100644
index 000000000..74d3d0d0b
--- /dev/null
+++ b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
@@ -0,0 +1,226 @@
+---
+title: Migrate from the All-in-One Container
+description: How to migrate your data from the legacy surfsense all-in-one Docker image to the current multi-container setup
+---
+
+The original SurfSense all-in-one image (`ghcr.io/modsetter/surfsense:latest`, run via `docker-compose.quickstart.yml`) stored all data — PostgreSQL, Redis, and configuration — in a single Docker volume named `surfsense-data`. The current setup uses separate named volumes and has upgraded PostgreSQL from **version 14 to 17**.
+
+Because PostgreSQL data files are not compatible between major versions, a **logical dump and restore** is required. This is a one-time migration.
+
+
+This guide only applies to users who ran the legacy `docker-compose.quickstart.yml` (the all-in-one `surfsense` container). If you were already using `docker/docker-compose.yml`, you do not need to migrate.
+
+
+
+If you try to run `install.sh` while the old `surfsense-data` volume exists, the script will detect it and stop with instructions to migrate first.
+
+
+---
+
+## Option A — Migration Script (recommended)
+
+A single script handles the entire process automatically: it dumps your PostgreSQL 14 data, recovers your `SECRET_KEY`, sets up the new stack, and restores into PostgreSQL 17.
+
+**Prerequisites:** Docker running, ~500 MB free disk space, internet access.
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash
+```
+
+Or download and inspect it first (recommended):
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh -o migrate-database.sh
+# Review the script, then run:
+bash migrate-database.sh
+```
+
+### Options
+
+| Flag | Description | Default |
+|------|-------------|---------|
+| `--db-user USER` | Old PostgreSQL username | `surfsense` |
+| `--db-password PASS` | Old PostgreSQL password | `surfsense` |
+| `--db-name NAME` | Old PostgreSQL database | `surfsense` |
+| `--install-dir DIR` | New installation directory | `./surfsense` |
+| `--yes` / `-y` | Skip confirmation prompts | — |
+
+If you customised the database credentials in your old all-in-one container, pass them explicitly:
+
+```bash
+bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb
+```
+
+### What the script does
+
+1. Checks prerequisites and confirms the `surfsense-data` volume exists
+2. Starts a temporary `postgres:14` container against the old data
+3. Runs `pg_dump` and validates the dump file (size + header check)
+4. Recovers your `SECRET_KEY` from the old volume (or prompts if not found)
+5. Downloads the new compose files into `./surfsense/` (skips if already present)
+6. Writes the recovered `SECRET_KEY` into `./surfsense/.env`
+7. Starts the new `db` service (PostgreSQL 17), waits for readiness
+8. Restores the dump with `psql` and runs a smoke test
+9. Starts all remaining services
+
+The original `surfsense-data` volume is **never deleted** — you remove it manually after verifying the migration.
+
+### After the script completes
+
+1. Open [http://localhost:3000](http://localhost:3000) and confirm your data is intact.
+2. Once satisfied, remove the old volume:
+ ```bash
+ docker volume rm surfsense-data
+ ```
+3. Delete the backup dump once you no longer need it:
+ ```bash
+ rm ./surfsense_migration_backup.sql
+ ```
+
+---
+
+## Option B — Manual Steps
+
+Use these steps if the migration script doesn't work on your platform (e.g. Windows without WSL2), or if you want full control over each step.
+
+### Before you start
+
+- Confirm the old volume exists: `docker volume ls | grep surfsense-data`
+- Have ~500 MB free disk space for the SQL dump.
+
+### Step 1 — Start a temporary PostgreSQL 14 container
+
+```bash
+docker run -d --name surfsense-pg14-temp \
+ -v surfsense-data:/data \
+ -e PGDATA=/data/postgres \
+ -e POSTGRES_USER=surfsense \
+ -e POSTGRES_PASSWORD=surfsense \
+ -e POSTGRES_DB=surfsense \
+ postgres:14
+```
+
+Wait ~10 seconds, then confirm it is healthy:
+
+```bash
+docker exec surfsense-pg14-temp pg_isready -U surfsense
+```
+
+### Step 2 — Dump the database
+
+```bash
+docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \
+ pg_dump -U surfsense surfsense > surfsense_backup.sql
+```
+
+Verify the dump is valid:
+
+```bash
+wc -l surfsense_backup.sql
+grep "PostgreSQL database dump" surfsense_backup.sql
+```
+
+### Step 3 — Recover your SECRET\_KEY
+
+```bash
+docker run --rm -v surfsense-data:/data alpine cat /data/.secret_key
+```
+
+Copy the printed value for the next step.
+
+### Step 4 — Set up the new stack
+
+```bash
+git clone https://github.com/MODSetter/SurfSense.git
+cd SurfSense/docker
+cp .env.example .env
+```
+
+Set `SECRET_KEY` in `.env` to the value recovered above.
+
+### Step 5 — Start PostgreSQL 17
+
+```bash
+docker compose up -d db
+```
+
+Wait until ready:
+
+```bash
+docker compose exec db pg_isready -U surfsense
+```
+
+### Step 6 — Restore the database
+
+```bash
+docker compose exec -T db \
+ psql -U surfsense -d surfsense < surfsense_backup.sql
+```
+
+Harmless errors such as `ERROR: role "surfsense" already exists` are expected — psql skips objects that already exist and continues the restore.
+
+### Step 7 — Start all services
+
+```bash
+docker compose up -d
+```
+
+### Step 8 — Clean up
+
+After verifying everything works:
+
+```bash
+# Remove temporary PG14 container
+docker stop surfsense-pg14-temp && docker rm surfsense-pg14-temp
+
+# Remove old volume (irreversible — only after confirming migration success)
+docker volume rm surfsense-data
+```
+
+---
+
+## Troubleshooting
+
+### Script exits with "surfsense-postgres already exists"
+
+A previous migration attempt partially completed. Remove the incomplete volume and retry:
+
+```bash
+docker volume rm surfsense-postgres
+bash migrate-database.sh
+```
+
+### PostgreSQL 14 container fails to start
+
+Check the container logs:
+
+```bash
+docker logs surfsense-pg14-temp
+```
+
+If you see permission errors, the data directory may need ownership correction. Run:
+
+```bash
+docker exec surfsense-pg14-temp chown -R postgres:postgres /data/postgres
+```
+
+Then restart the container.
+
+### Empty or corrupt dump file
+
+If `surfsense_backup.sql` is smaller than expected, run the dump command again with verbose output:
+
+```bash
+docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \
+ pg_dump -U surfsense surfsense -v 2>&1 | head -40
+```
+
+### Cannot find `/data/.secret_key`
+
+If the all-in-one was launched with `SECRET_KEY` set explicitly as an environment variable, the key was never written to the volume. Set the same value manually in `docker/.env`. If it is lost, generate a new one:
+
+```bash
+openssl rand -base64 32
+```
+
+Note: a new key invalidates all existing browser sessions — users will need to log in again.
From 5030dec96b618b06d9ba0b0e038b2bdf64816991 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 16:18:13 +0530
Subject: [PATCH 26/57] chore: update banner in installation and database
migration scripts for improved visibility
---
docker/scripts/install.sh | 2 ++
docker/scripts/migrate-database.sh | 16 +++++++++++-----
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 89008a7d9..5cae0a328 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -91,6 +91,7 @@ echo ""
printf '\033[1;37m'
cat << 'EOF'
+
.d8888b. .d888 .d8888b.
d88P Y88b d88P" d88P Y88b
Y88b. 888 Y88b.
@@ -100,6 +101,7 @@ Y88b. 888 Y88b.
Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
"Y8888P" "Y88888 888 888 "Y8888P" "Y8888 888 888 88888P' "Y8888
+
EOF
printf " Your personal AI-powered search engine ${YELLOW}v${SURFSENSE_VERSION:-latest}${NC}\n"
printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
index a3421c83c..c55fb002e 100755
--- a/docker/scripts/migrate-database.sh
+++ b/docker/scripts/migrate-database.sh
@@ -128,11 +128,17 @@ wait_for_pg() {
# ── Banner ────────────────────────────────────────────────────────────────────
printf "\n${BOLD}${CYAN}"
cat << 'EOF'
- ____ __ ____
-/ ___| _ _ _ __ / _|/ ___| ___ _ __ ___ ___
-\___ \| | | | '__| |_\___ \ / _ \ '_ \/ __|/ _ \
- ___) | |_| | | | _|___) | __/ | | \__ \ __/
-|____/ \__,_|_| |_| |____/ \___|_| |_|___/\___|
+
+
+ .d8888b. .d888 .d8888b.
+d88P Y88b d88P" d88P Y88b
+Y88b. 888 Y88b.
+ "Y888b. 888 888 888d888 888888 "Y888b. .d88b. 88888b. .d8888b .d88b.
+ "Y88b. 888 888 888P" 888 "Y88b. d8P Y8b 888 "88b 88K d8P Y8b
+ "888 888 888 888 888 "888 88888888 888 888 "Y8888b. 88888888
+Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
+ "Y8888P" "Y88888 888 888 "Y8888P" "Y8888 888 888 88888P' "Y8888
+
EOF
printf "${NC}"
From 025643ffa284aa2f5a1ace0b7923bf1409ddae9a Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 17:24:02 +0530
Subject: [PATCH 27/57] feat: enhance installation and migration scripts to
automate legacy data migration process
- Updated install.sh to handle fresh installations and migrations from the legacy all-in-one container.
- Added checks for Docker and Docker Compose prerequisites.
- Implemented a wait-for-postgres function to ensure database readiness.
- Enhanced migration script to extract data and recover SECRET_KEY automatically.
- Updated documentation to reflect the new automated migration process.
---
docker/scripts/install.sh | 213 ++++++++++++--
docker/scripts/migrate-database.sh | 276 ++++++------------
.../docs/how-to/migrate-from-allinone.mdx | 197 +++++--------
3 files changed, 357 insertions(+), 329 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 5cae0a328..c08b49729 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -2,50 +2,85 @@
# =============================================================================
# SurfSense — One-line Install Script
# Usage: curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
+#
+# Handles two cases automatically:
+# 1. Fresh install — no prior SurfSense data detected
+# 2. Migration from the legacy all-in-one container (surfsense-data volume)
+# Downloads and runs migrate-database.sh --yes, then restores the dump
+# into the new PostgreSQL 17 stack. The user runs one command for both.
+#
+# If you used custom database credentials in the old all-in-one container, run
+# migrate-database.sh manually first (with --db-user / --db-password flags),
+# then re-run this script:
+# curl -fsSL .../docker/scripts/migrate-database.sh | bash -s -- --db-user X --db-password Y
# =============================================================================
set -euo pipefail
REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
INSTALL_DIR="./surfsense"
+OLD_VOLUME="surfsense-data"
+DUMP_FILE="./surfsense_migration_backup.sql"
+KEY_FILE="./surfsense_migration_secret.key"
+MIGRATION_MODE=false
+
CYAN='\033[1;36m'
YELLOW='\033[1;33m'
+GREEN='\033[0;32m'
RED='\033[0;31m'
+BOLD='\033[1m'
NC='\033[0m'
-info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; }
-warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; }
-error() { printf "${RED}[SurfSense]${NC} %s\n" "$1" >&2; exit 1; }
+info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; }
+success() { printf "${GREEN}[SurfSense]${NC} %s\n" "$1"; }
+warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; }
+error() { printf "${RED}[SurfSense]${NC} ERROR: %s\n" "$1" >&2; exit 1; }
+step() { printf "\n${BOLD}${CYAN}── %s${NC}\n" "$1"; }
-# ── Pre-flight checks ───────────────────────────────────────────────────────
+# ── Pre-flight checks ────────────────────────────────────────────────────────
-command -v docker >/dev/null 2>&1 || error "Docker is not installed. Please install Docker first: https://docs.docker.com/get-docker/"
+step "Checking prerequisites"
-# Detect legacy all-in-one volume — must migrate before installing
-if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q '^surfsense-data$'; then
- printf "${RED}[SurfSense]${NC} Legacy volume 'surfsense-data' detected.\n" >&2
- printf "${YELLOW}[SurfSense]${NC} You appear to be upgrading from the old all-in-one SurfSense container.\n" >&2
- printf "${YELLOW}[SurfSense]${NC} The database has been upgraded from PostgreSQL 14 to 17 and your data\n" >&2
- printf "${YELLOW}[SurfSense]${NC} must be migrated before running the new stack.\n" >&2
- printf "\n" >&2
- printf "${YELLOW}[SurfSense]${NC} Run the migration script first:\n" >&2
- printf "${CYAN}[SurfSense]${NC} curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash\n" >&2
- printf "\n" >&2
- printf "${YELLOW}[SurfSense]${NC} See the full guide at: https://surfsense.net/docs/how-to/migrate-from-allinone\n" >&2
- exit 1
-fi
+command -v docker >/dev/null 2>&1 \
+ || error "Docker is not installed. Install it at: https://docs.docker.com/get-docker/"
+success "Docker found."
+
+docker info >/dev/null 2>&1 \
+ || error "Docker daemon is not running. Please start Docker and try again."
+success "Docker daemon is running."
if docker compose version >/dev/null 2>&1; then
DC="docker compose"
elif command -v docker-compose >/dev/null 2>&1; then
DC="docker-compose"
else
- error "Docker Compose is not installed. Please install Docker Compose: https://docs.docker.com/compose/install/"
+ error "Docker Compose is not installed. Install it at: https://docs.docker.com/compose/install/"
fi
+success "Docker Compose found ($DC)."
+
+# ── Wait-for-postgres helper ─────────────────────────────────────────────────
+wait_for_pg() {
+ local db_user="$1"
+ local max_attempts=45
+ local attempt=0
+
+ info "Waiting for PostgreSQL to accept connections..."
+ until (cd "${INSTALL_DIR}" && ${DC} exec -T db pg_isready -U "${db_user}" -q 2>/dev/null); do
+ attempt=$((attempt + 1))
+ if [[ $attempt -ge $max_attempts ]]; then
+ error "PostgreSQL did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: cd ${INSTALL_DIR} && ${DC} logs db"
+ fi
+ printf "."
+ sleep 2
+ done
+ printf "\n"
+ success "PostgreSQL is ready."
+}
# ── Download files ───────────────────────────────────────────────────────────
-info "Creating installation directory: ${INSTALL_DIR}"
+step "Downloading SurfSense files"
+info "Installation directory: ${INSTALL_DIR}"
mkdir -p "${INSTALL_DIR}/scripts"
FILES=(
@@ -53,39 +88,148 @@ FILES=(
"docker/.env.example:.env.example"
"docker/postgresql.conf:postgresql.conf"
"docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
+ "docker/scripts/migrate-database.sh:scripts/migrate-database.sh"
)
for entry in "${FILES[@]}"; do
src="${entry%%:*}"
dest="${entry##*:}"
info "Downloading ${dest}..."
- curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" || error "Failed to download ${src}"
+ curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" \
+ || error "Failed to download ${dest}. Check your internet connection and try again."
done
chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
+chmod +x "${INSTALL_DIR}/scripts/migrate-database.sh"
+success "All files downloaded to ${INSTALL_DIR}/"
+
+# ── Legacy all-in-one detection ──────────────────────────────────────────────
+# Detect surfsense-data volume → migration mode.
+# If a dump already exists (from a previous partial run) skip extraction and
+# go straight to restore — this makes re-runs safe and idempotent.
+
+if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q "^${OLD_VOLUME}$"; then
+ MIGRATION_MODE=true
+
+ if [[ -f "${DUMP_FILE}" ]]; then
+ step "Migration mode — using existing dump (skipping extraction)"
+ info "Found existing dump: ${DUMP_FILE}"
+ info "Skipping data extraction — proceeding directly to restore."
+ info "To force a fresh extraction, remove the dump first: rm ${DUMP_FILE}"
+ else
+ step "Migration mode — legacy all-in-one container detected"
+ warn "Volume '${OLD_VOLUME}' found. Your data will be migrated automatically."
+ warn "PostgreSQL is being upgraded from version 14 to 17."
+ warn "Your original data will NOT be deleted."
+ printf "\n"
+ info "Running data extraction (migrate-database.sh --yes)..."
+ info "Full extraction log: ./surfsense-migration.log"
+ printf "\n"
+
+ # Run extraction non-interactively. On failure the error from
+ # migrate-database.sh is printed and install.sh exits here.
+ bash "${INSTALL_DIR}/scripts/migrate-database.sh" --yes \
+ || error "Data extraction failed. See ./surfsense-migration.log for details.\nYou can also run migrate-database.sh manually with custom flags:\n bash ${INSTALL_DIR}/scripts/migrate-database.sh --db-user X --db-password Y"
+
+ printf "\n"
+ success "Data extraction complete. Proceeding with installation and restore."
+ fi
+fi
# ── Set up .env ──────────────────────────────────────────────────────────────
+step "Configuring environment"
+
if [ ! -f "${INSTALL_DIR}/.env" ]; then
cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env"
- SECRET_KEY=$(openssl rand -base64 32 2>/dev/null || head -c 32 /dev/urandom | base64)
+ if $MIGRATION_MODE && [[ -f "${KEY_FILE}" ]]; then
+ SECRET_KEY=$(cat "${KEY_FILE}" | tr -d '[:space:]')
+ success "Using SECRET_KEY recovered from legacy container."
+ else
+ SECRET_KEY=$(openssl rand -base64 32 2>/dev/null \
+ || head -c 32 /dev/urandom | base64 | tr -d '\n')
+ success "Generated new random SECRET_KEY."
+ fi
+
if [[ "$OSTYPE" == "darwin"* ]]; then
sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env"
else
sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env"
fi
-
- info "Generated random SECRET_KEY in .env"
+ info "Created ${INSTALL_DIR}/.env"
else
- warn ".env already exists — skipping (your existing config is preserved)"
+ warn ".env already exists — keeping your existing configuration."
fi
# ── Start containers ─────────────────────────────────────────────────────────
-info "Starting SurfSense..."
-cd "${INSTALL_DIR}"
-${DC} up -d
+if $MIGRATION_MODE; then
+ # Read DB credentials from .env (fall back to defaults from docker-compose.yml)
+ DB_USER=$(grep '^DB_USER=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
+ DB_PASS=$(grep '^DB_PASSWORD=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
+ DB_NAME=$(grep '^DB_NAME=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
+ DB_USER="${DB_USER:-surfsense}"
+ DB_PASS="${DB_PASS:-surfsense}"
+ DB_NAME="${DB_NAME:-surfsense}"
+
+ step "Starting PostgreSQL 17"
+ (cd "${INSTALL_DIR}" && ${DC} up -d db)
+ wait_for_pg "${DB_USER}"
+
+ step "Restoring database"
+ info "Restoring dump into PostgreSQL 17 — this may take a while for large databases..."
+
+ RESTORE_ERR="/tmp/surfsense_restore_err.log"
+ (cd "${INSTALL_DIR}" && ${DC} exec -T \
+ -e PGPASSWORD="${DB_PASS}" \
+ db psql -U "${DB_USER}" -d "${DB_NAME}" \
+ 2>"${RESTORE_ERR}") < "${DUMP_FILE}" || true
+
+ # Surface real errors; ignore benign "already exists" noise from pg_dump headers
+ FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR}" \
+ | grep -iv "already exists" \
+ | grep -iv "multiple primary keys" \
+ || true)
+
+ if [[ -n "${FATAL_ERRORS}" ]]; then
+ warn "Restore completed with errors (may be harmless pg_dump header noise):"
+ printf "%s\n" "${FATAL_ERRORS}"
+ warn "If SurfSense behaves incorrectly, inspect manually:"
+ warn " cd ${INSTALL_DIR} && ${DC} exec db psql -U ${DB_USER} -d ${DB_NAME} < ${DUMP_FILE}"
+ else
+ success "Database restored with no fatal errors."
+ fi
+
+ # Smoke test — verify tables are present
+ TABLE_COUNT=$(
+ cd "${INSTALL_DIR}" && ${DC} exec -T \
+ -e PGPASSWORD="${DB_PASS}" \
+ db psql -U "${DB_USER}" -d "${DB_NAME}" -t \
+ -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \
+ 2>/dev/null | tr -d ' \n' || echo "0"
+ )
+ if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then
+ warn "Smoke test: no tables found after restore."
+ warn "The restore may have failed silently. Check: cd ${INSTALL_DIR} && ${DC} logs db"
+ else
+ success "Smoke test passed: ${TABLE_COUNT} table(s) restored successfully."
+ fi
+
+ step "Starting all SurfSense services"
+ (cd "${INSTALL_DIR}" && ${DC} up -d)
+ success "All services started."
+
+ # Key file is no longer needed — SECRET_KEY is now in .env
+ rm -f "${KEY_FILE}"
+
+else
+ step "Starting SurfSense"
+ (cd "${INSTALL_DIR}" && ${DC} up -d)
+ success "All services started."
+fi
+
+# ── Done ─────────────────────────────────────────────────────────────────────
echo ""
printf '\033[1;37m'
@@ -105,6 +249,7 @@ Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
EOF
printf " Your personal AI-powered search engine ${YELLOW}v${SURFSENSE_VERSION:-latest}${NC}\n"
printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
+
info " Frontend: http://localhost:3000"
info " Backend: http://localhost:8000"
info " API Docs: http://localhost:8000/docs"
@@ -114,5 +259,13 @@ info " Logs: cd ${INSTALL_DIR} && ${DC} logs -f"
info " Stop: cd ${INSTALL_DIR} && ${DC} down"
info " Update: cd ${INSTALL_DIR} && ${DC} pull && ${DC} up -d"
info ""
-warn " First startup may take sometime."
-warn " Edit .env to configure OAuth connectors, API keys, etc."
+
+if $MIGRATION_MODE; then
+ warn " Migration complete! Open frontend and verify your data."
+ warn " Once verified, clean up the legacy volume and dump file:"
+ warn " docker volume rm ${OLD_VOLUME}"
+ warn " rm ${DUMP_FILE}"
+else
+ warn " First startup may take a few minutes while images are pulled."
+ warn " Edit ${INSTALL_DIR}/.env to configure API keys, OAuth, etc."
+fi
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
index c55fb002e..3e5c29cb8 100755
--- a/docker/scripts/migrate-database.sh
+++ b/docker/scripts/migrate-database.sh
@@ -2,9 +2,8 @@
# =============================================================================
# SurfSense — Database Migration Script
#
-# Migrates data from the legacy all-in-one surfsense-data volume (PostgreSQL 14)
-# to the new multi-container surfsense-postgres volume (PostgreSQL 17) using
-# a logical pg_dump / psql restore — safe across major PG versions.
+# Extracts data from the legacy all-in-one surfsense-data volume (PostgreSQL 14)
+# and saves it as a SQL dump + SECRET_KEY file ready for install.sh to restore.
#
# Usage:
# bash migrate-database.sh [options]
@@ -13,18 +12,30 @@
# --db-user USER Old PostgreSQL username (default: surfsense)
# --db-password PASS Old PostgreSQL password (default: surfsense)
# --db-name NAME Old PostgreSQL database (default: surfsense)
-# --install-dir DIR New installation directory (default: ./surfsense)
# --yes / -y Skip all confirmation prompts
# --help / -h Show this help
#
# Prerequisites:
-# - Docker and Docker Compose installed and running
+# - Docker installed and running
# - The legacy surfsense-data volume must exist
# - ~500 MB free disk space for the dump file
#
+# What this script does:
+# 1. Stops any container using surfsense-data (to prevent corruption)
+# 2. Starts a temporary PG14 container against the old volume
+# 3. Dumps the database to ./surfsense_migration_backup.sql
+# 4. Recovers the SECRET_KEY to ./surfsense_migration_secret.key
+# 5. Exits — leaving installation to install.sh
+#
# What this script does NOT do:
-# - Delete the original surfsense-data volume (you must do this manually
-# after verifying the migration succeeded)
+# - Delete the original surfsense-data volume (do this manually after verifying)
+# - Install the new SurfSense stack (install.sh handles that automatically)
+#
+# Note:
+# install.sh downloads and runs this script automatically when it detects the
+# legacy surfsense-data volume. You only need to run this script manually if
+# you have custom database credentials (--db-user / --db-password / --db-name)
+# or if the automatic migration inside install.sh fails at the extraction step.
# =============================================================================
set -euo pipefail
@@ -49,18 +60,16 @@ error() { printf "${RED}[SurfSense]${NC} ERROR: %s\n" "$1" >&2; exit 1; }
step() { printf "\n${BOLD}${CYAN}── Step %s: %s${NC}\n" "$1" "$2"; }
# ── Constants ─────────────────────────────────────────────────────────────────
-REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
OLD_VOLUME="surfsense-data"
-NEW_PG_VOLUME="surfsense-postgres"
TEMP_CONTAINER="surfsense-pg14-migration"
DUMP_FILE="./surfsense_migration_backup.sql"
-PG14_IMAGE="postgres:14"
+KEY_FILE="./surfsense_migration_secret.key"
+PG14_IMAGE="pgvector/pgvector:pg14"
# ── Defaults ──────────────────────────────────────────────────────────────────
OLD_DB_USER="surfsense"
OLD_DB_PASSWORD="surfsense"
OLD_DB_NAME="surfsense"
-INSTALL_DIR="./surfsense"
AUTO_YES=false
# ── Argument parsing ──────────────────────────────────────────────────────────
@@ -69,7 +78,6 @@ while [[ $# -gt 0 ]]; do
--db-user) OLD_DB_USER="$2"; shift 2 ;;
--db-password) OLD_DB_PASSWORD="$2"; shift 2 ;;
--db-name) OLD_DB_NAME="$2"; shift 2 ;;
- --install-dir) INSTALL_DIR="$2"; shift 2 ;;
--yes|-y) AUTO_YES=true; shift ;;
--help|-h)
grep '^#' "$0" | grep -v '^#!/' | sed 's/^# \{0,1\}//'
@@ -96,7 +104,7 @@ cleanup() {
docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
fi
if [[ $exit_code -ne 0 ]]; then
- printf "\n${RED}[SurfSense]${NC} Migration failed (exit code %s).\n" "${exit_code}" >&2
+ printf "\n${RED}[SurfSense]${NC} Migration data extraction failed (exit code %s).\n" "${exit_code}" >&2
printf "${RED}[SurfSense]${NC} Full log: %s\n" "${LOG_FILE}" >&2
printf "${YELLOW}[SurfSense]${NC} Your original data in '${OLD_VOLUME}' is untouched.\n" >&2
fi
@@ -104,7 +112,6 @@ cleanup() {
trap cleanup EXIT
# ── Wait-for-postgres helper ──────────────────────────────────────────────────
-# $1 = container name/id $2 = db user $3 = label for messages
wait_for_pg() {
local container="$1"
local user="$2"
@@ -116,7 +123,7 @@ wait_for_pg() {
until docker exec "${container}" pg_isready -U "${user}" -q 2>/dev/null; do
attempt=$((attempt + 1))
if [[ $attempt -ge $max_attempts ]]; then
- error "${label} did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: docker logs ${container}"
+ error "${label} did not become ready after $((max_attempts * 2)) seconds. Check: docker logs ${container}"
fi
printf "."
sleep 2
@@ -142,7 +149,7 @@ Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
EOF
printf "${NC}"
-printf "${CYAN} Database Migration: All-in-One → Multi-Container (PG 14 → 17)${NC}\n"
+printf "${CYAN} Data Extraction: All-in-One (PG14) → Migration Dump${NC}\n"
printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
# ── Step 0: Pre-flight checks ─────────────────────────────────────────────────
@@ -156,39 +163,31 @@ command -v docker >/dev/null 2>&1 \
docker info >/dev/null 2>&1 \
|| error "Docker daemon is not running. Please start Docker and try again."
-# Docker Compose
-if docker compose version >/dev/null 2>&1; then
- DC="docker compose"
-elif command -v docker-compose >/dev/null 2>&1; then
- DC="docker-compose"
-else
- error "Docker Compose not found. Install it at: https://docs.docker.com/compose/install/"
-fi
-info "Docker Compose: ${DC}"
-
-# OS detection (needed for sed -i portability)
-case "$(uname -s)" in
- Darwin*) OS_TYPE="darwin" ;;
- Linux*) OS_TYPE="linux" ;;
- CYGWIN*|MINGW*|MSYS*) OS_TYPE="windows" ;;
- *) OS_TYPE="unknown" ;;
-esac
-info "OS: ${OS_TYPE}"
-
# Old volume must exist
docker volume ls --format '{{.Name}}' | grep -q "^${OLD_VOLUME}$" \
|| error "Legacy volume '${OLD_VOLUME}' not found.\n Are you sure you ran the old all-in-one SurfSense container?"
success "Found legacy volume: ${OLD_VOLUME}"
-# New PG volume must NOT already exist
-if docker volume ls --format '{{.Name}}' | grep -q "^${NEW_PG_VOLUME}$"; then
- warn "Volume '${NEW_PG_VOLUME}' already exists."
- warn "If migration already succeeded, you do not need to run this script again."
- warn "If a previous run failed partway, remove the partial volume first:"
- warn " docker volume rm ${NEW_PG_VOLUME}"
- error "Aborting to avoid overwriting existing data."
+# Detect and stop any container currently using the old volume
+# (mounting a live PG volume into a second container causes the new container's
+# entrypoint to chown the data files, breaking the running container's access)
+OLD_CONTAINER=$(docker ps --filter "volume=${OLD_VOLUME}" --format '{{.Names}}' | head -n1 || true)
+if [[ -n "${OLD_CONTAINER}" ]]; then
+ warn "Container '${OLD_CONTAINER}' is running and using the '${OLD_VOLUME}' volume."
+ warn "It must be stopped before migration to prevent data file corruption."
+ confirm "Stop '${OLD_CONTAINER}' now and proceed with data extraction?"
+ docker stop "${OLD_CONTAINER}" >/dev/null 2>&1 \
+ || error "Failed to stop '${OLD_CONTAINER}'. Try: docker stop ${OLD_CONTAINER}"
+ success "Container '${OLD_CONTAINER}' stopped."
+fi
+
+# Bail out if a dump already exists — don't overwrite a previous successful run
+if [[ -f "${DUMP_FILE}" ]]; then
+ warn "Dump file '${DUMP_FILE}' already exists."
+ warn "If a previous extraction succeeded, just run install.sh now."
+ warn "To re-extract, remove the file first: rm ${DUMP_FILE}"
+ error "Aborting to avoid overwriting an existing dump."
fi
-success "Target volume '${NEW_PG_VOLUME}' does not yet exist — safe to proceed."
# Clean up any stale temp container from a previous failed run
if docker ps -a --format '{{.Names}}' | grep -q "^${TEMP_CONTAINER}$"; then
@@ -212,14 +211,13 @@ fi
success "All pre-flight checks passed."
# ── Confirmation prompt ───────────────────────────────────────────────────────
-printf "\n${BOLD}Migration plan:${NC}\n"
+printf "\n${BOLD}Extraction plan:${NC}\n"
printf " Source volume : ${YELLOW}%s${NC} (PG14 data at /data/postgres)\n" "${OLD_VOLUME}"
-printf " Target volume : ${YELLOW}%s${NC} (PG17 multi-container stack)\n" "${NEW_PG_VOLUME}"
printf " Old credentials : user=${YELLOW}%s${NC} db=${YELLOW}%s${NC}\n" "${OLD_DB_USER}" "${OLD_DB_NAME}"
-printf " Install dir : ${YELLOW}%s${NC}\n" "${INSTALL_DIR}"
printf " Dump saved to : ${YELLOW}%s${NC}\n" "${DUMP_FILE}"
+printf " SECRET_KEY to : ${YELLOW}%s${NC}\n" "${KEY_FILE}"
printf " Log file : ${YELLOW}%s${NC}\n\n" "${LOG_FILE}"
-confirm "Start migration? (Your original data will not be deleted.)"
+confirm "Start data extraction? (Your original data will not be deleted or modified.)"
# ── Step 1: Start temporary PostgreSQL 14 container ──────────────────────────
step "1" "Starting temporary PostgreSQL 14 container"
@@ -228,6 +226,21 @@ info "Pulling ${PG14_IMAGE}..."
docker pull "${PG14_IMAGE}" >/dev/null 2>&1 \
|| warn "Could not pull ${PG14_IMAGE} — using cached image if available."
+# Detect the UID that owns the existing data files and run the temp container
+# as that user. This prevents the official postgres image entrypoint from
+# running as root and doing `chown -R postgres /data/postgres`, which would
+# re-own the files to UID 999 and break any subsequent access by the original
+# container's postgres process (which may run as a different UID).
+DATA_UID=$(docker run --rm -v "${OLD_VOLUME}:/data" alpine \
+ stat -c '%u' /data/postgres 2>/dev/null || echo "")
+if [[ -z "${DATA_UID}" || "${DATA_UID}" == "0" ]]; then
+ warn "Could not detect data directory UID — falling back to default (may chown files)."
+ USER_FLAG=""
+else
+ info "Data directory owned by UID ${DATA_UID} — starting temp container as that user."
+ USER_FLAG="--user ${DATA_UID}"
+fi
+
docker run -d \
--name "${TEMP_CONTAINER}" \
-v "${OLD_VOLUME}:/data" \
@@ -235,6 +248,7 @@ docker run -d \
-e POSTGRES_USER="${OLD_DB_USER}" \
-e POSTGRES_PASSWORD="${OLD_DB_PASSWORD}" \
-e POSTGRES_DB="${OLD_DB_NAME}" \
+ ${USER_FLAG} \
"${PG14_IMAGE}" >/dev/null
success "Temporary container '${TEMP_CONTAINER}' started."
@@ -245,13 +259,12 @@ step "2" "Dumping PostgreSQL 14 database"
info "Running pg_dump — this may take a while for large databases..."
-# Run pg_dump and capture stderr separately to detect real failures
if ! docker exec \
-e PGPASSWORD="${OLD_DB_PASSWORD}" \
"${TEMP_CONTAINER}" \
pg_dump -U "${OLD_DB_USER}" --no-password "${OLD_DB_NAME}" \
- > "${DUMP_FILE}" 2>/tmp/pg_dump_err; then
- cat /tmp/pg_dump_err >&2
+ > "${DUMP_FILE}" 2>/tmp/surfsense_pgdump_err; then
+ cat /tmp/surfsense_pgdump_err >&2
error "pg_dump failed. See above for details."
fi
@@ -271,7 +284,7 @@ DUMP_LINES=$(wc -l < "${DUMP_FILE}" | tr -d ' ')
DUMP_SIZE=$(du -sh "${DUMP_FILE}" 2>/dev/null | cut -f1)
success "Dump complete: ${DUMP_SIZE} (${DUMP_LINES} lines) → ${DUMP_FILE}"
-# Stop the temp container now (trap will also handle it on unexpected exit)
+# Stop the temp container (trap will also handle it on unexpected exit)
info "Stopping temporary PostgreSQL 14 container..."
docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
@@ -292,148 +305,49 @@ if docker run --rm -v "${OLD_VOLUME}:/data" alpine \
success "Recovered SECRET_KEY from '${OLD_VOLUME}'."
else
warn "No SECRET_KEY file found at /data/.secret_key in '${OLD_VOLUME}'."
- warn "This means the all-in-one was launched with SECRET_KEY set as an explicit environment variable."
- printf "${YELLOW}[SurfSense]${NC} Enter the SECRET_KEY from your old container's environment\n"
- printf "${YELLOW}[SurfSense]${NC} (press Enter to generate a new one — existing sessions will be invalidated): "
- read -r RECOVERED_KEY
- if [[ -z "${RECOVERED_KEY}" ]]; then
+ warn "This means the all-in-one container was launched with SECRET_KEY set as an explicit env var."
+ if $AUTO_YES; then
+ # Non-interactive (called from install.sh) — auto-generate rather than hanging on read
RECOVERED_KEY=$(openssl rand -base64 32 2>/dev/null \
|| head -c 32 /dev/urandom | base64 | tr -d '\n')
- warn "Generated a new SECRET_KEY. All active browser sessions will be logged out after migration."
+ warn "Non-interactive mode: generated a new SECRET_KEY automatically."
+ warn "All active browser sessions will be logged out after migration."
+ warn "To restore your original key, update SECRET_KEY in ./surfsense/.env afterwards."
+ else
+ printf "${YELLOW}[SurfSense]${NC} Enter the SECRET_KEY from your old container's environment\n"
+ printf "${YELLOW}[SurfSense]${NC} (press Enter to generate a new one — existing sessions will be invalidated): "
+ read -r RECOVERED_KEY
+ if [[ -z "${RECOVERED_KEY}" ]]; then
+ RECOVERED_KEY=$(openssl rand -base64 32 2>/dev/null \
+ || head -c 32 /dev/urandom | base64 | tr -d '\n')
+ warn "Generated a new SECRET_KEY. All active browser sessions will be logged out after migration."
+ fi
fi
fi
-# ── Step 4: Set up the new installation ───────────────────────────────────────
-step "4" "Setting up new SurfSense installation"
-
-if [[ -f "${INSTALL_DIR}/docker-compose.yml" ]]; then
- warn "Directory '${INSTALL_DIR}' already exists — skipping file download."
-else
- info "Creating installation directory: ${INSTALL_DIR}"
- mkdir -p "${INSTALL_DIR}/scripts"
-
- FILES=(
- "docker/docker-compose.yml:docker-compose.yml"
- "docker/.env.example:.env.example"
- "docker/postgresql.conf:postgresql.conf"
- "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
- )
-
- for entry in "${FILES[@]}"; do
- src="${entry%%:*}"
- dest="${entry##*:}"
- info "Downloading ${dest}..."
- curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" \
- || error "Failed to download ${src}. Check your internet connection."
- done
-
- chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
- success "Compose files downloaded to ${INSTALL_DIR}/"
-fi
-
-# Create .env from example if it does not exist
-if [[ ! -f "${INSTALL_DIR}/.env" ]]; then
- cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env"
- info "Created ${INSTALL_DIR}/.env from .env.example"
-fi
-
-# Write the recovered SECRET_KEY into .env (handles both placeholder and pre-set values)
-if [[ "${OS_TYPE}" == "darwin" ]]; then
- sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
- sed -i '' "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
-else
- sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
- sed -i "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env"
-fi
-success "SECRET_KEY written to ${INSTALL_DIR}/.env"
-
-# ── Step 5: Start PostgreSQL 17 (new stack) ───────────────────────────────────
-step "5" "Starting PostgreSQL 17"
-
-(cd "${INSTALL_DIR}" && ${DC} up -d db)
-
-# Resolve the running container name for direct docker exec calls
-PG17_CONTAINER=$(cd "${INSTALL_DIR}" && ${DC} ps -q db 2>/dev/null | head -n1 || true)
-if [[ -z "${PG17_CONTAINER}" ]]; then
- # Fallback to the predictable compose container name
- PG17_CONTAINER="surfsense-db-1"
-fi
-info "PostgreSQL 17 container: ${PG17_CONTAINER}"
-
-wait_for_pg "${PG17_CONTAINER}" "${OLD_DB_USER}" "PostgreSQL 17"
-
-# ── Step 6: Restore the dump ──────────────────────────────────────────────────
-step "6" "Restoring database into PostgreSQL 17"
-
-info "Running psql restore — this may take a while for large databases..."
-
-RESTORE_ERR_FILE="/tmp/surfsense_restore_err.log"
-
-docker exec -i \
- -e PGPASSWORD="${OLD_DB_PASSWORD}" \
- "${PG17_CONTAINER}" \
- psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" \
- < "${DUMP_FILE}" \
- 2>"${RESTORE_ERR_FILE}" || true # psql exits non-zero on warnings; check below
-
-# Surface any real (non-benign) errors
-FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR_FILE}" \
- | grep -iv "already exists" \
- | grep -iv "multiple primary keys" \
- || true)
-
-if [[ -n "${FATAL_ERRORS}" ]]; then
- warn "Restore completed with the following errors:"
- printf "%s\n" "${FATAL_ERRORS}"
- confirm "These may be harmless (e.g. pre-existing system objects). Continue?"
-else
- success "Restore completed with no fatal errors."
-fi
-
-# Smoke test — verify tables exist in the restored database
-TABLE_COUNT=$(
- docker exec \
- -e PGPASSWORD="${OLD_DB_PASSWORD}" \
- "${PG17_CONTAINER}" \
- psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" -t \
- -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \
- 2>/dev/null | tr -d ' \n' || echo "0"
-)
-
-if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then
- warn "Smoke test: no tables found in the restored database."
- warn "The restore may have failed silently. Inspect the dump and restore manually:"
- warn " docker exec -i ${PG17_CONTAINER} psql -U ${OLD_DB_USER} -d ${OLD_DB_NAME} < ${DUMP_FILE}"
- confirm "Continue starting the rest of the stack anyway?"
-else
- success "Smoke test passed: ${TABLE_COUNT} table(s) found in the restored database."
-fi
-
-# ── Step 7: Start all remaining services ──────────────────────────────────────
-step "7" "Starting all SurfSense services"
-
-(cd "${INSTALL_DIR}" && ${DC} up -d)
-success "All services started."
+# Save SECRET_KEY to a file for install.sh to pick up
+printf '%s' "${RECOVERED_KEY}" > "${KEY_FILE}"
+success "SECRET_KEY saved to ${KEY_FILE}"
# ── Done ──────────────────────────────────────────────────────────────────────
printf "\n${GREEN}${BOLD}"
printf "══════════════════════════════════════════════════════════════\n"
-printf " Migration complete!\n"
+printf " Data extraction complete!\n"
printf "══════════════════════════════════════════════════════════════\n"
printf "${NC}\n"
-success " Frontend : http://localhost:3000"
-success " Backend : http://localhost:8000"
-success " API Docs : http://localhost:8000/docs"
+success "Dump file : ${DUMP_FILE} (${DUMP_SIZE})"
+success "Secret key: ${KEY_FILE}"
printf "\n"
-info " Config : ${INSTALL_DIR}/.env"
-info " Logs : cd ${INSTALL_DIR} && ${DC} logs -f"
+info "Next step — run install.sh from this same directory:"
printf "\n"
-warn "Next steps:"
-warn " 1. Open http://localhost:3000 and verify your data is intact."
-warn " 2. Once satisfied, remove the legacy volume (IRREVERSIBLE):"
-warn " docker volume rm ${OLD_VOLUME}"
-warn " 3. Delete the dump file once you no longer need it as a backup:"
-warn " rm ${DUMP_FILE}"
-warn " Full migration log saved to: ${LOG_FILE}"
+printf "${CYAN} curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash${NC}\n"
+printf "\n"
+info "install.sh will detect the dump, restore your data into PostgreSQL 17,"
+info "and start the full SurfSense stack automatically."
+printf "\n"
+warn "Keep both files until you have verified the migration:"
+warn " ${DUMP_FILE}"
+warn " ${KEY_FILE}"
+warn "Full log saved to: ${LOG_FILE}"
printf "\n"
diff --git a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
index 74d3d0d0b..36233145d 100644
--- a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
+++ b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
@@ -11,82 +11,79 @@ Because PostgreSQL data files are not compatible between major versions, a **log
This guide only applies to users who ran the legacy `docker-compose.quickstart.yml` (the all-in-one `surfsense` container). If you were already using `docker/docker-compose.yml`, you do not need to migrate.
-
-If you try to run `install.sh` while the old `surfsense-data` volume exists, the script will detect it and stop with instructions to migrate first.
-
+---
+
+## Option A — One command (recommended)
+
+`install.sh` detects the legacy `surfsense-data` volume and handles the full migration automatically — no separate migration script needed. Just run the same install command you would use for a fresh install:
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
+```
+
+**What it does automatically:**
+
+1. Downloads all SurfSense files (including `migrate-database.sh`) into `./surfsense/`
+2. Detects the `surfsense-data` volume and enters migration mode
+3. Stops the old all-in-one container if it is still running
+4. Starts a temporary PostgreSQL 14 container and dumps your database
+5. Recovers your `SECRET_KEY` from the old volume
+6. Starts PostgreSQL 17, restores the dump, runs a smoke test
+7. Starts all services
+
+Your original `surfsense-data` volume is **never deleted** — you remove it manually after verifying.
+
+### After it completes
+
+1. Open [http://localhost:3000](http://localhost:3000) and confirm your data is intact.
+2. Once satisfied, remove the old volume (irreversible):
+ ```bash
+ docker volume rm surfsense-data
+ ```
+3. Delete the dump file once you no longer need it as a backup:
+ ```bash
+ rm ./surfsense_migration_backup.sql
+ ```
+
+### If the migration fails mid-way
+
+The dump file is saved to `./surfsense_migration_backup.sql` as a checkpoint. Simply re-run `install.sh` — it will detect the existing dump and skip straight to the restore step without re-extracting.
---
-## Option A — Migration Script (recommended)
+## Option B — Manual migration script (custom credentials)
-A single script handles the entire process automatically: it dumps your PostgreSQL 14 data, recovers your `SECRET_KEY`, sets up the new stack, and restores into PostgreSQL 17.
-
-**Prerequisites:** Docker running, ~500 MB free disk space, internet access.
+If you launched the old all-in-one container with custom database credentials (`POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB` environment variables), the automatic path will use the wrong credentials. Run `migrate-database.sh` manually first:
```bash
-curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash
+# 1. Extract data with your custom credentials
+bash ./surfsense/scripts/migrate-database.sh --db-user myuser --db-password mypass --db-name mydb
+
+# 2. Install and restore (detects the dump automatically)
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Or download and inspect it first (recommended):
+Or, if you haven't run `install.sh` yet, download the script and run it directly:
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh -o migrate-database.sh
-# Review the script, then run:
-bash migrate-database.sh
+bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb
```
-### Options
+### Migration script options
| Flag | Description | Default |
|------|-------------|---------|
| `--db-user USER` | Old PostgreSQL username | `surfsense` |
| `--db-password PASS` | Old PostgreSQL password | `surfsense` |
| `--db-name NAME` | Old PostgreSQL database | `surfsense` |
-| `--install-dir DIR` | New installation directory | `./surfsense` |
-| `--yes` / `-y` | Skip confirmation prompts | — |
-
-If you customised the database credentials in your old all-in-one container, pass them explicitly:
-
-```bash
-bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb
-```
-
-### What the script does
-
-1. Checks prerequisites and confirms the `surfsense-data` volume exists
-2. Starts a temporary `postgres:14` container against the old data
-3. Runs `pg_dump` and validates the dump file (size + header check)
-4. Recovers your `SECRET_KEY` from the old volume (or prompts if not found)
-5. Downloads the new compose files into `./surfsense/` (skips if already present)
-6. Writes the recovered `SECRET_KEY` into `./surfsense/.env`
-7. Starts the new `db` service (PostgreSQL 17), waits for readiness
-8. Restores the dump with `psql` and runs a smoke test
-9. Starts all remaining services
-
-The original `surfsense-data` volume is **never deleted** — you remove it manually after verifying the migration.
-
-### After the script completes
-
-1. Open [http://localhost:3000](http://localhost:3000) and confirm your data is intact.
-2. Once satisfied, remove the old volume:
- ```bash
- docker volume rm surfsense-data
- ```
-3. Delete the backup dump once you no longer need it:
- ```bash
- rm ./surfsense_migration_backup.sql
- ```
+| `--yes` / `-y` | Skip confirmation prompts (used automatically by `install.sh`) | — |
---
-## Option B — Manual Steps
+## Option C — Manual steps
-Use these steps if the migration script doesn't work on your platform (e.g. Windows without WSL2), or if you want full control over each step.
-
-### Before you start
-
-- Confirm the old volume exists: `docker volume ls | grep surfsense-data`
-- Have ~500 MB free disk space for the SQL dump.
+For users who prefer full control or whose platform doesn't support bash scripts (e.g. Windows without WSL2).
### Step 1 — Start a temporary PostgreSQL 14 container
@@ -97,7 +94,7 @@ docker run -d --name surfsense-pg14-temp \
-e POSTGRES_USER=surfsense \
-e POSTGRES_PASSWORD=surfsense \
-e POSTGRES_DB=surfsense \
- postgres:14
+ pgvector/pgvector:pg14
```
Wait ~10 seconds, then confirm it is healthy:
@@ -113,114 +110,78 @@ docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \
pg_dump -U surfsense surfsense > surfsense_backup.sql
```
-Verify the dump is valid:
-
-```bash
-wc -l surfsense_backup.sql
-grep "PostgreSQL database dump" surfsense_backup.sql
-```
-
### Step 3 — Recover your SECRET\_KEY
```bash
docker run --rm -v surfsense-data:/data alpine cat /data/.secret_key
```
-Copy the printed value for the next step.
-
### Step 4 — Set up the new stack
```bash
-git clone https://github.com/MODSetter/SurfSense.git
-cd SurfSense/docker
-cp .env.example .env
+mkdir -p surfsense/scripts
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/docker-compose.yml -o surfsense/docker-compose.yml
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/.env.example -o surfsense/.env.example
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/postgresql.conf -o surfsense/postgresql.conf
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/init-electric-user.sh -o surfsense/scripts/init-electric-user.sh
+chmod +x surfsense/scripts/init-electric-user.sh
+cp surfsense/.env.example surfsense/.env
```
-Set `SECRET_KEY` in `.env` to the value recovered above.
+Set `SECRET_KEY` in `surfsense/.env` to the value from Step 3.
-### Step 5 — Start PostgreSQL 17
+### Step 5 — Start PostgreSQL 17 and restore
```bash
+cd surfsense
docker compose up -d db
+docker compose exec db pg_isready -U surfsense # wait until ready
+docker compose exec -T db psql -U surfsense -d surfsense < ../surfsense_backup.sql
```
-Wait until ready:
-
-```bash
-docker compose exec db pg_isready -U surfsense
-```
-
-### Step 6 — Restore the database
-
-```bash
-docker compose exec -T db \
- psql -U surfsense -d surfsense < surfsense_backup.sql
-```
-
-Harmless notices like `ERROR: role "surfsense" already exists` are expected.
-
-### Step 7 — Start all services
+### Step 6 — Start all services
```bash
docker compose up -d
```
-### Step 8 — Clean up
-
-After verifying everything works:
+### Step 7 — Clean up
```bash
-# Remove temporary PG14 container
docker stop surfsense-pg14-temp && docker rm surfsense-pg14-temp
-
-# Remove old volume (irreversible — only after confirming migration success)
-docker volume rm surfsense-data
+docker volume rm surfsense-data # only after verifying migration succeeded
```
---
## Troubleshooting
-### Script exits with "surfsense-postgres already exists"
+### `install.sh` runs normally with a blank database (no migration happened)
-A previous migration attempt partially completed. Remove the incomplete volume and retry:
+The legacy volume was not detected. Confirm it exists:
```bash
-docker volume rm surfsense-postgres
-bash migrate-database.sh
+docker volume ls | grep surfsense-data
```
-### PostgreSQL 14 container fails to start
-
-Check the container logs:
+If it doesn't appear, the old container may have used a different volume name. Check with:
```bash
-docker logs surfsense-pg14-temp
+docker volume ls | grep -i surfsense
```
-If you see permission errors, the data directory may need ownership correction. Run:
+### Extraction fails with permission errors
-```bash
-docker exec surfsense-pg14-temp chown -R postgres:postgres /data/postgres
-```
-
-Then restart the container.
-
-### Empty or corrupt dump file
-
-If `surfsense_backup.sql` is smaller than expected, run the dump command again with verbose output:
-
-```bash
-docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \
- pg_dump -U surfsense surfsense -v 2>&1 | head -40
-```
+The script detects the UID of the data files and runs the temporary PG14 container as that user. If you see permission errors in `./surfsense-migration.log`, run `migrate-database.sh` manually and check the log for details.
### Cannot find `/data/.secret_key`
-If the all-in-one was launched with `SECRET_KEY` set explicitly as an environment variable, the key was never written to the volume. Set the same value manually in `docker/.env`. If it is lost, generate a new one:
+The all-in-one entrypoint always writes the key to `/data/.secret_key` unless you explicitly set the `SECRET_KEY` environment variable. If the key is missing, the migration script auto-generates a new one (with a warning). You can update it manually in `./surfsense/.env` afterwards. Note that a new key invalidates all existing browser sessions — users will need to log in again.
+
+### Restore errors after re-running `install.sh`
+
+If `surfsense-postgres` volume already exists from a previous partial run, remove it before retrying:
```bash
-openssl rand -base64 32
+docker volume rm surfsense-postgres
```
-
-Note: a new key invalidates all existing browser sessions — users will need to log in again.
From 22bd38f346dd0b296d0e39755afe9d27ce1129a3 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 17:49:10 +0530
Subject: [PATCH 28/57] fix: ensure DB credentials are set correctly in
migration mode by adding fallback to defaults
---
docker/scripts/install.sh | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index c08b49729..15c20561d 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -166,9 +166,9 @@ fi
if $MIGRATION_MODE; then
# Read DB credentials from .env (fall back to defaults from docker-compose.yml)
- DB_USER=$(grep '^DB_USER=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
- DB_PASS=$(grep '^DB_PASSWORD=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
- DB_NAME=$(grep '^DB_NAME=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1)
+ DB_USER=$(grep '^DB_USER=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1 || true)
+ DB_PASS=$(grep '^DB_PASSWORD=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1 || true)
+ DB_NAME=$(grep '^DB_NAME=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1 || true)
DB_USER="${DB_USER:-surfsense}"
DB_PASS="${DB_PASS:-surfsense}"
DB_NAME="${DB_NAME:-surfsense}"
From bb83ee5b8537f08853294840eae86305a8185f3b Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 17:56:18 +0530
Subject: [PATCH 29/57] refactor: update version display in installation script
and streamline database migration script
---
docker/scripts/install.sh | 7 ++++++-
docker/scripts/migrate-database.sh | 20 +-------------------
2 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 15c20561d..062754970 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -247,7 +247,12 @@ Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
EOF
-printf " Your personal AI-powered search engine ${YELLOW}v${SURFSENSE_VERSION:-latest}${NC}\n"
+if [[ "${SURFSENSE_VERSION:-latest}" == "latest" ]]; then
+ _version_display="latest"
+else
+ _version_display="v${SURFSENSE_VERSION}"
+fi
+printf " Your personal AI-powered search engine ${YELLOW}[%s]${NC}\n" "${_version_display}"
printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
info " Frontend: http://localhost:3000"
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
index 3e5c29cb8..8a7c9eae6 100755
--- a/docker/scripts/migrate-database.sh
+++ b/docker/scripts/migrate-database.sh
@@ -132,25 +132,7 @@ wait_for_pg() {
success "${label} is ready."
}
-# ── Banner ────────────────────────────────────────────────────────────────────
-printf "\n${BOLD}${CYAN}"
-cat << 'EOF'
-
-
- .d8888b. .d888 .d8888b.
-d88P Y88b d88P" d88P Y88b
-Y88b. 888 Y88b.
- "Y888b. 888 888 888d888 888888 "Y888b. .d88b. 88888b. .d8888b .d88b.
- "Y88b. 888 888 888P" 888 "Y88b. d8P Y8b 888 "88b 88K d8P Y8b
- "888 888 888 888 888 "888 88888888 888 888 "Y8888b. 88888888
-Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b.
- "Y8888P" "Y88888 888 888 "Y8888P" "Y8888 888 888 88888P' "Y8888
-
-
-EOF
-printf "${NC}"
-printf "${CYAN} Data Extraction: All-in-One (PG14) → Migration Dump${NC}\n"
-printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n"
+step "Migrating data from legacy database (PostgreSQL 14 → 17)"
# ── Step 0: Pre-flight checks ─────────────────────────────────────────────────
step "0" "Pre-flight checks"
From 4f59e2c1f9d050e0118e5556dd62a5d5ed2cdd43 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 18:13:11 +0530
Subject: [PATCH 30/57] chore: remove obsolete .env.example file to declutter
configuration and streamline project setup
---
.env.example | 41 -----------------------------------------
1 file changed, 41 deletions(-)
delete mode 100644 .env.example
diff --git a/.env.example b/.env.example
deleted file mode 100644
index 894a989cc..000000000
--- a/.env.example
+++ /dev/null
@@ -1,41 +0,0 @@
-# Docker Specific Env's Only - Can skip if needed
-
-# Celery Config
-REDIS_PORT=6379
-FLOWER_PORT=5555
-
-# Frontend Configuration
-FRONTEND_PORT=3000
-NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000 (Default: http://localhost:8000)
-NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL or GOOGLE (Default: LOCAL)
-NEXT_PUBLIC_ETL_SERVICE=UNSTRUCTURED or LLAMACLOUD or DOCLING (Default: DOCLING)
-# Backend Configuration
-BACKEND_PORT=8000
-# Auth type for backend login flow (Default: LOCAL)
-# Set to GOOGLE if using Google OAuth
-AUTH_TYPE=LOCAL
-# Frontend URL used by backend for CORS allowed origins and OAuth redirects
-# Must match the URL your browser uses to access the frontend
-NEXT_FRONTEND_URL=http://localhost:3000
-
-# Database Configuration
-POSTGRES_USER=postgres
-POSTGRES_PASSWORD=postgres
-POSTGRES_DB=surfsense
-POSTGRES_PORT=5432
-
-# Electric-SQL Configuration
-ELECTRIC_PORT=5133
-# PostgreSQL host for Electric connection
-# - 'db' for Docker PostgreSQL (service name in docker-compose)
-# - 'host.docker.internal' for local PostgreSQL (recommended when Electric runs in Docker)
-# Note: host.docker.internal works on Docker Desktop (Mac/Windows) and can be enabled on Linux
-POSTGRES_HOST=db
-ELECTRIC_DB_USER=electric
-ELECTRIC_DB_PASSWORD=electric_password
-NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
-
-# pgAdmin Configuration
-PGADMIN_PORT=5050
-PGADMIN_DEFAULT_EMAIL=admin@surfsense.com
-PGADMIN_DEFAULT_PASSWORD=surfsense
From 35025bf0c5a082014d417bec2c6267f84f05250c Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 18:39:24 +0530
Subject: [PATCH 31/57] chore: update Docker images to new repository and
enable Watchtower for automatic updates
---
README.md | 2 +-
docker/docker-compose.yml | 18 +++++++++++++-----
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/README.md b/README.md
index c839e9c99..ba4eb7560 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,7 @@ For Docker Compose and other deployment options, see the [Docker Installation do
**Update (recommended — Watchtower):**
```bash
-docker run --rm -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --run-once --label-filter "com.docker.compose.project=surfsense"
+docker run --rm -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --run-once --label-enable
```
**Update (manual):**
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index e1352ea32..c71529441 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -68,6 +68,8 @@ services:
# DAYTONA_API_URL: ${DAYTONA_API_URL:-https://app.daytona.io/api}
# DAYTONA_TARGET: ${DAYTONA_TARGET:-us}
SERVICE_ROLE: api
+ labels:
+ - "com.centurylinklabs.watchtower.enable=true"
depends_on:
db:
condition: service_healthy
@@ -76,7 +78,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -98,10 +100,12 @@ services:
condition: service_healthy
backend:
condition: service_started
+ labels:
+ - "com.centurylinklabs.watchtower.enable=true"
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -118,10 +122,12 @@ services:
condition: service_healthy
celery_worker:
condition: service_started
+ labels:
+ - "com.centurylinklabs.watchtower.enable=true"
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -155,7 +161,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
@@ -165,6 +171,8 @@ services:
NEXT_PUBLIC_ETL_SERVICE: ${ETL_SERVICE:-DOCLING}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${DEPLOYMENT_MODE:-self-hosted}
NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
+ labels:
+ - "com.centurylinklabs.watchtower.enable=true"
depends_on:
- backend
- electric
From 71a995da234bcf4a6576ec39306713121c5bb606 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 18:51:13 +0530
Subject: [PATCH 32/57] docs: update installation instructions across multiple
languages to include new Docker command and manual update process
---
README.es.md | 21 ++++++++++++++++-----
README.hi.md | 21 ++++++++++++++++-----
README.md | 2 +-
README.pt-BR.md | 21 ++++++++++++++++-----
README.zh-CN.md | 21 ++++++++++++++++-----
5 files changed, 65 insertions(+), 21 deletions(-)
diff --git a/README.es.md b/README.es.md
index 5472ad069..5aeb7c310 100644
--- a/README.es.md
+++ b/README.es.md
@@ -81,15 +81,26 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Ejecuta SurfSense en tu propia infraestructura para control total de datos y privacidad.
-**Inicio Rápido (Docker en un solo comando):**
-
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense --restart unless-stopped ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Después de iniciar, abre [http://localhost:3000](http://localhost:3000) en tu navegador.
+Para Docker Compose y otras opciones de despliegue, consulta la [documentación de instalación Docker](https://www.surfsense.com/docs/docker-installation).
-Para Docker Compose, instalación manual y otras opciones de despliegue, consulta la [documentación](https://www.surfsense.com/docs/).
+**Actualización (recomendada — Watchtower):**
+
+```bash
+docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
+```
+
+**Actualización (manual):**
+
+```bash
+cd surfsense # o SurfSense/docker si usaste la Opción 2
+docker compose pull && docker compose up -d
+```
+
+Para instalación manual y otras opciones de despliegue, consulta la [documentación](https://www.surfsense.com/docs/).
### Cómo Colaborar en Tiempo Real (Beta)
diff --git a/README.hi.md b/README.hi.md
index d4388b616..cfa7a5b9f 100644
--- a/README.hi.md
+++ b/README.hi.md
@@ -81,15 +81,26 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
पूर्ण डेटा नियंत्रण और गोपनीयता के लिए SurfSense को अपने स्वयं के बुनियादी ढांचे पर चलाएं।
-**त्वरित शुरुआत (Docker एक कमांड में):**
-
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense --restart unless-stopped ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-शुरू करने के बाद, अपने ब्राउज़र में [http://localhost:3000](http://localhost:3000) खोलें।
+Docker Compose और अन्य डिप्लॉयमेंट विकल्पों के लिए, [Docker इंस्टॉलेशन डॉक्स](https://www.surfsense.com/docs/docker-installation) देखें।
-Docker Compose, मैनुअल इंस्टॉलेशन और अन्य डिप्लॉयमेंट विकल्पों के लिए, [डॉक्स](https://www.surfsense.com/docs/) देखें।
+**अपडेट (अनुशंसित — Watchtower):**
+
+```bash
+docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
+```
+
+**अपडेट (मैनुअल):**
+
+```bash
+cd surfsense # या SurfSense/docker यदि आपने Option 2 का उपयोग किया
+docker compose pull && docker compose up -d
+```
+
+मैनुअल इंस्टॉलेशन और अन्य डिप्लॉयमेंट विकल्पों के लिए, [डॉक्स](https://www.surfsense.com/docs/) देखें।
### रीयल-टाइम सहयोग कैसे करें (बीटा)
diff --git a/README.md b/README.md
index ba4eb7560..fbf9f3347 100644
--- a/README.md
+++ b/README.md
@@ -90,7 +90,7 @@ For Docker Compose and other deployment options, see the [Docker Installation do
**Update (recommended — Watchtower):**
```bash
-docker run --rm -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --run-once --label-enable
+docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
```
**Update (manual):**
diff --git a/README.pt-BR.md b/README.pt-BR.md
index 351cdb85b..edf25e5f3 100644
--- a/README.pt-BR.md
+++ b/README.pt-BR.md
@@ -81,15 +81,26 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Execute o SurfSense na sua própria infraestrutura para controle total de dados e privacidade.
-**Início Rápido (Docker em um único comando):**
-
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense --restart unless-stopped ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Após iniciar, abra [http://localhost:3000](http://localhost:3000) no seu navegador.
+Para Docker Compose e outras opções de implantação, consulte a [documentação de instalação Docker](https://www.surfsense.com/docs/docker-installation).
-Para Docker Compose, instalação manual e outras opções de implantação, consulte a [documentação](https://www.surfsense.com/docs/).
+**Atualização (recomendada — Watchtower):**
+
+```bash
+docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
+```
+
+**Atualização (manual):**
+
+```bash
+cd surfsense # ou SurfSense/docker se você usou a Opção 2
+docker compose pull && docker compose up -d
+```
+
+Para instalação manual e outras opções de implantação, consulte a [documentação](https://www.surfsense.com/docs/).
### Como Colaborar em Tempo Real (Beta)
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 49ef64619..080d5f554 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -81,15 +81,26 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
在您自己的基础设施上运行 SurfSense,实现完全的数据控制和隐私保护。
-**快速开始(Docker 一行命令):**
-
```bash
-docker run -d -p 3000:3000 -p 8000:8000 -p 5133:5133 -v surfsense-data:/data --name surfsense --restart unless-stopped ghcr.io/modsetter/surfsense:latest
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-启动后,在浏览器中打开 [http://localhost:3000](http://localhost:3000)。
+如需 Docker Compose 及其他部署方式,请查看 [Docker 安装文档](https://www.surfsense.com/docs/docker-installation)。
-如需 Docker Compose、手动安装及其他部署方式,请查看[文档](https://www.surfsense.com/docs/)。
+**更新(推荐 — Watchtower):**
+
+```bash
+docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
+```
+
+**更新(手动):**
+
+```bash
+cd surfsense # 若使用了方式二,则为 SurfSense/docker
+docker compose pull && docker compose up -d
+```
+
+如需手动安装及其他部署方式,请查看[文档](https://www.surfsense.com/docs/)。
### 如何实时协作(Beta)
From 90f18fac38741cd8dc2c0e8f6ebfe8e028192eaa Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 18:54:44 +0530
Subject: [PATCH 33/57] chore: update Docker image references to new repository
for backend and frontend services
---
docker/docker-compose.yml | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index c71529441..b4108ef65 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -78,7 +78,7 @@ services:
restart: unless-stopped
celery_worker:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -105,7 +105,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -127,7 +127,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -161,7 +161,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
From 2e8e85a4ef460ec2cfb1896621bd83ef78d1a913 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 19:11:57 +0530
Subject: [PATCH 34/57] docs: enhance Docker installation and migration
documentation with updated steps and options for managing updates
---
.../content/docs/docker-installation.mdx | 60 +++++++++++--------
.../docs/how-to/migrate-from-allinone.mdx | 24 +++++---
2 files changed, 51 insertions(+), 33 deletions(-)
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 486f79b6a..e6aec07ea 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -36,6 +36,41 @@ After starting, access SurfSense at:
---
+## Updating
+
+**Option 1 — Watchtower daemon (recommended, auto-updates every 24 h):**
+
+```bash
+docker run -d --name watchtower \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ nickfedor/watchtower \
+ --label-enable \
+ --interval 86400
+```
+
+**Option 2 — Watchtower one-time update:**
+
+```bash
+docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
+ nickfedor/watchtower --run-once \
+ --label-filter "com.docker.compose.project=surfsense"
+```
+
+
+Use `nickfedor/watchtower`. The original `containrrr/watchtower` is no longer maintained and may fail with newer Docker versions.
+
+
+**Option 3 — Manual:**
+
+```bash
+cd surfsense # or SurfSense/docker if you cloned manually
+docker compose pull && docker compose up -d
+```
+
+Database migrations are applied automatically on every startup.
+
+---
+
## Configuration
All configuration lives in a single `docker/.env` file (or `surfsense/.env` if you used the install script). Copy `.env.example` to `.env` and edit the values you need.
@@ -181,31 +216,6 @@ See the full step-by-step guide: [Migrate from the All-in-One Container](/docs/h
---
-## Updating
-
-**Option 1 — Watchtower (recommended):**
-
-```bash
-docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
- nickfedor/watchtower --run-once \
- --label-filter "com.docker.compose.project=surfsense"
-```
-
-
-Use `nickfedor/watchtower`. The original `containrrr/watchtower` is no longer maintained and may fail with newer Docker versions.
-
-
-**Option 2 — Manual:**
-
-```bash
-cd surfsense # or SurfSense/docker if you cloned manually
-docker compose pull && docker compose up -d
-```
-
-Database migrations are applied automatically on every startup.
-
----
-
## Useful Commands
```bash
diff --git a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
index 36233145d..3de0b043d 100644
--- a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
+++ b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx
@@ -85,7 +85,15 @@ bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb
For users who prefer full control or whose platform doesn't support bash scripts (e.g. Windows without WSL2).
-### Step 1 — Start a temporary PostgreSQL 14 container
+### Step 1 — Stop the old all-in-one container
+
+Before mounting the `surfsense-data` volume into a new container, stop the existing one to prevent two PostgreSQL processes from writing to the same data directory:
+
+```bash
+docker stop surfsense 2>/dev/null || true
+```
+
+### Step 2 — Start a temporary PostgreSQL 14 container
```bash
docker run -d --name surfsense-pg14-temp \
@@ -103,20 +111,20 @@ Wait ~10 seconds, then confirm it is healthy:
docker exec surfsense-pg14-temp pg_isready -U surfsense
```
-### Step 2 — Dump the database
+### Step 3 — Dump the database
```bash
docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \
pg_dump -U surfsense surfsense > surfsense_backup.sql
```
-### Step 3 — Recover your SECRET\_KEY
+### Step 4 — Recover your SECRET\_KEY
```bash
docker run --rm -v surfsense-data:/data alpine cat /data/.secret_key
```
-### Step 4 — Set up the new stack
+### Step 5 — Set up the new stack
```bash
mkdir -p surfsense/scripts
@@ -128,9 +136,9 @@ chmod +x surfsense/scripts/init-electric-user.sh
cp surfsense/.env.example surfsense/.env
```
-Set `SECRET_KEY` in `surfsense/.env` to the value from Step 3.
+Set `SECRET_KEY` in `surfsense/.env` to the value from Step 4.
-### Step 5 — Start PostgreSQL 17 and restore
+### Step 6 — Start PostgreSQL 17 and restore
```bash
cd surfsense
@@ -139,13 +147,13 @@ docker compose exec db pg_isready -U surfsense # wait until ready
docker compose exec -T db psql -U surfsense -d surfsense < ../surfsense_backup.sql
```
-### Step 6 — Start all services
+### Step 7 — Start all services
```bash
docker compose up -d
```
-### Step 7 — Clean up
+### Step 8 — Clean up
```bash
docker stop surfsense-pg14-temp && docker rm surfsense-pg14-temp
From 7d99dbc0d5249acf63fc2c09c4cd7e0fafc92053 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 19:24:55 +0530
Subject: [PATCH 35/57] docs: update connector documentation to specify adding
credentials to .env file instead of Docker run command
---
.../content/docs/connectors/airtable.mdx | 20 ++++++++---------
.../content/docs/connectors/clickup.mdx | 20 ++++++++---------
.../content/docs/connectors/confluence.mdx | 20 ++++++++---------
.../content/docs/connectors/discord.mdx | 22 +++++++++----------
.../content/docs/connectors/gmail.mdx | 20 ++++++++---------
.../docs/connectors/google-calendar.mdx | 20 ++++++++---------
.../content/docs/connectors/google-drive.mdx | 20 ++++++++---------
.../content/docs/connectors/jira.mdx | 20 ++++++++---------
.../content/docs/connectors/linear.mdx | 20 ++++++++---------
.../docs/connectors/microsoft-teams.mdx | 20 ++++++++---------
.../content/docs/connectors/notion.mdx | 20 ++++++++---------
.../content/docs/connectors/slack.mdx | 20 ++++++++---------
12 files changed, 121 insertions(+), 121 deletions(-)
diff --git a/surfsense_web/content/docs/connectors/airtable.mdx b/surfsense_web/content/docs/connectors/airtable.mdx
index db7fe3ac0..71148335c 100644
--- a/surfsense_web/content/docs/connectors/airtable.mdx
+++ b/surfsense_web/content/docs/connectors/airtable.mdx
@@ -88,16 +88,16 @@ After saving, you'll find your OAuth credentials on the integration page:
## Running SurfSense with Airtable Connector
-Add the Airtable environment variables to your Docker run command:
+Add the Airtable credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Airtable Connector
- -e AIRTABLE_CLIENT_ID=your_airtable_client_id \
- -e AIRTABLE_CLIENT_SECRET=your_airtable_client_secret \
- -e AIRTABLE_REDIRECT_URI=http://localhost:8000/api/v1/auth/airtable/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+AIRTABLE_CLIENT_ID=your_airtable_client_id
+AIRTABLE_CLIENT_SECRET=your_airtable_client_secret
+AIRTABLE_REDIRECT_URI=http://localhost:8000/api/v1/auth/airtable/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
\ No newline at end of file
diff --git a/surfsense_web/content/docs/connectors/clickup.mdx b/surfsense_web/content/docs/connectors/clickup.mdx
index 960b88370..768bca859 100644
--- a/surfsense_web/content/docs/connectors/clickup.mdx
+++ b/surfsense_web/content/docs/connectors/clickup.mdx
@@ -44,16 +44,16 @@ After creating the app, you'll see your credentials:
## Running SurfSense with ClickUp Connector
-Add the ClickUp environment variables to your Docker run command:
+Add the ClickUp credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # ClickUp Connector
- -e CLICKUP_CLIENT_ID=your_clickup_client_id \
- -e CLICKUP_CLIENT_SECRET=your_clickup_client_secret \
- -e CLICKUP_REDIRECT_URI=http://localhost:8000/api/v1/auth/clickup/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+CLICKUP_CLIENT_ID=your_clickup_client_id
+CLICKUP_CLIENT_SECRET=your_clickup_client_secret
+CLICKUP_REDIRECT_URI=http://localhost:8000/api/v1/auth/clickup/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
\ No newline at end of file
diff --git a/surfsense_web/content/docs/connectors/confluence.mdx b/surfsense_web/content/docs/connectors/confluence.mdx
index 57116cf29..3ee3394a4 100644
--- a/surfsense_web/content/docs/connectors/confluence.mdx
+++ b/surfsense_web/content/docs/connectors/confluence.mdx
@@ -97,16 +97,16 @@ Select the **"Granular scopes"** tab and enable:
## Running SurfSense with Confluence Connector
-Add the Atlassian environment variables to your Docker run command:
+Add the Atlassian credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Confluence Connector
- -e ATLASSIAN_CLIENT_ID=your_atlassian_client_id \
- -e ATLASSIAN_CLIENT_SECRET=your_atlassian_client_secret \
- -e CONFLUENCE_REDIRECT_URI=http://localhost:8000/api/v1/auth/confluence/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+ATLASSIAN_CLIENT_ID=your_atlassian_client_id
+ATLASSIAN_CLIENT_SECRET=your_atlassian_client_secret
+CONFLUENCE_REDIRECT_URI=http://localhost:8000/api/v1/auth/confluence/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/discord.mdx b/surfsense_web/content/docs/connectors/discord.mdx
index a90bcfe87..05825e0ea 100644
--- a/surfsense_web/content/docs/connectors/discord.mdx
+++ b/surfsense_web/content/docs/connectors/discord.mdx
@@ -64,17 +64,17 @@ You'll also see your **Application ID** and **Public Key** on this page.
## Running SurfSense with Discord Connector
-Add the Discord environment variables to your Docker run command:
+Add the Discord credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Discord Connector
- -e DISCORD_CLIENT_ID=your_discord_client_id \
- -e DISCORD_CLIENT_SECRET=your_discord_client_secret \
- -e DISCORD_REDIRECT_URI=http://localhost:8000/api/v1/auth/discord/connector/callback \
- -e DISCORD_BOT_TOKEN=http://localhost:8000/api/v1/auth/discord/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+DISCORD_CLIENT_ID=your_discord_client_id
+DISCORD_CLIENT_SECRET=your_discord_client_secret
+DISCORD_REDIRECT_URI=http://localhost:8000/api/v1/auth/discord/connector/callback
+DISCORD_BOT_TOKEN=your_discord_bot_token
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/gmail.mdx b/surfsense_web/content/docs/connectors/gmail.mdx
index 2b514f89e..1b3f81efe 100644
--- a/surfsense_web/content/docs/connectors/gmail.mdx
+++ b/surfsense_web/content/docs/connectors/gmail.mdx
@@ -70,16 +70,16 @@ This guide walks you through setting up a Google OAuth 2.0 integration for SurfS
## Running SurfSense with Gmail Connector
-Add the Google OAuth environment variables to your Docker run command:
+Add the Google OAuth credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Gmail Connector
- -e GOOGLE_OAUTH_CLIENT_ID=your_google_client_id \
- -e GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret \
- -e GOOGLE_GMAIL_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/gmail/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+GOOGLE_OAUTH_CLIENT_ID=your_google_client_id
+GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret
+GOOGLE_GMAIL_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/gmail/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/google-calendar.mdx b/surfsense_web/content/docs/connectors/google-calendar.mdx
index 7919d0361..481b05444 100644
--- a/surfsense_web/content/docs/connectors/google-calendar.mdx
+++ b/surfsense_web/content/docs/connectors/google-calendar.mdx
@@ -69,16 +69,16 @@ This guide walks you through setting up a Google OAuth 2.0 integration for SurfS
## Running SurfSense with Google Calendar Connector
-Add the Google OAuth environment variables to your Docker run command:
+Add the Google OAuth credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Google Calendar Connector
- -e GOOGLE_OAUTH_CLIENT_ID=your_google_client_id \
- -e GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret \
- -e GOOGLE_CALENDAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/calendar/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+GOOGLE_OAUTH_CLIENT_ID=your_google_client_id
+GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret
+GOOGLE_CALENDAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/calendar/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/google-drive.mdx b/surfsense_web/content/docs/connectors/google-drive.mdx
index 402b25566..238100860 100644
--- a/surfsense_web/content/docs/connectors/google-drive.mdx
+++ b/surfsense_web/content/docs/connectors/google-drive.mdx
@@ -70,16 +70,16 @@ This guide walks you through setting up a Google OAuth 2.0 integration for SurfS
## Running SurfSense with Google Drive Connector
-Add the Google OAuth environment variables to your Docker run command:
+Add the Google OAuth credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Google Drive Connector
- -e GOOGLE_OAUTH_CLIENT_ID=your_google_client_id \
- -e GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret \
- -e GOOGLE_DRIVE_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/drive/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+GOOGLE_OAUTH_CLIENT_ID=your_google_client_id
+GOOGLE_OAUTH_CLIENT_SECRET=your_google_client_secret
+GOOGLE_DRIVE_REDIRECT_URI=http://localhost:8000/api/v1/auth/google/drive/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/jira.mdx b/surfsense_web/content/docs/connectors/jira.mdx
index c6b5a26e1..5bddbab8d 100644
--- a/surfsense_web/content/docs/connectors/jira.mdx
+++ b/surfsense_web/content/docs/connectors/jira.mdx
@@ -84,16 +84,16 @@ This guide walks you through setting up an Atlassian OAuth 2.0 (3LO) integration
## Running SurfSense with Jira Connector
-Add the Atlassian environment variables to your Docker run command:
+Add the Atlassian credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Jira Connector
- -e ATLASSIAN_CLIENT_ID=your_atlassian_client_id \
- -e ATLASSIAN_CLIENT_SECRET=your_atlassian_client_secret \
- -e JIRA_REDIRECT_URI=http://localhost:8000/api/v1/auth/jira/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+ATLASSIAN_CLIENT_ID=your_atlassian_client_id
+ATLASSIAN_CLIENT_SECRET=your_atlassian_client_secret
+JIRA_REDIRECT_URI=http://localhost:8000/api/v1/auth/jira/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/linear.mdx b/surfsense_web/content/docs/connectors/linear.mdx
index 5fb7bc8c5..3fd82aba1 100644
--- a/surfsense_web/content/docs/connectors/linear.mdx
+++ b/surfsense_web/content/docs/connectors/linear.mdx
@@ -53,17 +53,17 @@ After creating the application, you'll see your OAuth credentials:
## Running SurfSense with Linear Connector
-Add the Linear environment variables to your Docker run command:
+Add the Linear credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Linear Connector
- -e LINEAR_CLIENT_ID=your_linear_client_id \
- -e LINEAR_CLIENT_SECRET=your_linear_client_secret \
- -e LINEAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/linear/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+LINEAR_CLIENT_ID=your_linear_client_id
+LINEAR_CLIENT_SECRET=your_linear_client_secret
+LINEAR_REDIRECT_URI=http://localhost:8000/api/v1/auth/linear/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/microsoft-teams.mdx b/surfsense_web/content/docs/connectors/microsoft-teams.mdx
index 53f36c249..5a05be709 100644
--- a/surfsense_web/content/docs/connectors/microsoft-teams.mdx
+++ b/surfsense_web/content/docs/connectors/microsoft-teams.mdx
@@ -90,16 +90,16 @@ After registration, you'll be taken to the app's **Overview** page. Here you'll
## Running SurfSense with Microsoft Teams Connector
-Add the Microsoft Teams environment variables to your Docker run command:
+Add the Microsoft Teams credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Microsoft Teams Connector
- -e TEAMS_CLIENT_ID=your_microsoft_client_id \
- -e TEAMS_CLIENT_SECRET=your_microsoft_client_secret \
- -e TEAMS_REDIRECT_URI=http://localhost:8000/api/v1/auth/teams/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+TEAMS_CLIENT_ID=your_microsoft_client_id
+TEAMS_CLIENT_SECRET=your_microsoft_client_secret
+TEAMS_REDIRECT_URI=http://localhost:8000/api/v1/auth/teams/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/notion.mdx b/surfsense_web/content/docs/connectors/notion.mdx
index 6fcda8dae..ca5856340 100644
--- a/surfsense_web/content/docs/connectors/notion.mdx
+++ b/surfsense_web/content/docs/connectors/notion.mdx
@@ -91,16 +91,16 @@ For additional information:
## Running SurfSense with Notion Connector
-Add the Notion environment variables to your Docker run command:
+Add the Notion credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Notion Connector
- -e NOTION_OAUTH_CLIENT_ID=your_notion_client_id \
- -e NOTION_OAUTH_CLIENT_SECRET=your_notion_client_secret \
- -e NOTION_REDIRECT_URI=http://localhost:8000/api/v1/auth/notion/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+NOTION_OAUTH_CLIENT_ID=your_notion_client_id
+NOTION_OAUTH_CLIENT_SECRET=your_notion_client_secret
+NOTION_REDIRECT_URI=http://localhost:8000/api/v1/auth/notion/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
diff --git a/surfsense_web/content/docs/connectors/slack.mdx b/surfsense_web/content/docs/connectors/slack.mdx
index 072b83343..af38487cc 100644
--- a/surfsense_web/content/docs/connectors/slack.mdx
+++ b/surfsense_web/content/docs/connectors/slack.mdx
@@ -80,16 +80,16 @@ Click **"Add an OAuth Scope"** to add each scope.
## Running SurfSense with Slack Connector
-Add the Slack environment variables to your Docker run command:
+Add the Slack credentials to your `.env` file (created during [Docker installation](/docs/docker-installation)):
```bash
-docker run -d -p 3000:3000 -p 8000:8000 \
- -v surfsense-data:/data \
- # Slack Connector
- -e SLACK_CLIENT_ID=your_slack_client_id \
- -e SLACK_CLIENT_SECRET=your_slack_client_secret \
- -e SLACK_REDIRECT_URI=https://localhost:8000/api/v1/auth/slack/connector/callback \
- --name surfsense \
- --restart unless-stopped \
- ghcr.io/modsetter/surfsense:latest
+SLACK_CLIENT_ID=your_slack_client_id
+SLACK_CLIENT_SECRET=your_slack_client_secret
+SLACK_REDIRECT_URI=http://localhost:8000/api/v1/auth/slack/connector/callback
+```
+
+Then restart the services:
+
+```bash
+docker compose up -d
```
From 32d092b2934011dab6de154696726973778a7200 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 19:39:07 +0530
Subject: [PATCH 36/57] feat: add clean GHCR version tags alongside build tags;
 also update docker installation docs
---
.github/workflows/docker_build.yaml | 8 ++++++++
surfsense_web/content/docs/docker-installation.mdx | 2 +-
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/docker_build.yaml b/.github/workflows/docker_build.yaml
index 73ae1882c..6fd336cf9 100644
--- a/.github/workflows/docker_build.yaml
+++ b/.github/workflows/docker_build.yaml
@@ -189,6 +189,7 @@ jobs:
run: |
VERSION_TAG="${{ needs.tag_release.outputs.new_tag }}"
IMAGE="${{ steps.image.outputs.name }}"
+ APP_VERSION=$(echo "$VERSION_TAG" | rev | cut -d. -f2- | rev)
docker manifest create ${IMAGE}:${VERSION_TAG} \
${IMAGE}:${VERSION_TAG}-amd64 \
@@ -197,6 +198,12 @@ jobs:
docker manifest push ${IMAGE}:${VERSION_TAG}
if [[ "${{ github.ref }}" == "refs/heads/${{ github.event.repository.default_branch }}" ]] || [[ "${{ github.event.inputs.branch }}" == "${{ github.event.repository.default_branch }}" ]]; then
+ docker manifest create ${IMAGE}:${APP_VERSION} \
+ ${IMAGE}:${VERSION_TAG}-amd64 \
+ ${IMAGE}:${VERSION_TAG}-arm64
+
+ docker manifest push ${IMAGE}:${APP_VERSION}
+
docker manifest create ${IMAGE}:latest \
${IMAGE}:${VERSION_TAG}-amd64 \
${IMAGE}:${VERSION_TAG}-arm64
@@ -208,4 +215,5 @@ jobs:
run: |
echo "Multi-arch manifest created for ${{ matrix.name }}!"
echo "Versioned: ${{ steps.image.outputs.name }}:${{ needs.tag_release.outputs.new_tag }}"
+ echo "App version: ${{ steps.image.outputs.name }}:$(echo '${{ needs.tag_release.outputs.new_tag }}' | rev | cut -d. -f2- | rev)"
echo "Latest: ${{ steps.image.outputs.name }}:latest"
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index e6aec07ea..0d65542e2 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -85,7 +85,7 @@ All configuration lives in a single `docker/.env` file (or `surfsense/.env` if y
| Variable | Description | Default |
|----------|-------------|---------|
-| `SURFSENSE_VERSION` | Image tag to deploy. Pin to a version (e.g. `0.0.13.1`) or use `latest` | `latest` |
+| `SURFSENSE_VERSION` | Image tag to deploy. Use `latest`, a clean version (e.g. `0.0.14`), or a specific build (e.g. `0.0.14.1`) | `latest` |
| `AUTH_TYPE` | Authentication method: `LOCAL` (email/password) or `GOOGLE` (OAuth) | `LOCAL` |
| `ETL_SERVICE` | Document parsing: `DOCLING` (local), `UNSTRUCTURED`, or `LLAMACLOUD` | `DOCLING` |
| `EMBEDDING_MODEL` | Embedding model for vector search | `sentence-transformers/all-MiniLM-L6-v2` |
From bc3db01778fee8f6775fb83eb84e85ee4e6a0774 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 19:40:36 +0530
Subject: [PATCH 37/57] docs: update SurfSense version comment in .env.example
to clarify versioning options
---
docker/.env.example | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/.env.example b/docker/.env.example
index 470037ee3..7025cac52 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -4,7 +4,7 @@
# Database, Redis, and internal service wiring are handled automatically.
# ==============================================================================
-# SurfSense version (pin to a specific version like "0.0.13.1" or use "latest")
+# SurfSense version (use "latest", a clean version like "0.0.14", or a specific build like "0.0.14.1")
SURFSENSE_VERSION=latest
# ------------------------------------------------------------------------------
From 06e6c1c1be1d2b78b82bc3bdffaf30b1286560f6 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 20:08:18 +0530
Subject: [PATCH 38/57] chore: add healthcheck configuration for backend and
update service dependencies to ensure healthy startup
---
docker/docker-compose.dev.yml | 14 +++++++++++---
docker/docker-compose.yml | 14 +++++++++++---
2 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 94811b2aa..3408a80c0 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -92,6 +92,12 @@ services:
condition: service_healthy
redis:
condition: service_healthy
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+ interval: 15s
+ timeout: 5s
+ retries: 30
+ start_period: 200s
celery_worker:
build: ../surfsense_backend
@@ -116,7 +122,7 @@ services:
redis:
condition: service_healthy
backend:
- condition: service_started
+ condition: service_healthy
celery_beat:
build: ../surfsense_backend
@@ -184,8 +190,10 @@ services:
env_file:
- ../surfsense_web/.env
depends_on:
- - backend
- - electric
+ backend:
+ condition: service_healthy
+ electric:
+ condition: service_healthy
volumes:
postgres_data:
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index b4108ef65..7d7606938 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -76,6 +76,12 @@ services:
redis:
condition: service_healthy
restart: unless-stopped
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
+ interval: 15s
+ timeout: 5s
+ retries: 30
+ start_period: 200s
celery_worker:
image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
@@ -99,7 +105,7 @@ services:
redis:
condition: service_healthy
backend:
- condition: service_started
+ condition: service_healthy
labels:
- "com.centurylinklabs.watchtower.enable=true"
restart: unless-stopped
@@ -174,8 +180,10 @@ services:
labels:
- "com.centurylinklabs.watchtower.enable=true"
depends_on:
- - backend
- - electric
+ backend:
+ condition: service_healthy
+ electric:
+ condition: service_healthy
restart: unless-stopped
volumes:
From 232622f9b3370acbcbc4365ce8cb5ab55dc0d15d Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Thu, 26 Feb 2026 20:17:19 +0530
Subject: [PATCH 39/57] docs: enhance Docker installation documentation with
development compose file details and health check information
---
.../content/docs/docker-installation.mdx | 30 +++++++++++++++++++
1 file changed, 30 insertions(+)
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 0d65542e2..52479a961 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -204,6 +204,36 @@ For Airtable, create an OAuth integration at the [Airtable Developer Hub](https:
All services start automatically with `docker compose up -d`.
+The backend includes a health check — dependent services (workers, frontend) wait until the API is fully ready before starting. You can monitor startup progress with `docker compose ps` (look for `(health: starting)` → `(healthy)`).
+
+---
+
+## Development Compose File
+
+If you're contributing to SurfSense and want to build from source, use `docker-compose.dev.yml` instead:
+
+```bash
+cd SurfSense/docker
+docker compose -f docker-compose.dev.yml up --build
+```
+
+This file builds the backend and frontend from your local source code (instead of pulling prebuilt images) and includes pgAdmin for database inspection at [http://localhost:5050](http://localhost:5050). Use the production `docker-compose.yml` for all other cases.
+
+The following `.env` variables are **only used by the dev compose file** (they have no effect on the production `docker-compose.yml`):
+
+| Variable | Description | Default |
+|----------|-------------|---------|
+| `PGADMIN_PORT` | pgAdmin web UI port | `5050` |
+| `PGADMIN_DEFAULT_EMAIL` | pgAdmin login email | `admin@surfsense.com` |
+| `PGADMIN_DEFAULT_PASSWORD` | pgAdmin login password | `surfsense` |
+| `REDIS_PORT` | Exposed Redis port (internal-only in prod) | `6379` |
+| `NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE` | Frontend build arg for auth type | `LOCAL` |
+| `NEXT_PUBLIC_ETL_SERVICE` | Frontend build arg for ETL service | `DOCLING` |
+| `NEXT_PUBLIC_DEPLOYMENT_MODE` | Frontend build arg for deployment mode | `self-hosted` |
+| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | Frontend build arg for Electric auth | `insecure` |
+
+In the production compose file, the `NEXT_PUBLIC_*` frontend variables are automatically derived from `AUTH_TYPE`, `ETL_SERVICE`, and the port settings. In the dev compose file, they are passed as build args since the frontend is built from source.
+
---
## Migrating from the All-in-One Container
From f03a7463853e925f7eba7dd52433878e1a7c0858 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 02:44:25 +0530
Subject: [PATCH 40/57] chore: update Docker installation script to point to
the new repository for SurfSense
---
docker/scripts/install.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 062754970..4a2cc90f4 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -17,7 +17,7 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
+REPO_RAW="https://raw.githubusercontent.com/AnishSarkar/SurfSense/fix/docker"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
From 233279b109db8cbdd042abb759dcb8cb3cb5dfda Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 03:44:07 +0530
Subject: [PATCH 41/57] chore: update Docker installation script to use the new
repository URL for SurfSense
---
docker/scripts/install.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 4a2cc90f4..fc14fc5cb 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -17,7 +17,7 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/AnishSarkar/SurfSense/fix/docker"
+REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
From 47a2ba0e567821529eecdad61c684165788662c6 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 03:45:10 +0530
Subject: [PATCH 42/57] chore: update Docker image references to use the new
repository for backend and frontend services
---
docker/docker-compose.yml | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 7d7606938..d65f24b3a 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -84,7 +84,7 @@ services:
start_period: 200s
celery_worker:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -111,7 +111,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -133,7 +133,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -167,7 +167,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
From 3e29ae37fadee834472f681c45a43ac252abfc53 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 03:54:13 +0530
Subject: [PATCH 43/57] chore: update SURFSENSE_VERSION in .env.example to
0.0.13.6
---
docker/.env.example | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/.env.example b/docker/.env.example
index 7025cac52..06442c9c5 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -5,7 +5,7 @@
# ==============================================================================
# SurfSense version (use "latest", a clean version like "0.0.14", or a specific build like "0.0.14.1")
-SURFSENSE_VERSION=latest
+SURFSENSE_VERSION=0.0.13.6
# ------------------------------------------------------------------------------
# Core Settings
From ccae5ffeb16c8b692e0080bbf367f7537d120181 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:06:01 +0530
Subject: [PATCH 44/57] fix: add error handling for missing dump file in
migration script and update logging for migration step
---
docker/scripts/install.sh | 2 ++
docker/scripts/migrate-database.sh | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index fc14fc5cb..501a40221 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -178,6 +178,8 @@ if $MIGRATION_MODE; then
wait_for_pg "${DB_USER}"
step "Restoring database"
+ [[ -f "${DUMP_FILE}" ]] \
+ || error "Dump file '${DUMP_FILE}' not found. The migration script may have failed.\n Check: ./surfsense-migration.log\n Or run manually: bash ${INSTALL_DIR}/scripts/migrate-database.sh --yes"
info "Restoring dump into PostgreSQL 17 — this may take a while for large databases..."
RESTORE_ERR="/tmp/surfsense_restore_err.log"
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
index 8a7c9eae6..26b276dad 100755
--- a/docker/scripts/migrate-database.sh
+++ b/docker/scripts/migrate-database.sh
@@ -132,7 +132,7 @@ wait_for_pg() {
success "${label} is ready."
}
-step "Migrating data from legacy database (PostgreSQL 14 → 17)"
+info "Migrating data from legacy database (PostgreSQL 14 → 17)"
# ── Step 0: Pre-flight checks ─────────────────────────────────────────────────
step "0" "Pre-flight checks"
From 48ee5e86aa44d581757bd329b72982d82491cf14 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:17:37 +0530
Subject: [PATCH 45/57] chore: updated links to the main branch for production
---
docker/.env.example | 2 +-
docker/docker-compose.yml | 10 +++++-----
docker/scripts/install.sh | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 06442c9c5..7025cac52 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -5,7 +5,7 @@
# ==============================================================================
# SurfSense version (use "latest", a clean version like "0.0.14", or a specific build like "0.0.14.1")
-SURFSENSE_VERSION=0.0.13.6
+SURFSENSE_VERSION=latest
# ------------------------------------------------------------------------------
# Core Settings
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index d65f24b3a..7d7606938 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -84,7 +84,7 @@ services:
start_period: 200s
celery_worker:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -111,7 +111,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -133,7 +133,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -167,7 +167,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 501a40221..4fa80cfb3 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -17,7 +17,7 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
+REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
From 4e401fdb923249bc63de4bd83251dea965a904ea Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:41:08 +0530
Subject: [PATCH 46/57] feat: enhance installation script to include automatic
Watchtower setup with customizable interval
---
README.md | 17 +-----
docker/scripts/install.sh | 52 +++++++++++++++++++
.../content/docs/docker-installation.mdx | 15 +++++-
3 files changed, 68 insertions(+), 16 deletions(-)
diff --git a/README.md b/README.md
index fbf9f3347..b5d621f15 100644
--- a/README.md
+++ b/README.md
@@ -85,22 +85,9 @@ Run SurfSense on your own infrastructure for full data control and privacy.
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-For Docker Compose and other deployment options, see the [Docker Installation docs](https://www.surfsense.com/docs/docker-installation).
+The install script sets up [Watchtower](https://github.com/nicholas-fedor/watchtower) automatically for daily auto-updates. To skip it, add the `--no-watchtower` flag.
-**Update (recommended — Watchtower):**
-
-```bash
-docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
-```
-
-**Update (manual):**
-
-```bash
-cd surfsense # or SurfSense/docker if you used Option 2
-docker compose pull && docker compose up -d
-```
-
-For manual installation and other deployment options, check the [docs](https://www.surfsense.com/docs/).
+For Docker Compose, manual installation, and other deployment options, see the [docs](https://www.surfsense.com/docs/).
### How to Realtime Collaborate (Beta)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 4fa80cfb3..0f0923a94 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -1,8 +1,14 @@
#!/usr/bin/env bash
# =============================================================================
# SurfSense — One-line Install Script
+#
+#
# Usage: curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
#
+# Flags:
+# --no-watchtower Skip automatic Watchtower setup
+# --watchtower-interval=SECS Check interval in seconds (default: 86400 = 24h)
+#
# Handles two cases automatically:
# 1. Fresh install — no prior SurfSense data detected
# 2. Migration from the legacy all-in-one container (surfsense-data volume)
@@ -23,6 +29,17 @@ OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
KEY_FILE="./surfsense_migration_secret.key"
MIGRATION_MODE=false
+SETUP_WATCHTOWER=true
+WATCHTOWER_INTERVAL=86400
+WATCHTOWER_CONTAINER="watchtower"
+
+# ── Parse flags ─────────────────────────────────────────────────────────────
+for arg in "$@"; do
+ case "$arg" in
+ --no-watchtower) SETUP_WATCHTOWER=false ;;
+ --watchtower-interval=*) WATCHTOWER_INTERVAL="${arg#*=}" ;;
+ esac
+done
CYAN='\033[1;36m'
YELLOW='\033[1;33m'
@@ -231,6 +248,34 @@ else
success "All services started."
fi
+# ── Watchtower (auto-update) ─────────────────────────────────────────────────
+
+if $SETUP_WATCHTOWER; then
+ step "Setting up Watchtower (auto-updates every $((WATCHTOWER_INTERVAL / 3600))h)"
+
+ WT_STATE=$(docker inspect -f '{{.State.Running}}' "${WATCHTOWER_CONTAINER}" 2>/dev/null || echo "missing")
+
+ if [[ "${WT_STATE}" == "true" ]]; then
+ success "Watchtower is already running — skipping."
+ else
+ if [[ "${WT_STATE}" != "missing" ]]; then
+ info "Removing stopped Watchtower container..."
+ docker rm -f "${WATCHTOWER_CONTAINER}" >/dev/null 2>&1 || true
+ fi
+ docker run -d \
+ --name "${WATCHTOWER_CONTAINER}" \
+ --restart unless-stopped \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ nickfedor/watchtower \
+ --label-enable \
+ --interval "${WATCHTOWER_INTERVAL}" >/dev/null 2>&1 \
+ && success "Watchtower started — labeled SurfSense containers will auto-update." \
+ || warn "Could not start Watchtower. You can set it up manually or use: docker compose pull && docker compose up -d"
+ fi
+else
+ info "Skipping Watchtower setup (--no-watchtower flag)."
+fi
+
# ── Done ─────────────────────────────────────────────────────────────────────
echo ""
@@ -267,6 +312,13 @@ info " Stop: cd ${INSTALL_DIR} && ${DC} down"
info " Update: cd ${INSTALL_DIR} && ${DC} pull && ${DC} up -d"
info ""
+if $SETUP_WATCHTOWER; then
+ info " Watchtower: auto-updates every $((WATCHTOWER_INTERVAL / 3600))h (stop: docker rm -f ${WATCHTOWER_CONTAINER})"
+else
+ warn " Watchtower skipped. For auto-updates, re-run without --no-watchtower."
+fi
+info ""
+
if $MIGRATION_MODE; then
warn " Migration complete! Open frontend and verify your data."
warn " Once verified, clean up the legacy volume and dump file:"
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 52479a961..9cabb0049 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -9,7 +9,7 @@ This guide explains how to run SurfSense using Docker, with options ranging from
### Option 1 — Install Script (recommended)
-Downloads the compose files, generates a `SECRET_KEY`, and starts all services automatically:
+Downloads the compose files, generates a `SECRET_KEY`, starts all services, and sets up [Watchtower](https://github.com/nicholas-fedor/watchtower) for automatic daily updates:
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
@@ -17,6 +17,14 @@ curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scr
This creates a `./surfsense/` directory with `docker-compose.yml` and `.env`, then runs `docker compose up -d`.
+To skip Watchtower (e.g. in production where you manage updates yourself):
+
+```bash
+curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash -s -- --no-watchtower
+```
+
+To customise the check interval (default 24h), use `--watchtower-interval=SECONDS`.
+
### Option 2 — Manual Docker Compose
```bash
@@ -40,8 +48,13 @@ After starting, access SurfSense at:
**Option 1 — Watchtower daemon (recommended, auto-updates every 24 h):**
+If you used the install script (Option 1 above), Watchtower is already running. No extra setup needed.
+
+For manual Docker Compose installs (Option 2), start Watchtower separately:
+
```bash
docker run -d --name watchtower \
+ --restart unless-stopped \
-v /var/run/docker.sock:/var/run/docker.sock \
nickfedor/watchtower \
--label-enable \
From d6cb41d130bb21335fb7c8ff393182872e88348d Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:46:29 +0530
Subject: [PATCH 47/57] docs: update installation instructions across multiple
languages to clarify prerequisites and automatic Watchtower setup
---
README.es.md | 19 ++++---------------
README.hi.md | 19 ++++---------------
README.md | 2 ++
README.pt-BR.md | 19 ++++---------------
README.zh-CN.md | 19 ++++---------------
5 files changed, 18 insertions(+), 60 deletions(-)
diff --git a/README.es.md b/README.es.md
index 5aeb7c310..4795cc9a8 100644
--- a/README.es.md
+++ b/README.es.md
@@ -81,26 +81,15 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Ejecuta SurfSense en tu propia infraestructura para control total de datos y privacidad.
+**Requisitos previos:** [Docker](https://docs.docker.com/get-docker/) (con [Docker Compose](https://docs.docker.com/compose/install/)) debe estar instalado y en ejecución.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Para Docker Compose y otras opciones de despliegue, consulta la [documentación de instalación Docker](https://www.surfsense.com/docs/docker-installation).
+El script de instalación configura [Watchtower](https://github.com/nicholas-fedor/watchtower) automáticamente para actualizaciones diarias. Para omitirlo, agrega la bandera `--no-watchtower`.
-**Actualización (recomendada — Watchtower):**
-
-```bash
-docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
-```
-
-**Actualización (manual):**
-
-```bash
-cd surfsense # o SurfSense/docker si usaste la Opción 2
-docker compose pull && docker compose up -d
-```
-
-Para instalación manual y otras opciones de despliegue, consulta la [documentación](https://www.surfsense.com/docs/).
+Para Docker Compose, instalación manual y otras opciones de despliegue, consulta la [documentación](https://www.surfsense.com/docs/).
### Cómo Colaborar en Tiempo Real (Beta)
diff --git a/README.hi.md b/README.hi.md
index cfa7a5b9f..3fc8beb71 100644
--- a/README.hi.md
+++ b/README.hi.md
@@ -81,26 +81,15 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
पूर्ण डेटा नियंत्रण और गोपनीयता के लिए SurfSense को अपने स्वयं के बुनियादी ढांचे पर चलाएं।
+**आवश्यकताएँ:** [Docker](https://docs.docker.com/get-docker/) ([Docker Compose](https://docs.docker.com/compose/install/) सहित) इंस्टॉल और चालू होना चाहिए।
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Docker Compose और अन्य डिप्लॉयमेंट विकल्पों के लिए, [Docker इंस्टॉलेशन डॉक्स](https://www.surfsense.com/docs/docker-installation) देखें।
+इंस्टॉल स्क्रिप्ट दैनिक ऑटो-अपडेट के लिए स्वचालित रूप से [Watchtower](https://github.com/nicholas-fedor/watchtower) सेटअप करती है। इसे छोड़ने के लिए, `--no-watchtower` फ्लैग जोड़ें।
-**अपडेट (अनुशंसित — Watchtower):**
-
-```bash
-docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
-```
-
-**अपडेट (मैनुअल):**
-
-```bash
-cd surfsense # या SurfSense/docker यदि आपने Option 2 का उपयोग किया
-docker compose pull && docker compose up -d
-```
-
-मैनुअल इंस्टॉलेशन और अन्य डिप्लॉयमेंट विकल्पों के लिए, [डॉक्स](https://www.surfsense.com/docs/) देखें।
+Docker Compose, मैनुअल इंस्टॉलेशन और अन्य डिप्लॉयमेंट विकल्पों के लिए, [डॉक्स](https://www.surfsense.com/docs/) देखें।
### रीयल-टाइम सहयोग कैसे करें (बीटा)
diff --git a/README.md b/README.md
index b5d621f15..9de1a23bb 100644
--- a/README.md
+++ b/README.md
@@ -81,6 +81,8 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Run SurfSense on your own infrastructure for full data control and privacy.
+**Prerequisites:** [Docker](https://docs.docker.com/get-docker/) (with [Docker Compose](https://docs.docker.com/compose/install/)) must be installed and running.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/README.pt-BR.md b/README.pt-BR.md
index edf25e5f3..adb102f40 100644
--- a/README.pt-BR.md
+++ b/README.pt-BR.md
@@ -81,26 +81,15 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
Execute o SurfSense na sua própria infraestrutura para controle total de dados e privacidade.
+**Pré-requisitos:** [Docker](https://docs.docker.com/get-docker/) (com [Docker Compose](https://docs.docker.com/compose/install/)) deve estar instalado e em execução.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-Para Docker Compose e outras opções de implantação, consulte a [documentação de instalação Docker](https://www.surfsense.com/docs/docker-installation).
+O script de instalação configura o [Watchtower](https://github.com/nicholas-fedor/watchtower) automaticamente para atualizações diárias. Para pular, adicione a flag `--no-watchtower`.
-**Atualização (recomendada — Watchtower):**
-
-```bash
-docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
-```
-
-**Atualização (manual):**
-
-```bash
-cd surfsense # ou SurfSense/docker se você usou a Opção 2
-docker compose pull && docker compose up -d
-```
-
-Para instalação manual e outras opções de implantação, consulte a [documentação](https://www.surfsense.com/docs/).
+Para Docker Compose, instalação manual e outras opções de implantação, consulte a [documentação](https://www.surfsense.com/docs/).
### Como Colaborar em Tempo Real (Beta)
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 080d5f554..9d5f7d6a4 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -81,26 +81,15 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
在您自己的基础设施上运行 SurfSense,实现完全的数据控制和隐私保护。
+**前置条件:** 需要安装并运行 [Docker](https://docs.docker.com/get-docker/)(含 [Docker Compose](https://docs.docker.com/compose/install/))。
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
-如需 Docker Compose 及其他部署方式,请查看 [Docker 安装文档](https://www.surfsense.com/docs/docker-installation)。
+安装脚本会自动配置 [Watchtower](https://github.com/nicholas-fedor/watchtower) 以实现每日自动更新。如需跳过,请添加 `--no-watchtower` 参数。
-**更新(推荐 — Watchtower):**
-
-```bash
-docker run -d --name watchtower -v /var/run/docker.sock:/var/run/docker.sock nickfedor/watchtower --label-enable --interval 86400
-```
-
-**更新(手动):**
-
-```bash
-cd surfsense # 若使用了方式二,则为 SurfSense/docker
-docker compose pull && docker compose up -d
-```
-
-如需手动安装及其他部署方式,请查看[文档](https://www.surfsense.com/docs/)。
+如需 Docker Compose、手动安装及其他部署方式,请查看[文档](https://www.surfsense.com/docs/)。
### 如何实时协作(Beta)
From 66d9cd570bd9d8b92ce589900d33d24c200f869b Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 04:59:12 +0530
Subject: [PATCH 48/57] fix: suppress benign output during database restoration
in installation script
---
docker/scripts/install.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 0f0923a94..defcde088 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -203,7 +203,7 @@ if $MIGRATION_MODE; then
(cd "${INSTALL_DIR}" && ${DC} exec -T \
-e PGPASSWORD="${DB_PASS}" \
db psql -U "${DB_USER}" -d "${DB_NAME}" \
- 2>"${RESTORE_ERR}") < "${DUMP_FILE}" || true
+ >/dev/null 2>"${RESTORE_ERR}") < "${DUMP_FILE}" || true
# Surface real errors; ignore benign "already exists" noise from pg_dump headers
FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR}" \
From 9124b19ee9c0f495686935a2e8b5fd09b1b277ab Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 20:46:16 +0530
Subject: [PATCH 49/57] chore: update Docker image references for testing
---
docker/.env.example | 2 +-
docker/docker-compose.yml | 10 +++++-----
docker/scripts/install.sh | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 7025cac52..06442c9c5 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -5,7 +5,7 @@
# ==============================================================================
# SurfSense version (use "latest", a clean version like "0.0.14", or a specific build like "0.0.14.1")
-SURFSENSE_VERSION=latest
+SURFSENSE_VERSION=0.0.13.6
# ------------------------------------------------------------------------------
# Core Settings
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 7d7606938..d65f24b3a 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -84,7 +84,7 @@ services:
start_period: 200s
celery_worker:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -111,7 +111,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -133,7 +133,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -167,7 +167,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index defcde088..6dbae20b0 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -23,7 +23,7 @@
set -euo pipefail
-REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
+REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
From fbef82037780cb99d9152f8ab5bb04014787f236 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 21:22:15 +0530
Subject: [PATCH 50/57] fix: redirect stdin to /dev/null in Docker commands to
suppress unwanted input prompts
---
docker/scripts/install.sh | 22 ++++++++---------
docker/scripts/migrate-database.sh | 38 +++++++++++++++---------------
2 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 6dbae20b0..91b5ea1e6 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -62,11 +62,11 @@ command -v docker >/dev/null 2>&1 \
|| error "Docker is not installed. Install it at: https://docs.docker.com/get-docker/"
success "Docker found."
-docker info >/dev/null 2>&1 \
+docker info >/dev/null 2>&1 < /dev/null \
|| error "Docker daemon is not running. Please start Docker and try again."
success "Docker daemon is running."
-if docker compose version >/dev/null 2>&1; then
+if docker compose version >/dev/null 2>&1 < /dev/null; then
DC="docker compose"
elif command -v docker-compose >/dev/null 2>&1; then
DC="docker-compose"
@@ -82,7 +82,7 @@ wait_for_pg() {
local attempt=0
info "Waiting for PostgreSQL to accept connections..."
- until (cd "${INSTALL_DIR}" && ${DC} exec -T db pg_isready -U "${db_user}" -q 2>/dev/null); do
+ until (cd "${INSTALL_DIR}" && ${DC} exec -T db pg_isready -U "${db_user}" -q 2>/dev/null) < /dev/null; do
attempt=$((attempt + 1))
if [[ $attempt -ge $max_attempts ]]; then
error "PostgreSQL did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: cd ${INSTALL_DIR} && ${DC} logs db"
@@ -125,7 +125,7 @@ success "All files downloaded to ${INSTALL_DIR}/"
# If a dump already exists (from a previous partial run) skip extraction and
# go straight to restore — this makes re-runs safe and idempotent.
-if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q "^${OLD_VOLUME}$"; then
+if docker volume ls --format '{{.Name}}' 2>/dev/null < /dev/null | grep -q "^${OLD_VOLUME}$"; then
MIGRATION_MODE=true
if [[ -f "${DUMP_FILE}" ]]; then
@@ -191,7 +191,7 @@ if $MIGRATION_MODE; then
DB_NAME="${DB_NAME:-surfsense}"
step "Starting PostgreSQL 17"
- (cd "${INSTALL_DIR}" && ${DC} up -d db)
+ (cd "${INSTALL_DIR}" && ${DC} up -d db) < /dev/null
wait_for_pg "${DB_USER}"
step "Restoring database"
@@ -226,7 +226,7 @@ if $MIGRATION_MODE; then
-e PGPASSWORD="${DB_PASS}" \
db psql -U "${DB_USER}" -d "${DB_NAME}" -t \
-c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \
- 2>/dev/null | tr -d ' \n' || echo "0"
+ 2>/dev/null < /dev/null | tr -d ' \n' || echo "0"
)
if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then
warn "Smoke test: no tables found after restore."
@@ -236,7 +236,7 @@ if $MIGRATION_MODE; then
fi
step "Starting all SurfSense services"
- (cd "${INSTALL_DIR}" && ${DC} up -d)
+ (cd "${INSTALL_DIR}" && ${DC} up -d) < /dev/null
success "All services started."
# Key file is no longer needed — SECRET_KEY is now in .env
@@ -244,7 +244,7 @@ if $MIGRATION_MODE; then
else
step "Starting SurfSense"
- (cd "${INSTALL_DIR}" && ${DC} up -d)
+ (cd "${INSTALL_DIR}" && ${DC} up -d) < /dev/null
success "All services started."
fi
@@ -253,14 +253,14 @@ fi
if $SETUP_WATCHTOWER; then
step "Setting up Watchtower (auto-updates every $((WATCHTOWER_INTERVAL / 3600))h)"
- WT_STATE=$(docker inspect -f '{{.State.Running}}' "${WATCHTOWER_CONTAINER}" 2>/dev/null || echo "missing")
+ WT_STATE=$(docker inspect -f '{{.State.Running}}' "${WATCHTOWER_CONTAINER}" 2>/dev/null < /dev/null || echo "missing")
if [[ "${WT_STATE}" == "true" ]]; then
success "Watchtower is already running — skipping."
else
if [[ "${WT_STATE}" != "missing" ]]; then
info "Removing stopped Watchtower container..."
- docker rm -f "${WATCHTOWER_CONTAINER}" >/dev/null 2>&1 || true
+ docker rm -f "${WATCHTOWER_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
fi
docker run -d \
--name "${WATCHTOWER_CONTAINER}" \
@@ -268,7 +268,7 @@ if $SETUP_WATCHTOWER; then
-v /var/run/docker.sock:/var/run/docker.sock \
nickfedor/watchtower \
--label-enable \
- --interval "${WATCHTOWER_INTERVAL}" >/dev/null 2>&1 \
+ --interval "${WATCHTOWER_INTERVAL}" >/dev/null 2>&1 < /dev/null \
&& success "Watchtower started — labeled SurfSense containers will auto-update." \
|| warn "Could not start Watchtower. You can set it up manually or use: docker compose pull && docker compose up -d"
fi
diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh
index 26b276dad..9bc1d90b7 100755
--- a/docker/scripts/migrate-database.sh
+++ b/docker/scripts/migrate-database.sh
@@ -98,10 +98,10 @@ confirm() {
# ── Cleanup trap — always remove the temp container ──────────────────────────
cleanup() {
local exit_code=$?
- if docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${TEMP_CONTAINER}$"; then
+ if docker ps -a --format '{{.Names}}' 2>/dev/null < /dev/null | grep -q "^${TEMP_CONTAINER}$"; then
info "Cleaning up temporary container '${TEMP_CONTAINER}'..."
- docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
- docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+ docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
+ docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
fi
if [[ $exit_code -ne 0 ]]; then
printf "\n${RED}[SurfSense]${NC} Migration data extraction failed (exit code %s).\n" "${exit_code}" >&2
@@ -120,7 +120,7 @@ wait_for_pg() {
local attempt=0
info "Waiting for ${label} to accept connections..."
- until docker exec "${container}" pg_isready -U "${user}" -q 2>/dev/null; do
+ until docker exec "${container}" pg_isready -U "${user}" -q 2>/dev/null < /dev/null; do
attempt=$((attempt + 1))
if [[ $attempt -ge $max_attempts ]]; then
error "${label} did not become ready after $((max_attempts * 2)) seconds. Check: docker logs ${container}"
@@ -142,23 +142,23 @@ command -v docker >/dev/null 2>&1 \
|| error "Docker is not installed. Install it at: https://docs.docker.com/get-docker/"
# Docker daemon
-docker info >/dev/null 2>&1 \
+docker info >/dev/null 2>&1 < /dev/null \
|| error "Docker daemon is not running. Please start Docker and try again."
# Old volume must exist
-docker volume ls --format '{{.Name}}' | grep -q "^${OLD_VOLUME}$" \
+docker volume ls --format '{{.Name}}' < /dev/null | grep -q "^${OLD_VOLUME}$" \
|| error "Legacy volume '${OLD_VOLUME}' not found.\n Are you sure you ran the old all-in-one SurfSense container?"
success "Found legacy volume: ${OLD_VOLUME}"
# Detect and stop any container currently using the old volume
# (mounting a live PG volume into a second container causes the new container's
# entrypoint to chown the data files, breaking the running container's access)
-OLD_CONTAINER=$(docker ps --filter "volume=${OLD_VOLUME}" --format '{{.Names}}' | head -n1 || true)
+OLD_CONTAINER=$(docker ps --filter "volume=${OLD_VOLUME}" --format '{{.Names}}' < /dev/null | head -n1 || true)
if [[ -n "${OLD_CONTAINER}" ]]; then
warn "Container '${OLD_CONTAINER}' is running and using the '${OLD_VOLUME}' volume."
warn "It must be stopped before migration to prevent data file corruption."
confirm "Stop '${OLD_CONTAINER}' now and proceed with data extraction?"
- docker stop "${OLD_CONTAINER}" >/dev/null 2>&1 \
+ docker stop "${OLD_CONTAINER}" >/dev/null 2>&1 < /dev/null \
|| error "Failed to stop '${OLD_CONTAINER}'. Try: docker stop ${OLD_CONTAINER}"
success "Container '${OLD_CONTAINER}' stopped."
fi
@@ -172,10 +172,10 @@ if [[ -f "${DUMP_FILE}" ]]; then
fi
# Clean up any stale temp container from a previous failed run
-if docker ps -a --format '{{.Names}}' | grep -q "^${TEMP_CONTAINER}$"; then
+if docker ps -a --format '{{.Names}}' < /dev/null | grep -q "^${TEMP_CONTAINER}$"; then
warn "Stale migration container '${TEMP_CONTAINER}' found — removing it."
- docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
- docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+ docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
+ docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
fi
# Disk space (warn if < 500 MB free)
@@ -205,7 +205,7 @@ confirm "Start data extraction? (Your original data will not be deleted or modif
step "1" "Starting temporary PostgreSQL 14 container"
info "Pulling ${PG14_IMAGE}..."
-docker pull "${PG14_IMAGE}" >/dev/null 2>&1 \
+docker pull "${PG14_IMAGE}" >/dev/null 2>&1 < /dev/null \
|| warn "Could not pull ${PG14_IMAGE} — using cached image if available."
# Detect the UID that owns the existing data files and run the temp container
@@ -214,7 +214,7 @@ docker pull "${PG14_IMAGE}" >/dev/null 2>&1 \
# re-own the files to UID 999 and break any subsequent access by the original
# container's postgres process (which may run as a different UID).
DATA_UID=$(docker run --rm -v "${OLD_VOLUME}:/data" alpine \
- stat -c '%u' /data/postgres 2>/dev/null || echo "")
+ stat -c '%u' /data/postgres 2>/dev/null < /dev/null || echo "")
if [[ -z "${DATA_UID}" || "${DATA_UID}" == "0" ]]; then
warn "Could not detect data directory UID — falling back to default (may chown files)."
USER_FLAG=""
@@ -231,7 +231,7 @@ docker run -d \
-e POSTGRES_PASSWORD="${OLD_DB_PASSWORD}" \
-e POSTGRES_DB="${OLD_DB_NAME}" \
${USER_FLAG} \
- "${PG14_IMAGE}" >/dev/null
+ "${PG14_IMAGE}" >/dev/null < /dev/null
success "Temporary container '${TEMP_CONTAINER}' started."
wait_for_pg "${TEMP_CONTAINER}" "${OLD_DB_USER}" "PostgreSQL 14"
@@ -245,7 +245,7 @@ if ! docker exec \
-e PGPASSWORD="${OLD_DB_PASSWORD}" \
"${TEMP_CONTAINER}" \
pg_dump -U "${OLD_DB_USER}" --no-password "${OLD_DB_NAME}" \
- > "${DUMP_FILE}" 2>/tmp/surfsense_pgdump_err; then
+ > "${DUMP_FILE}" 2>/tmp/surfsense_pgdump_err < /dev/null; then
cat /tmp/surfsense_pgdump_err >&2
error "pg_dump failed. See above for details."
fi
@@ -268,8 +268,8 @@ success "Dump complete: ${DUMP_SIZE} (${DUMP_LINES} lines) → ${DUMP_FILE}"
# Stop the temp container (trap will also handle it on unexpected exit)
info "Stopping temporary PostgreSQL 14 container..."
-docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
-docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true
+docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
+docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 < /dev/null || true
success "Temporary container removed."
# ── Step 3: Recover SECRET_KEY ────────────────────────────────────────────────
@@ -279,10 +279,10 @@ RECOVERED_KEY=""
if docker run --rm -v "${OLD_VOLUME}:/data" alpine \
sh -c 'test -f /data/.secret_key && cat /data/.secret_key' \
- 2>/dev/null | grep -q .; then
+ 2>/dev/null < /dev/null | grep -q .; then
RECOVERED_KEY=$(
docker run --rm -v "${OLD_VOLUME}:/data" alpine \
- cat /data/.secret_key 2>/dev/null | tr -d '[:space:]'
+ cat /data/.secret_key 2>/dev/null < /dev/null | tr -d '[:space:]'
)
success "Recovered SECRET_KEY from '${OLD_VOLUME}'."
else
From 494307b2b9badb61e3c5ea8abb06dc3a30f4c3a0 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 21:40:49 +0530
Subject: [PATCH 51/57] refactor: encapsulate installation script logic in a
main function for improved structure and readability
---
docker/scripts/install.sh | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 91b5ea1e6..d8ac8c0e7 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -23,6 +23,8 @@
set -euo pipefail
+main() {
+
REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
@@ -145,7 +147,7 @@ if docker volume ls --format '{{.Name}}' 2>/dev/null < /dev/null | grep -q "^${O
# Run extraction non-interactively. On failure the error from
# migrate-database.sh is printed and install.sh exits here.
- bash "${INSTALL_DIR}/scripts/migrate-database.sh" --yes \
+ bash "${INSTALL_DIR}/scripts/migrate-database.sh" --yes < /dev/null \
|| error "Data extraction failed. See ./surfsense-migration.log for details.\nYou can also run migrate-database.sh manually with custom flags:\n bash ${INSTALL_DIR}/scripts/migrate-database.sh --db-user X --db-password Y"
printf "\n"
@@ -328,3 +330,7 @@ else
warn " First startup may take a few minutes while images are pulled."
warn " Edit ${INSTALL_DIR}/.env to configure API keys, OAuth, etc."
fi
+
+} # end main()
+
+main "$@"
From 6626a0221c552270306c827c86ae50fe3b076786 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 21:49:42 +0530
Subject: [PATCH 52/57] chore: update Redis and ElectricSQL Docker images to
latest versions
---
docker/docker-compose.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index d65f24b3a..10d1a2077 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -31,7 +31,7 @@ services:
retries: 5
redis:
- image: redis:7-alpine
+ image: redis:8-alpine
volumes:
- redis_data:/data
command: redis-server --appendonly yes
@@ -149,7 +149,7 @@ services:
# restart: unless-stopped
electric:
- image: electricsql/electric:1.4.6
+ image: electricsql/electric:1.4.10
ports:
- "${ELECTRIC_PORT:-5133}:3000"
environment:
From 848ad550713877248ceb93acfde15f28ff33d769 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 22:21:44 +0530
Subject: [PATCH 53/57] chore: update URL links for production
---
docker/.env.example | 2 +-
docker/docker-compose.yml | 10 +++++-----
docker/scripts/install.sh | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docker/.env.example b/docker/.env.example
index 06442c9c5..7025cac52 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -5,7 +5,7 @@
# ==============================================================================
# SurfSense version (use "latest", a clean version like "0.0.14", or a specific build like "0.0.14.1")
-SURFSENSE_VERSION=0.0.13.6
+SURFSENSE_VERSION=latest
# ------------------------------------------------------------------------------
# Core Settings
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 10d1a2077..9fca4dfb5 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -43,7 +43,7 @@ services:
retries: 5
backend:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
ports:
- "${BACKEND_PORT:-8000}:8000"
volumes:
@@ -84,7 +84,7 @@ services:
start_period: 200s
celery_worker:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
volumes:
- shared_temp:/shared_tmp
env_file:
@@ -111,7 +111,7 @@ services:
restart: unless-stopped
celery_beat:
- image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
env_file:
- .env
environment:
@@ -133,7 +133,7 @@ services:
restart: unless-stopped
# flower:
- # image: ghcr.io/anishsarkar22/surfsense-backend:${SURFSENSE_VERSION:-latest}
+ # image: ghcr.io/modsetter/surfsense-backend:${SURFSENSE_VERSION:-latest}
# ports:
# - "${FLOWER_PORT:-5555}:5555"
# env_file:
@@ -167,7 +167,7 @@ services:
retries: 5
frontend:
- image: ghcr.io/anishsarkar22/surfsense-web:${SURFSENSE_VERSION:-latest}
+ image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3000}:3000"
environment:
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index d8ac8c0e7..38bacb6b9 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -25,7 +25,7 @@ set -euo pipefail
main() {
-REPO_RAW="https://raw.githubusercontent.com/AnishSarkar22/SurfSense/fix/docker"
+REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main"
INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
From 57a1c715828fe37d7800b84de26b33ae41f82375 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 22:26:02 +0530
Subject: [PATCH 54/57] chore: upgrade Redis to version 8-alpine and
ElectricSQL to version 1.4.10 in Docker Compose configuration
---
docker/docker-compose.dev.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 3408a80c0..b76f26b2d 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -45,7 +45,7 @@ services:
- db
redis:
- image: redis:7-alpine
+ image: redis:8-alpine
ports:
- "${REDIS_PORT:-6379}:6379"
volumes:
@@ -159,7 +159,7 @@ services:
# - celery_worker
electric:
- image: electricsql/electric:1.4.6
+ image: electricsql/electric:1.4.10
ports:
- "${ELECTRIC_PORT:-5133}:3000"
# depends_on:
From 6e1ca96d5764344b6baf4eed8ccddc19c1714245 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Fri, 27 Feb 2026 22:59:51 +0530
Subject: [PATCH 55/57] feat: add migration completion tracking to installation
script
---
docker/scripts/install.sh | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh
index 38bacb6b9..4bd41a85a 100644
--- a/docker/scripts/install.sh
+++ b/docker/scripts/install.sh
@@ -30,6 +30,7 @@ INSTALL_DIR="./surfsense"
OLD_VOLUME="surfsense-data"
DUMP_FILE="./surfsense_migration_backup.sql"
KEY_FILE="./surfsense_migration_secret.key"
+MIGRATION_DONE_FILE="${INSTALL_DIR}/.migration_done"
MIGRATION_MODE=false
SETUP_WATCHTOWER=true
WATCHTOWER_INTERVAL=86400
@@ -127,7 +128,8 @@ success "All files downloaded to ${INSTALL_DIR}/"
# If a dump already exists (from a previous partial run) skip extraction and
# go straight to restore — this makes re-runs safe and idempotent.
-if docker volume ls --format '{{.Name}}' 2>/dev/null < /dev/null | grep -q "^${OLD_VOLUME}$"; then
+if docker volume ls --format '{{.Name}}' 2>/dev/null < /dev/null | grep -q "^${OLD_VOLUME}$" \
+ && [[ ! -f "${MIGRATION_DONE_FILE}" ]]; then
MIGRATION_MODE=true
if [[ -f "${DUMP_FILE}" ]]; then
@@ -235,6 +237,7 @@ if $MIGRATION_MODE; then
warn "The restore may have failed silently. Check: cd ${INSTALL_DIR} && ${DC} logs db"
else
success "Smoke test passed: ${TABLE_COUNT} table(s) restored successfully."
+ touch "${MIGRATION_DONE_FILE}"
fi
step "Starting all SurfSense services"
@@ -323,9 +326,10 @@ info ""
if $MIGRATION_MODE; then
warn " Migration complete! Open frontend and verify your data."
- warn " Once verified, clean up the legacy volume and dump file:"
+ warn " Once verified, clean up the legacy volume and migration files:"
warn " docker volume rm ${OLD_VOLUME}"
warn " rm ${DUMP_FILE}"
+ warn " rm ${MIGRATION_DONE_FILE}"
else
warn " First startup may take a few minutes while images are pulled."
warn " Edit ${INSTALL_DIR}/.env to configure API keys, OAuth, etc."
From 799b3dcbfedae1982ab61d37a55406f4a755af26 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Sun, 1 Mar 2026 00:43:02 +0530
Subject: [PATCH 56/57] docs: add note for Windows users to install WSL before
running installation commands
---
README.es.md | 3 +++
README.hi.md | 3 +++
README.md | 3 +++
README.pt-BR.md | 3 +++
README.zh-CN.md | 3 +++
surfsense_web/content/docs/docker-installation.mdx | 6 +++++-
6 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/README.es.md b/README.es.md
index 4795cc9a8..c2f55f366 100644
--- a/README.es.md
+++ b/README.es.md
@@ -83,6 +83,9 @@ Ejecuta SurfSense en tu propia infraestructura para control total de datos y pri
**Requisitos previos:** [Docker](https://docs.docker.com/get-docker/) (con [Docker Compose](https://docs.docker.com/compose/install/)) debe estar instalado y en ejecución.
+> [!NOTE]
+> Usuarios de Windows: instalen [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) primero y ejecuten el siguiente comando en la terminal de Ubuntu.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/README.hi.md b/README.hi.md
index 3fc8beb71..066e01eb7 100644
--- a/README.hi.md
+++ b/README.hi.md
@@ -83,6 +83,9 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
**आवश्यकताएँ:** [Docker](https://docs.docker.com/get-docker/) ([Docker Compose](https://docs.docker.com/compose/install/) सहित) इंस्टॉल और चालू होना चाहिए।
+> [!NOTE]
+> Windows उपयोगकर्ता: पहले [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) इंस्टॉल करें और नीचे दिया गया कमांड Ubuntu टर्मिनल में चलाएं।
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/README.md b/README.md
index 9de1a23bb..7641aa202 100644
--- a/README.md
+++ b/README.md
@@ -83,6 +83,9 @@ Run SurfSense on your own infrastructure for full data control and privacy.
**Prerequisites:** [Docker](https://docs.docker.com/get-docker/) (with [Docker Compose](https://docs.docker.com/compose/install/)) must be installed and running.
+> [!NOTE]
+> Windows users: install [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) first and run the command below in the Ubuntu terminal.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/README.pt-BR.md b/README.pt-BR.md
index adb102f40..d2e45fe5f 100644
--- a/README.pt-BR.md
+++ b/README.pt-BR.md
@@ -83,6 +83,9 @@ Execute o SurfSense na sua própria infraestrutura para controle total de dados
**Pré-requisitos:** [Docker](https://docs.docker.com/get-docker/) (com [Docker Compose](https://docs.docker.com/compose/install/)) deve estar instalado e em execução.
+> [!NOTE]
+> Usuários do Windows: instalem o [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) primeiro e executem o comando abaixo no terminal do Ubuntu.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/README.zh-CN.md b/README.zh-CN.md
index 9d5f7d6a4..218252388 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -83,6 +83,9 @@ https://github.com/user-attachments/assets/a0a16566-6967-4374-ac51-9b3e07fbecd7
**前置条件:** 需要安装并运行 [Docker](https://docs.docker.com/get-docker/)(含 [Docker Compose](https://docs.docker.com/compose/install/))。
+> [!NOTE]
+> Windows 用户:请先安装 [WSL](https://learn.microsoft.com/en-us/windows/wsl/install),然后在 Ubuntu 终端中运行以下命令。
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
```
diff --git a/surfsense_web/content/docs/docker-installation.mdx b/surfsense_web/content/docs/docker-installation.mdx
index 91d15940a..d6a6bca85 100644
--- a/surfsense_web/content/docs/docker-installation.mdx
+++ b/surfsense_web/content/docs/docker-installation.mdx
@@ -10,7 +10,11 @@ This guide explains how to run SurfSense using Docker, with options ranging from
### Option 1 — Install Script (recommended)
-Downloads the compose files, generates a `SECRET_KEY`, starts all services, and sets up [Watchtower](https://github.com/nicholas-fedor/watchtower) for automatic daily updates:
+Downloads the compose files, generates a `SECRET_KEY`, starts all services, and sets up [Watchtower](https://github.com/nicholas-fedor/watchtower) for automatic daily updates.
+
+
+Windows users: install [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) first and run the command below in the Ubuntu terminal.
+
```bash
curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash
From d24691a968a20e75b6ba078d3212b5c29cde21e7 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Mon, 2 Mar 2026 23:45:24 +0530
Subject: [PATCH 57/57] fix: increase timeout for alembic migrations in
entrypoint script to prevent premature failures
---
surfsense_backend/scripts/docker/entrypoint.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/surfsense_backend/scripts/docker/entrypoint.sh b/surfsense_backend/scripts/docker/entrypoint.sh
index e1a2778fc..7bfcfce86 100644
--- a/surfsense_backend/scripts/docker/entrypoint.sh
+++ b/surfsense_backend/scripts/docker/entrypoint.sh
@@ -53,7 +53,7 @@ run_migrations() {
sleep 1
done
- if timeout 60 alembic upgrade head 2>&1; then
+ if timeout 300 alembic upgrade head 2>&1; then
echo "Migrations completed successfully."
else
echo "WARNING: Migration failed or timed out. Continuing anyway..."