diff --git a/docker/scripts/install.sh b/docker/scripts/install.sh index 5cae0a328..c08b49729 100644 --- a/docker/scripts/install.sh +++ b/docker/scripts/install.sh @@ -2,50 +2,85 @@ # ============================================================================= # SurfSense — One-line Install Script # Usage: curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash +# +# Handles two cases automatically: +# 1. Fresh install — no prior SurfSense data detected +# 2. Migration from the legacy all-in-one container (surfsense-data volume) +# Downloads and runs migrate-database.sh --yes, then restores the dump +# into the new PostgreSQL 17 stack. The user runs one command for both. +# +# If you used custom database credentials in the old all-in-one container, run +# migrate-database.sh manually first (with --db-user / --db-password flags), +# then re-run this script: +# curl -fsSL .../docker/scripts/migrate-database.sh | bash -s -- --db-user X --db-password Y # ============================================================================= set -euo pipefail REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main" INSTALL_DIR="./surfsense" +OLD_VOLUME="surfsense-data" +DUMP_FILE="./surfsense_migration_backup.sql" +KEY_FILE="./surfsense_migration_secret.key" +MIGRATION_MODE=false + CYAN='\033[1;36m' YELLOW='\033[1;33m' +GREEN='\033[0;32m' RED='\033[0;31m' +BOLD='\033[1m' NC='\033[0m' -info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; } -warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; } -error() { printf "${RED}[SurfSense]${NC} %s\n" "$1" >&2; exit 1; } +info() { printf "${CYAN}[SurfSense]${NC} %s\n" "$1"; } +success() { printf "${GREEN}[SurfSense]${NC} %s\n" "$1"; } +warn() { printf "${YELLOW}[SurfSense]${NC} %s\n" "$1"; } +error() { printf "${RED}[SurfSense]${NC} ERROR: %s\n" "$1" >&2; exit 1; } +step() { printf "\n${BOLD}${CYAN}── %s${NC}\n" "$1"; } -# ── Pre-flight checks 
─────────────────────────────────────────────────────── +# ── Pre-flight checks ──────────────────────────────────────────────────────── -command -v docker >/dev/null 2>&1 || error "Docker is not installed. Please install Docker first: https://docs.docker.com/get-docker/" +step "Checking prerequisites" -# Detect legacy all-in-one volume — must migrate before installing -if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q '^surfsense-data$'; then - printf "${RED}[SurfSense]${NC} Legacy volume 'surfsense-data' detected.\n" >&2 - printf "${YELLOW}[SurfSense]${NC} You appear to be upgrading from the old all-in-one SurfSense container.\n" >&2 - printf "${YELLOW}[SurfSense]${NC} The database has been upgraded from PostgreSQL 14 to 17 and your data\n" >&2 - printf "${YELLOW}[SurfSense]${NC} must be migrated before running the new stack.\n" >&2 - printf "\n" >&2 - printf "${YELLOW}[SurfSense]${NC} Run the migration script first:\n" >&2 - printf "${CYAN}[SurfSense]${NC} curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash\n" >&2 - printf "\n" >&2 - printf "${YELLOW}[SurfSense]${NC} See the full guide at: https://surfsense.net/docs/how-to/migrate-from-allinone\n" >&2 - exit 1 -fi +command -v docker >/dev/null 2>&1 \ + || error "Docker is not installed. Install it at: https://docs.docker.com/get-docker/" +success "Docker found." + +docker info >/dev/null 2>&1 \ + || error "Docker daemon is not running. Please start Docker and try again." +success "Docker daemon is running." if docker compose version >/dev/null 2>&1; then DC="docker compose" elif command -v docker-compose >/dev/null 2>&1; then DC="docker-compose" else - error "Docker Compose is not installed. Please install Docker Compose: https://docs.docker.com/compose/install/" + error "Docker Compose is not installed. Install it at: https://docs.docker.com/compose/install/" fi +success "Docker Compose found ($DC)." 
+ +# ── Wait-for-postgres helper ───────────────────────────────────────────────── +wait_for_pg() { + local db_user="$1" + local max_attempts=45 + local attempt=0 + + info "Waiting for PostgreSQL to accept connections..." + until (cd "${INSTALL_DIR}" && ${DC} exec -T db pg_isready -U "${db_user}" -q 2>/dev/null); do + attempt=$((attempt + 1)) + if [[ $attempt -ge $max_attempts ]]; then + error "PostgreSQL did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: cd ${INSTALL_DIR} && ${DC} logs db" + fi + printf "." + sleep 2 + done + printf "\n" + success "PostgreSQL is ready." +} # ── Download files ─────────────────────────────────────────────────────────── -info "Creating installation directory: ${INSTALL_DIR}" +step "Downloading SurfSense files" +info "Installation directory: ${INSTALL_DIR}" mkdir -p "${INSTALL_DIR}/scripts" FILES=( @@ -53,39 +88,148 @@ FILES=( "docker/.env.example:.env.example" "docker/postgresql.conf:postgresql.conf" "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh" + "docker/scripts/migrate-database.sh:scripts/migrate-database.sh" ) for entry in "${FILES[@]}"; do src="${entry%%:*}" dest="${entry##*:}" info "Downloading ${dest}..." - curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" || error "Failed to download ${src}" + curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" \ + || error "Failed to download ${dest}. Check your internet connection and try again." done chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh" +chmod +x "${INSTALL_DIR}/scripts/migrate-database.sh" +success "All files downloaded to ${INSTALL_DIR}/" + +# ── Legacy all-in-one detection ────────────────────────────────────────────── +# Detect surfsense-data volume → migration mode. +# If a dump already exists (from a previous partial run) skip extraction and +# go straight to restore — this makes re-runs safe and idempotent. 
+ +if docker volume ls --format '{{.Name}}' 2>/dev/null | grep -q "^${OLD_VOLUME}$"; then + MIGRATION_MODE=true + + if [[ -f "${DUMP_FILE}" ]]; then + step "Migration mode — using existing dump (skipping extraction)" + info "Found existing dump: ${DUMP_FILE}" + info "Skipping data extraction — proceeding directly to restore." + info "To force a fresh extraction, remove the dump first: rm ${DUMP_FILE}" + else + step "Migration mode — legacy all-in-one container detected" + warn "Volume '${OLD_VOLUME}' found. Your data will be migrated automatically." + warn "PostgreSQL is being upgraded from version 14 to 17." + warn "Your original data will NOT be deleted." + printf "\n" + info "Running data extraction (migrate-database.sh --yes)..." + info "Full extraction log: ./surfsense-migration.log" + printf "\n" + + # Run extraction non-interactively. On failure the error from + # migrate-database.sh is printed and install.sh exits here. + bash "${INSTALL_DIR}/scripts/migrate-database.sh" --yes \ + || error "Data extraction failed. See ./surfsense-migration.log for details.\nYou can also run migrate-database.sh manually with custom flags:\n bash ${INSTALL_DIR}/scripts/migrate-database.sh --db-user X --db-password Y" + + printf "\n" + success "Data extraction complete. Proceeding with installation and restore." + fi +fi # ── Set up .env ────────────────────────────────────────────────────────────── +step "Configuring environment" + if [ ! -f "${INSTALL_DIR}/.env" ]; then cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env" - SECRET_KEY=$(openssl rand -base64 32 2>/dev/null || head -c 32 /dev/urandom | base64) + if $MIGRATION_MODE && [[ -f "${KEY_FILE}" ]]; then + SECRET_KEY=$(cat "${KEY_FILE}" | tr -d '[:space:]') + success "Using SECRET_KEY recovered from legacy container." + else + SECRET_KEY=$(openssl rand -base64 32 2>/dev/null \ + || head -c 32 /dev/urandom | base64 | tr -d '\n') + success "Generated new random SECRET_KEY." 
+ fi + if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env" else sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${SECRET_KEY}|" "${INSTALL_DIR}/.env" fi - - info "Generated random SECRET_KEY in .env" + info "Created ${INSTALL_DIR}/.env" else - warn ".env already exists — skipping (your existing config is preserved)" + warn ".env already exists — keeping your existing configuration." fi # ── Start containers ───────────────────────────────────────────────────────── -info "Starting SurfSense..." -cd "${INSTALL_DIR}" -${DC} up -d +if $MIGRATION_MODE; then + # Read DB credentials from .env (fall back to defaults from docker-compose.yml) + DB_USER=$(grep '^DB_USER=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1) + DB_PASS=$(grep '^DB_PASSWORD=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1) + DB_NAME=$(grep '^DB_NAME=' "${INSTALL_DIR}/.env" 2>/dev/null | cut -d= -f2 | tr -d '"' | head -1) + DB_USER="${DB_USER:-surfsense}" + DB_PASS="${DB_PASS:-surfsense}" + DB_NAME="${DB_NAME:-surfsense}" + + step "Starting PostgreSQL 17" + (cd "${INSTALL_DIR}" && ${DC} up -d db) + wait_for_pg "${DB_USER}" + + step "Restoring database" + info "Restoring dump into PostgreSQL 17 — this may take a while for large databases..." 
+ + RESTORE_ERR="/tmp/surfsense_restore_err.log" + (cd "${INSTALL_DIR}" && ${DC} exec -T \ + -e PGPASSWORD="${DB_PASS}" \ + db psql -U "${DB_USER}" -d "${DB_NAME}" \ + 2>"${RESTORE_ERR}") < "${DUMP_FILE}" || true + + # Surface real errors; ignore benign "already exists" noise from pg_dump headers + FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR}" \ + | grep -iv "already exists" \ + | grep -iv "multiple primary keys" \ + || true) + + if [[ -n "${FATAL_ERRORS}" ]]; then + warn "Restore completed with errors (may be harmless pg_dump header noise):" + printf "%s\n" "${FATAL_ERRORS}" + warn "If SurfSense behaves incorrectly, inspect manually:" + warn " cd ${INSTALL_DIR} && ${DC} exec db psql -U ${DB_USER} -d ${DB_NAME} < ${DUMP_FILE}" + else + success "Database restored with no fatal errors." + fi + + # Smoke test — verify tables are present + TABLE_COUNT=$( + cd "${INSTALL_DIR}" && ${DC} exec -T \ + -e PGPASSWORD="${DB_PASS}" \ + db psql -U "${DB_USER}" -d "${DB_NAME}" -t \ + -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \ + 2>/dev/null | tr -d ' \n' || echo "0" + ) + if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then + warn "Smoke test: no tables found after restore." + warn "The restore may have failed silently. Check: cd ${INSTALL_DIR} && ${DC} logs db" + else + success "Smoke test passed: ${TABLE_COUNT} table(s) restored successfully." + fi + + step "Starting all SurfSense services" + (cd "${INSTALL_DIR}" && ${DC} up -d) + success "All services started." + + # Key file is no longer needed — SECRET_KEY is now in .env + rm -f "${KEY_FILE}" + +else + step "Starting SurfSense" + (cd "${INSTALL_DIR}" && ${DC} up -d) + success "All services started." +fi + +# ── Done ───────────────────────────────────────────────────────────────────── echo "" printf '\033[1;37m' @@ -105,6 +249,7 @@ Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b. 
EOF printf " Your personal AI-powered search engine ${YELLOW}v${SURFSENSE_VERSION:-latest}${NC}\n" printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n" + info " Frontend: http://localhost:3000" info " Backend: http://localhost:8000" info " API Docs: http://localhost:8000/docs" @@ -114,5 +259,13 @@ info " Logs: cd ${INSTALL_DIR} && ${DC} logs -f" info " Stop: cd ${INSTALL_DIR} && ${DC} down" info " Update: cd ${INSTALL_DIR} && ${DC} pull && ${DC} up -d" info "" -warn " First startup may take sometime." -warn " Edit .env to configure OAuth connectors, API keys, etc." + +if $MIGRATION_MODE; then + warn " Migration complete! Open frontend and verify your data." + warn " Once verified, clean up the legacy volume and dump file:" + warn " docker volume rm ${OLD_VOLUME}" + warn " rm ${DUMP_FILE}" +else + warn " First startup may take a few minutes while images are pulled." + warn " Edit ${INSTALL_DIR}/.env to configure API keys, OAuth, etc." +fi diff --git a/docker/scripts/migrate-database.sh b/docker/scripts/migrate-database.sh index c55fb002e..3e5c29cb8 100755 --- a/docker/scripts/migrate-database.sh +++ b/docker/scripts/migrate-database.sh @@ -2,9 +2,8 @@ # ============================================================================= # SurfSense — Database Migration Script # -# Migrates data from the legacy all-in-one surfsense-data volume (PostgreSQL 14) -# to the new multi-container surfsense-postgres volume (PostgreSQL 17) using -# a logical pg_dump / psql restore — safe across major PG versions. +# Extracts data from the legacy all-in-one surfsense-data volume (PostgreSQL 14) +# and saves it as a SQL dump + SECRET_KEY file ready for install.sh to restore. 
# # Usage: # bash migrate-database.sh [options] @@ -13,18 +12,30 @@ # --db-user USER Old PostgreSQL username (default: surfsense) # --db-password PASS Old PostgreSQL password (default: surfsense) # --db-name NAME Old PostgreSQL database (default: surfsense) -# --install-dir DIR New installation directory (default: ./surfsense) # --yes / -y Skip all confirmation prompts # --help / -h Show this help # # Prerequisites: -# - Docker and Docker Compose installed and running +# - Docker installed and running # - The legacy surfsense-data volume must exist # - ~500 MB free disk space for the dump file # +# What this script does: +# 1. Stops any container using surfsense-data (to prevent corruption) +# 2. Starts a temporary PG14 container against the old volume +# 3. Dumps the database to ./surfsense_migration_backup.sql +# 4. Recovers the SECRET_KEY to ./surfsense_migration_secret.key +# 5. Exits — leaving installation to install.sh +# # What this script does NOT do: -# - Delete the original surfsense-data volume (you must do this manually -# after verifying the migration succeeded) +# - Delete the original surfsense-data volume (do this manually after verifying) +# - Install the new SurfSense stack (install.sh handles that automatically) +# +# Note: +# install.sh downloads and runs this script automatically when it detects the +# legacy surfsense-data volume. You only need to run this script manually if +# you have custom database credentials (--db-user / --db-password / --db-name) +# or if the automatic migration inside install.sh fails at the extraction step. 
# ============================================================================= set -euo pipefail @@ -49,18 +60,16 @@ error() { printf "${RED}[SurfSense]${NC} ERROR: %s\n" "$1" >&2; exit 1; } step() { printf "\n${BOLD}${CYAN}── Step %s: %s${NC}\n" "$1" "$2"; } # ── Constants ───────────────────────────────────────────────────────────────── -REPO_RAW="https://raw.githubusercontent.com/MODSetter/SurfSense/main" OLD_VOLUME="surfsense-data" -NEW_PG_VOLUME="surfsense-postgres" TEMP_CONTAINER="surfsense-pg14-migration" DUMP_FILE="./surfsense_migration_backup.sql" -PG14_IMAGE="postgres:14" +KEY_FILE="./surfsense_migration_secret.key" +PG14_IMAGE="pgvector/pgvector:pg14" # ── Defaults ────────────────────────────────────────────────────────────────── OLD_DB_USER="surfsense" OLD_DB_PASSWORD="surfsense" OLD_DB_NAME="surfsense" -INSTALL_DIR="./surfsense" AUTO_YES=false # ── Argument parsing ────────────────────────────────────────────────────────── @@ -69,7 +78,6 @@ while [[ $# -gt 0 ]]; do --db-user) OLD_DB_USER="$2"; shift 2 ;; --db-password) OLD_DB_PASSWORD="$2"; shift 2 ;; --db-name) OLD_DB_NAME="$2"; shift 2 ;; - --install-dir) INSTALL_DIR="$2"; shift 2 ;; --yes|-y) AUTO_YES=true; shift ;; --help|-h) grep '^#' "$0" | grep -v '^#!/' | sed 's/^# \{0,1\}//' @@ -96,7 +104,7 @@ cleanup() { docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true fi if [[ $exit_code -ne 0 ]]; then - printf "\n${RED}[SurfSense]${NC} Migration failed (exit code %s).\n" "${exit_code}" >&2 + printf "\n${RED}[SurfSense]${NC} Migration data extraction failed (exit code %s).\n" "${exit_code}" >&2 printf "${RED}[SurfSense]${NC} Full log: %s\n" "${LOG_FILE}" >&2 printf "${YELLOW}[SurfSense]${NC} Your original data in '${OLD_VOLUME}' is untouched.\n" >&2 fi @@ -104,7 +112,6 @@ cleanup() { trap cleanup EXIT # ── Wait-for-postgres helper ────────────────────────────────────────────────── -# $1 = container name/id $2 = db user $3 = label for messages wait_for_pg() { local container="$1" local user="$2" @@ 
-116,7 +123,7 @@ wait_for_pg() { until docker exec "${container}" pg_isready -U "${user}" -q 2>/dev/null; do attempt=$((attempt + 1)) if [[ $attempt -ge $max_attempts ]]; then - error "${label} did not become ready after $((max_attempts * 2)) seconds.\nCheck logs: docker logs ${container}" + error "${label} did not become ready after $((max_attempts * 2)) seconds. Check: docker logs ${container}" fi printf "." sleep 2 @@ -142,7 +149,7 @@ Y88b d88P Y88b 888 888 888 Y88b d88P Y8b. 888 888 X88 Y8b. EOF printf "${NC}" -printf "${CYAN} Database Migration: All-in-One → Multi-Container (PG 14 → 17)${NC}\n" +printf "${CYAN} Data Extraction: All-in-One (PG14) → Migration Dump${NC}\n" printf "${CYAN}══════════════════════════════════════════════════════════════${NC}\n\n" # ── Step 0: Pre-flight checks ───────────────────────────────────────────────── @@ -156,39 +163,31 @@ command -v docker >/dev/null 2>&1 \ docker info >/dev/null 2>&1 \ || error "Docker daemon is not running. Please start Docker and try again." -# Docker Compose -if docker compose version >/dev/null 2>&1; then - DC="docker compose" -elif command -v docker-compose >/dev/null 2>&1; then - DC="docker-compose" -else - error "Docker Compose not found. Install it at: https://docs.docker.com/compose/install/" -fi -info "Docker Compose: ${DC}" - -# OS detection (needed for sed -i portability) -case "$(uname -s)" in - Darwin*) OS_TYPE="darwin" ;; - Linux*) OS_TYPE="linux" ;; - CYGWIN*|MINGW*|MSYS*) OS_TYPE="windows" ;; - *) OS_TYPE="unknown" ;; -esac -info "OS: ${OS_TYPE}" - # Old volume must exist docker volume ls --format '{{.Name}}' | grep -q "^${OLD_VOLUME}$" \ || error "Legacy volume '${OLD_VOLUME}' not found.\n Are you sure you ran the old all-in-one SurfSense container?" success "Found legacy volume: ${OLD_VOLUME}" -# New PG volume must NOT already exist -if docker volume ls --format '{{.Name}}' | grep -q "^${NEW_PG_VOLUME}$"; then - warn "Volume '${NEW_PG_VOLUME}' already exists." 
- warn "If migration already succeeded, you do not need to run this script again." - warn "If a previous run failed partway, remove the partial volume first:" - warn " docker volume rm ${NEW_PG_VOLUME}" - error "Aborting to avoid overwriting existing data." +# Detect and stop any container currently using the old volume +# (mounting a live PG volume into a second container causes the new container's +# entrypoint to chown the data files, breaking the running container's access) +OLD_CONTAINER=$(docker ps --filter "volume=${OLD_VOLUME}" --format '{{.Names}}' | head -n1 || true) +if [[ -n "${OLD_CONTAINER}" ]]; then + warn "Container '${OLD_CONTAINER}' is running and using the '${OLD_VOLUME}' volume." + warn "It must be stopped before migration to prevent data file corruption." + confirm "Stop '${OLD_CONTAINER}' now and proceed with data extraction?" + docker stop "${OLD_CONTAINER}" >/dev/null 2>&1 \ + || error "Failed to stop '${OLD_CONTAINER}'. Try: docker stop ${OLD_CONTAINER}" + success "Container '${OLD_CONTAINER}' stopped." +fi + +# Bail out if a dump already exists — don't overwrite a previous successful run +if [[ -f "${DUMP_FILE}" ]]; then + warn "Dump file '${DUMP_FILE}' already exists." + warn "If a previous extraction succeeded, just run install.sh now." + warn "To re-extract, remove the file first: rm ${DUMP_FILE}" + error "Aborting to avoid overwriting an existing dump." fi -success "Target volume '${NEW_PG_VOLUME}' does not yet exist — safe to proceed." # Clean up any stale temp container from a previous failed run if docker ps -a --format '{{.Names}}' | grep -q "^${TEMP_CONTAINER}$"; then @@ -212,14 +211,13 @@ fi success "All pre-flight checks passed." 
# ── Confirmation prompt ─────────────────────────────────────────────────────── -printf "\n${BOLD}Migration plan:${NC}\n" +printf "\n${BOLD}Extraction plan:${NC}\n" printf " Source volume : ${YELLOW}%s${NC} (PG14 data at /data/postgres)\n" "${OLD_VOLUME}" -printf " Target volume : ${YELLOW}%s${NC} (PG17 multi-container stack)\n" "${NEW_PG_VOLUME}" printf " Old credentials : user=${YELLOW}%s${NC} db=${YELLOW}%s${NC}\n" "${OLD_DB_USER}" "${OLD_DB_NAME}" -printf " Install dir : ${YELLOW}%s${NC}\n" "${INSTALL_DIR}" printf " Dump saved to : ${YELLOW}%s${NC}\n" "${DUMP_FILE}" +printf " SECRET_KEY to : ${YELLOW}%s${NC}\n" "${KEY_FILE}" printf " Log file : ${YELLOW}%s${NC}\n\n" "${LOG_FILE}" -confirm "Start migration? (Your original data will not be deleted.)" +confirm "Start data extraction? (Your original data will not be deleted or modified.)" # ── Step 1: Start temporary PostgreSQL 14 container ────────────────────────── step "1" "Starting temporary PostgreSQL 14 container" @@ -228,6 +226,21 @@ info "Pulling ${PG14_IMAGE}..." docker pull "${PG14_IMAGE}" >/dev/null 2>&1 \ || warn "Could not pull ${PG14_IMAGE} — using cached image if available." +# Detect the UID that owns the existing data files and run the temp container +# as that user. This prevents the official postgres image entrypoint from +# running as root and doing `chown -R postgres /data/postgres`, which would +# re-own the files to UID 999 and break any subsequent access by the original +# container's postgres process (which may run as a different UID). +DATA_UID=$(docker run --rm -v "${OLD_VOLUME}:/data" alpine \ + stat -c '%u' /data/postgres 2>/dev/null || echo "") +if [[ -z "${DATA_UID}" || "${DATA_UID}" == "0" ]]; then + warn "Could not detect data directory UID — falling back to default (may chown files)." + USER_FLAG="" +else + info "Data directory owned by UID ${DATA_UID} — starting temp container as that user." 
+ USER_FLAG="--user ${DATA_UID}" +fi + docker run -d \ --name "${TEMP_CONTAINER}" \ -v "${OLD_VOLUME}:/data" \ @@ -235,6 +248,7 @@ docker run -d \ -e POSTGRES_USER="${OLD_DB_USER}" \ -e POSTGRES_PASSWORD="${OLD_DB_PASSWORD}" \ -e POSTGRES_DB="${OLD_DB_NAME}" \ + ${USER_FLAG} \ "${PG14_IMAGE}" >/dev/null success "Temporary container '${TEMP_CONTAINER}' started." @@ -245,13 +259,12 @@ step "2" "Dumping PostgreSQL 14 database" info "Running pg_dump — this may take a while for large databases..." -# Run pg_dump and capture stderr separately to detect real failures if ! docker exec \ -e PGPASSWORD="${OLD_DB_PASSWORD}" \ "${TEMP_CONTAINER}" \ pg_dump -U "${OLD_DB_USER}" --no-password "${OLD_DB_NAME}" \ - > "${DUMP_FILE}" 2>/tmp/pg_dump_err; then - cat /tmp/pg_dump_err >&2 + > "${DUMP_FILE}" 2>/tmp/surfsense_pgdump_err; then + cat /tmp/surfsense_pgdump_err >&2 error "pg_dump failed. See above for details." fi @@ -271,7 +284,7 @@ DUMP_LINES=$(wc -l < "${DUMP_FILE}" | tr -d ' ') DUMP_SIZE=$(du -sh "${DUMP_FILE}" 2>/dev/null | cut -f1) success "Dump complete: ${DUMP_SIZE} (${DUMP_LINES} lines) → ${DUMP_FILE}" -# Stop the temp container now (trap will also handle it on unexpected exit) +# Stop the temp container (trap will also handle it on unexpected exit) info "Stopping temporary PostgreSQL 14 container..." docker stop "${TEMP_CONTAINER}" >/dev/null 2>&1 || true docker rm "${TEMP_CONTAINER}" >/dev/null 2>&1 || true @@ -292,148 +305,49 @@ if docker run --rm -v "${OLD_VOLUME}:/data" alpine \ success "Recovered SECRET_KEY from '${OLD_VOLUME}'." else warn "No SECRET_KEY file found at /data/.secret_key in '${OLD_VOLUME}'." - warn "This means the all-in-one was launched with SECRET_KEY set as an explicit environment variable." 
- printf "${YELLOW}[SurfSense]${NC} Enter the SECRET_KEY from your old container's environment\n" - printf "${YELLOW}[SurfSense]${NC} (press Enter to generate a new one — existing sessions will be invalidated): " - read -r RECOVERED_KEY - if [[ -z "${RECOVERED_KEY}" ]]; then + warn "This means the all-in-one container was launched with SECRET_KEY set as an explicit env var." + if $AUTO_YES; then + # Non-interactive (called from install.sh) — auto-generate rather than hanging on read RECOVERED_KEY=$(openssl rand -base64 32 2>/dev/null \ || head -c 32 /dev/urandom | base64 | tr -d '\n') - warn "Generated a new SECRET_KEY. All active browser sessions will be logged out after migration." + warn "Non-interactive mode: generated a new SECRET_KEY automatically." + warn "All active browser sessions will be logged out after migration." + warn "To restore your original key, update SECRET_KEY in ./surfsense/.env afterwards." + else + printf "${YELLOW}[SurfSense]${NC} Enter the SECRET_KEY from your old container's environment\n" + printf "${YELLOW}[SurfSense]${NC} (press Enter to generate a new one — existing sessions will be invalidated): " + read -r RECOVERED_KEY + if [[ -z "${RECOVERED_KEY}" ]]; then + RECOVERED_KEY=$(openssl rand -base64 32 2>/dev/null \ + || head -c 32 /dev/urandom | base64 | tr -d '\n') + warn "Generated a new SECRET_KEY. All active browser sessions will be logged out after migration." + fi fi fi -# ── Step 4: Set up the new installation ─────────────────────────────────────── -step "4" "Setting up new SurfSense installation" - -if [[ -f "${INSTALL_DIR}/docker-compose.yml" ]]; then - warn "Directory '${INSTALL_DIR}' already exists — skipping file download." 
-else - info "Creating installation directory: ${INSTALL_DIR}" - mkdir -p "${INSTALL_DIR}/scripts" - - FILES=( - "docker/docker-compose.yml:docker-compose.yml" - "docker/.env.example:.env.example" - "docker/postgresql.conf:postgresql.conf" - "docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh" - ) - - for entry in "${FILES[@]}"; do - src="${entry%%:*}" - dest="${entry##*:}" - info "Downloading ${dest}..." - curl -fsSL "${REPO_RAW}/${src}" -o "${INSTALL_DIR}/${dest}" \ - || error "Failed to download ${src}. Check your internet connection." - done - - chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh" - success "Compose files downloaded to ${INSTALL_DIR}/" -fi - -# Create .env from example if it does not exist -if [[ ! -f "${INSTALL_DIR}/.env" ]]; then - cp "${INSTALL_DIR}/.env.example" "${INSTALL_DIR}/.env" - info "Created ${INSTALL_DIR}/.env from .env.example" -fi - -# Write the recovered SECRET_KEY into .env (handles both placeholder and pre-set values) -if [[ "${OS_TYPE}" == "darwin" ]]; then - sed -i '' "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env" - sed -i '' "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env" -else - sed -i "s|SECRET_KEY=replace_me_with_a_random_string|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env" - sed -i "s|^SECRET_KEY=.*|SECRET_KEY=${RECOVERED_KEY}|" "${INSTALL_DIR}/.env" -fi -success "SECRET_KEY written to ${INSTALL_DIR}/.env" - -# ── Step 5: Start PostgreSQL 17 (new stack) ─────────────────────────────────── -step "5" "Starting PostgreSQL 17" - -(cd "${INSTALL_DIR}" && ${DC} up -d db) - -# Resolve the running container name for direct docker exec calls -PG17_CONTAINER=$(cd "${INSTALL_DIR}" && ${DC} ps -q db 2>/dev/null | head -n1 || true) -if [[ -z "${PG17_CONTAINER}" ]]; then - # Fallback to the predictable compose container name - PG17_CONTAINER="surfsense-db-1" -fi -info "PostgreSQL 17 container: ${PG17_CONTAINER}" - -wait_for_pg 
"${PG17_CONTAINER}" "${OLD_DB_USER}" "PostgreSQL 17" - -# ── Step 6: Restore the dump ────────────────────────────────────────────────── -step "6" "Restoring database into PostgreSQL 17" - -info "Running psql restore — this may take a while for large databases..." - -RESTORE_ERR_FILE="/tmp/surfsense_restore_err.log" - -docker exec -i \ - -e PGPASSWORD="${OLD_DB_PASSWORD}" \ - "${PG17_CONTAINER}" \ - psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" \ - < "${DUMP_FILE}" \ - 2>"${RESTORE_ERR_FILE}" || true # psql exits non-zero on warnings; check below - -# Surface any real (non-benign) errors -FATAL_ERRORS=$(grep -i "^ERROR:" "${RESTORE_ERR_FILE}" \ - | grep -iv "already exists" \ - | grep -iv "multiple primary keys" \ - || true) - -if [[ -n "${FATAL_ERRORS}" ]]; then - warn "Restore completed with the following errors:" - printf "%s\n" "${FATAL_ERRORS}" - confirm "These may be harmless (e.g. pre-existing system objects). Continue?" -else - success "Restore completed with no fatal errors." -fi - -# Smoke test — verify tables exist in the restored database -TABLE_COUNT=$( - docker exec \ - -e PGPASSWORD="${OLD_DB_PASSWORD}" \ - "${PG17_CONTAINER}" \ - psql -U "${OLD_DB_USER}" -d "${OLD_DB_NAME}" -t \ - -c "SELECT count(*) FROM information_schema.tables WHERE table_schema = 'public';" \ - 2>/dev/null | tr -d ' \n' || echo "0" -) - -if [[ "${TABLE_COUNT}" == "0" || -z "${TABLE_COUNT}" ]]; then - warn "Smoke test: no tables found in the restored database." - warn "The restore may have failed silently. Inspect the dump and restore manually:" - warn " docker exec -i ${PG17_CONTAINER} psql -U ${OLD_DB_USER} -d ${OLD_DB_NAME} < ${DUMP_FILE}" - confirm "Continue starting the rest of the stack anyway?" -else - success "Smoke test passed: ${TABLE_COUNT} table(s) found in the restored database." 
-fi - -# ── Step 7: Start all remaining services ────────────────────────────────────── -step "7" "Starting all SurfSense services" - -(cd "${INSTALL_DIR}" && ${DC} up -d) -success "All services started." +# Save SECRET_KEY to a file for install.sh to pick up +printf '%s' "${RECOVERED_KEY}" > "${KEY_FILE}" +success "SECRET_KEY saved to ${KEY_FILE}" # ── Done ────────────────────────────────────────────────────────────────────── printf "\n${GREEN}${BOLD}" printf "══════════════════════════════════════════════════════════════\n" -printf " Migration complete!\n" +printf " Data extraction complete!\n" printf "══════════════════════════════════════════════════════════════\n" printf "${NC}\n" -success " Frontend : http://localhost:3000" -success " Backend : http://localhost:8000" -success " API Docs : http://localhost:8000/docs" +success "Dump file : ${DUMP_FILE} (${DUMP_SIZE})" +success "Secret key: ${KEY_FILE}" printf "\n" -info " Config : ${INSTALL_DIR}/.env" -info " Logs : cd ${INSTALL_DIR} && ${DC} logs -f" +info "Next step — run install.sh from this same directory:" printf "\n" -warn "Next steps:" -warn " 1. Open http://localhost:3000 and verify your data is intact." -warn " 2. Once satisfied, remove the legacy volume (IRREVERSIBLE):" -warn " docker volume rm ${OLD_VOLUME}" -warn " 3. Delete the dump file once you no longer need it as a backup:" -warn " rm ${DUMP_FILE}" -warn " Full migration log saved to: ${LOG_FILE}" +printf "${CYAN} curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash${NC}\n" +printf "\n" +info "install.sh will detect the dump, restore your data into PostgreSQL 17," +info "and start the full SurfSense stack automatically." 
+printf "\n" +warn "Keep both files until you have verified the migration:" +warn " ${DUMP_FILE}" +warn " ${KEY_FILE}" +warn "Full log saved to: ${LOG_FILE}" printf "\n" diff --git a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx index 74d3d0d0b..36233145d 100644 --- a/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx +++ b/surfsense_web/content/docs/how-to/migrate-from-allinone.mdx @@ -11,82 +11,79 @@ Because PostgreSQL data files are not compatible between major versions, a **log This guide only applies to users who ran the legacy `docker-compose.quickstart.yml` (the all-in-one `surfsense` container). If you were already using `docker/docker-compose.yml`, you do not need to migrate. - -If you try to run `install.sh` while the old `surfsense-data` volume exists, the script will detect it and stop with instructions to migrate first. - +--- + +## Option A — One command (recommended) + +`install.sh` detects the legacy `surfsense-data` volume and handles the full migration automatically — no separate migration script needed. Just run the same install command you would use for a fresh install: + +```bash +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash +``` + +**What it does automatically:** + +1. Downloads all SurfSense files (including `migrate-database.sh`) into `./surfsense/` +2. Detects the `surfsense-data` volume and enters migration mode +3. Stops the old all-in-one container if it is still running +4. Starts a temporary PostgreSQL 14 container and dumps your database +5. Recovers your `SECRET_KEY` from the old volume +6. Starts PostgreSQL 17, restores the dump, runs a smoke test +7. Starts all services + +Your original `surfsense-data` volume is **never deleted** — you remove it manually after verifying. + +### After it completes + +1. Open [http://localhost:3000](http://localhost:3000) and confirm your data is intact. +2. 
Once satisfied, remove the old volume (irreversible): + ```bash + docker volume rm surfsense-data + ``` +3. Delete the dump file once you no longer need it as a backup: + ```bash + rm ./surfsense_migration_backup.sql + ``` + +### If the migration fails mid-way + +The dump file is saved to `./surfsense_migration_backup.sql` as a checkpoint. Simply re-run `install.sh` — it will detect the existing dump and skip straight to the restore step without re-extracting. --- -## Option A — Migration Script (recommended) +## Option B — Manual migration script (custom credentials) -A single script handles the entire process automatically: it dumps your PostgreSQL 14 data, recovers your `SECRET_KEY`, sets up the new stack, and restores into PostgreSQL 17. - -**Prerequisites:** Docker running, ~500 MB free disk space, internet access. +If you launched the old all-in-one container with custom database credentials (`POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB` environment variables), the automatic path will use wrong credentials. Run `migrate-database.sh` manually first: ```bash -curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh | bash +# 1. Extract data with your custom credentials +bash ./surfsense/scripts/migrate-database.sh --db-user myuser --db-password mypass --db-name mydb + +# 2. 
Install and restore (detects the dump automatically) +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/install.sh | bash ``` -Or download and inspect it first (recommended): +Or download and run if you haven't run `install.sh` yet: ```bash curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/migrate-database.sh -o migrate-database.sh -# Review the script, then run: -bash migrate-database.sh +bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb ``` -### Options +### Migration script options | Flag | Description | Default | |------|-------------|---------| | `--db-user USER` | Old PostgreSQL username | `surfsense` | | `--db-password PASS` | Old PostgreSQL password | `surfsense` | | `--db-name NAME` | Old PostgreSQL database | `surfsense` | -| `--install-dir DIR` | New installation directory | `./surfsense` | -| `--yes` / `-y` | Skip confirmation prompts | — | - -If you customised the database credentials in your old all-in-one container, pass them explicitly: - -```bash -bash migrate-database.sh --db-user myuser --db-password mypass --db-name mydb -``` - -### What the script does - -1. Checks prerequisites and confirms the `surfsense-data` volume exists -2. Starts a temporary `postgres:14` container against the old data -3. Runs `pg_dump` and validates the dump file (size + header check) -4. Recovers your `SECRET_KEY` from the old volume (or prompts if not found) -5. Downloads the new compose files into `./surfsense/` (skips if already present) -6. Writes the recovered `SECRET_KEY` into `./surfsense/.env` -7. Starts the new `db` service (PostgreSQL 17), waits for readiness -8. Restores the dump with `psql` and runs a smoke test -9. Starts all remaining services - -The original `surfsense-data` volume is **never deleted** — you remove it manually after verifying the migration. - -### After the script completes - -1. 
Open [http://localhost:3000](http://localhost:3000) and confirm your data is intact. -2. Once satisfied, remove the old volume: - ```bash - docker volume rm surfsense-data - ``` -3. Delete the backup dump once you no longer need it: - ```bash - rm ./surfsense_migration_backup.sql - ``` +| `--yes` / `-y` | Skip confirmation prompts (used automatically by `install.sh`) | — | --- -## Option B — Manual Steps +## Option C — Manual steps -Use these steps if the migration script doesn't work on your platform (e.g. Windows without WSL2), or if you want full control over each step. - -### Before you start - -- Confirm the old volume exists: `docker volume ls | grep surfsense-data` -- Have ~500 MB free disk space for the SQL dump. +For users who prefer full control or whose platform doesn't support bash scripts (e.g. Windows without WSL2). ### Step 1 — Start a temporary PostgreSQL 14 container @@ -97,7 +94,7 @@ docker run -d --name surfsense-pg14-temp \ -e POSTGRES_USER=surfsense \ -e POSTGRES_PASSWORD=surfsense \ -e POSTGRES_DB=surfsense \ - postgres:14 + pgvector/pgvector:pg14 ``` Wait ~10 seconds, then confirm it is healthy: @@ -113,114 +110,78 @@ docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \ pg_dump -U surfsense surfsense > surfsense_backup.sql ``` -Verify the dump is valid: - -```bash -wc -l surfsense_backup.sql -grep "PostgreSQL database dump" surfsense_backup.sql -``` - ### Step 3 — Recover your SECRET\_KEY ```bash docker run --rm -v surfsense-data:/data alpine cat /data/.secret_key ``` -Copy the printed value for the next step. 
- ### Step 4 — Set up the new stack ```bash -git clone https://github.com/MODSetter/SurfSense.git -cd SurfSense/docker -cp .env.example .env +mkdir -p surfsense/scripts +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/docker-compose.yml -o surfsense/docker-compose.yml +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/.env.example -o surfsense/.env.example +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/postgresql.conf -o surfsense/postgresql.conf +curl -fsSL https://raw.githubusercontent.com/MODSetter/SurfSense/main/docker/scripts/init-electric-user.sh -o surfsense/scripts/init-electric-user.sh +chmod +x surfsense/scripts/init-electric-user.sh +cp surfsense/.env.example surfsense/.env ``` -Set `SECRET_KEY` in `.env` to the value recovered above. +Set `SECRET_KEY` in `surfsense/.env` to the value from Step 3. -### Step 5 — Start PostgreSQL 17 +### Step 5 — Start PostgreSQL 17 and restore ```bash +cd surfsense docker compose up -d db +docker compose exec db pg_isready -U surfsense # wait until ready +docker compose exec -T db psql -U surfsense -d surfsense < ../surfsense_backup.sql ``` -Wait until ready: - -```bash -docker compose exec db pg_isready -U surfsense -``` - -### Step 6 — Restore the database - -```bash -docker compose exec -T db \ - psql -U surfsense -d surfsense < surfsense_backup.sql -``` - -Harmless notices like `ERROR: role "surfsense" already exists` are expected. 
- -### Step 7 — Start all services +### Step 6 — Start all services ```bash docker compose up -d ``` -### Step 8 — Clean up - -After verifying everything works: +### Step 7 — Clean up ```bash -# Remove temporary PG14 container docker stop surfsense-pg14-temp && docker rm surfsense-pg14-temp - -# Remove old volume (irreversible — only after confirming migration success) -docker volume rm surfsense-data +docker volume rm surfsense-data # only after verifying migration succeeded ``` --- ## Troubleshooting -### Script exits with "surfsense-postgres already exists" +### `install.sh` runs normally with a blank database (no migration happened) -A previous migration attempt partially completed. Remove the incomplete volume and retry: +The legacy volume was not detected. Confirm it exists: ```bash -docker volume rm surfsense-postgres -bash migrate-database.sh +docker volume ls | grep surfsense-data ``` -### PostgreSQL 14 container fails to start - -Check the container logs: +If it doesn't appear, the old container may have used a different volume name. Check with: ```bash -docker logs surfsense-pg14-temp +docker volume ls | grep -i surfsense ``` -If you see permission errors, the data directory may need ownership correction. Run: +### Extraction fails with permission errors -```bash -docker exec surfsense-pg14-temp chown -R postgres:postgres /data/postgres -``` - -Then restart the container. - -### Empty or corrupt dump file - -If `surfsense_backup.sql` is smaller than expected, run the dump command again with verbose output: - -```bash -docker exec -e PGPASSWORD=surfsense surfsense-pg14-temp \ - pg_dump -U surfsense surfsense -v 2>&1 | head -40 -``` +The script detects the UID of the data files and runs the temporary PG14 container as that user. If you see permission errors in `./surfsense-migration.log`, run `migrate-database.sh` manually and check the log for details. 
### Cannot find `/data/.secret_key`

-If the all-in-one was launched with `SECRET_KEY` set explicitly as an environment variable, the key was never written to the volume. Set the same value manually in `docker/.env`. If it is lost, generate a new one:
+The all-in-one entrypoint always writes the key to `/data/.secret_key` unless you explicitly set the `SECRET_KEY` environment variable yourself. If the key is missing, the migration script auto-generates a new one (with a warning). You can update it manually in `./surfsense/.env` afterwards. Note that a new key invalidates all existing browser sessions — users will need to log in again.
+
+### Restore errors after re-running `install.sh`
+
+If the `surfsense-postgres` volume already exists from a previous partial run, remove it before retrying:

```bash
-openssl rand -base64 32
+docker volume rm surfsense-postgres
```
-
-Note: a new key invalidates all existing browser sessions — users will need to log in again.