chore: remove Electric SQL plumbing and infrastructure

Remove all Electric SQL client code, Docker service, env vars, CI build
args, install scripts, and documentation. Feature hooks that depend on
Electric are intentionally left in place to be rewritten with Rocicorp
Zero in subsequent commits.

Deleted:
- lib/electric/ (client.ts, context.ts, auth.ts, baseline.ts)
- ElectricProvider.tsx
- docker/scripts/init-electric-user.sh
- content/docs/how-to/electric-sql.mdx

Cleaned:
- package.json (4 @electric-sql/* deps)
- app/layout.tsx, UserDropdown.tsx, LayoutDataProvider.tsx
- docker-compose.yml, docker-compose.dev.yml
- Dockerfile, docker-entrypoint.js
- .env.example (frontend, docker, backend)
- CI workflows, install scripts, docs
This commit is contained in:
CREDO23 2026-03-23 16:40:25 +02:00
parent af5215fa44
commit 2b7465cdaa
30 changed files with 4 additions and 1511 deletions

View file

@ -57,7 +57,6 @@ jobs:
working-directory: surfsense_web
env:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${{ vars.NEXT_PUBLIC_FASTAPI_BACKEND_URL }}
NEXT_PUBLIC_ELECTRIC_URL: ${{ vars.NEXT_PUBLIC_ELECTRIC_URL }}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${{ vars.NEXT_PUBLIC_DEPLOYMENT_MODE }}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${{ vars.NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE }}

View file

@ -164,8 +164,6 @@ jobs:
${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__' || '' }}
${{ matrix.image == 'web' && 'NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__' || '' }}
- name: Export digest

View file

@ -35,7 +35,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# BACKEND_PORT=8929
# FRONTEND_PORT=3929
# ELECTRIC_PORT=5929
# SEARXNG_PORT=8888
# FLOWER_PORT=5555
@ -58,7 +57,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL
# NEXT_PUBLIC_ETL_SERVICE=DOCLING
# NEXT_PUBLIC_DEPLOYMENT_MODE=self-hosted
# NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
# ------------------------------------------------------------------------------
# Custom Domain / Reverse Proxy
@ -71,7 +69,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# NEXT_FRONTEND_URL=https://app.yourdomain.com
# BACKEND_URL=https://api.yourdomain.com
# NEXT_PUBLIC_FASTAPI_BACKEND_URL=https://api.yourdomain.com
# NEXT_PUBLIC_ELECTRIC_URL=https://electric.yourdomain.com
# ------------------------------------------------------------------------------
@ -101,19 +98,6 @@ EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
# Supports TLS: rediss://:password@host:6380/0
# REDIS_URL=redis://redis:6379/0
# ------------------------------------------------------------------------------
# Electric SQL (real-time sync credentials)
# ------------------------------------------------------------------------------
# These must match on the db, backend, and electric services.
# Change for security; defaults work out of the box.
# ELECTRIC_DB_USER=electric
# ELECTRIC_DB_PASSWORD=electric_password
# Full override for the Electric → Postgres connection URL.
# Leave commented out to use the Docker-managed `db` container (default).
# Uncomment and set `db` to `host.docker.internal` when pointing Electric at a local Postgres instance (e.g. Postgres.app on macOS):
# ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@db:5432/surfsense?sslmode=disable
# ------------------------------------------------------------------------------
# TTS & STT (Text-to-Speech / Speech-to-Text)
# ------------------------------------------------------------------------------

View file

@ -18,13 +18,10 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
- POSTGRES_USER=${DB_USER:-postgres}
- POSTGRES_PASSWORD=${DB_PASSWORD:-postgres}
- POSTGRES_DB=${DB_NAME:-surfsense}
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-surfsense}"]
@ -91,8 +88,6 @@ services:
- UNSTRUCTURED_HAS_PATCHED_LOOP=1
- LANGCHAIN_TRACING_V2=false
- LANGSMITH_TRACING=false
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- AUTH_TYPE=${AUTH_TYPE:-LOCAL}
- NEXT_FRONTEND_URL=${NEXT_FRONTEND_URL:-http://localhost:3000}
- SEARXNG_DEFAULT_HOST=${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
@ -130,8 +125,6 @@ services:
- REDIS_APP_URL=${REDIS_URL:-redis://redis:6379/0}
- CELERY_TASK_DEFAULT_QUEUE=surfsense
- PYTHONPATH=/app
- ELECTRIC_DB_USER=${ELECTRIC_DB_USER:-electric}
- ELECTRIC_DB_PASSWORD=${ELECTRIC_DB_PASSWORD:-electric_password}
- SEARXNG_DEFAULT_HOST=${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
- SERVICE_ROLE=worker
depends_on:
@ -176,24 +169,6 @@ services:
# - redis
# - celery_worker
electric:
image: electricsql/electric:1.4.10
ports:
- "${ELECTRIC_PORT:-5133}:3000"
depends_on:
db:
condition: service_healthy
environment:
- DATABASE_URL=${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
- ELECTRIC_INSECURE=true
- ELECTRIC_WRITE_TO_PG_MODE=direct
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
interval: 10s
timeout: 5s
retries: 5
frontend:
build:
context: ../surfsense_web
@ -201,8 +176,6 @@ services:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:8000}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE:-LOCAL}
NEXT_PUBLIC_ETL_SERVICE: ${NEXT_PUBLIC_ETL_SERVICE:-DOCLING}
NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:5133}
NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${NEXT_PUBLIC_DEPLOYMENT_MODE:-self-hosted}
ports:
- "${FRONTEND_PORT:-3000}:3000"
@ -211,8 +184,6 @@ services:
depends_on:
backend:
condition: service_healthy
electric:
condition: service_healthy
volumes:
postgres_data:

View file

@ -15,13 +15,10 @@ services:
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgresql.conf:/etc/postgresql/postgresql.conf:ro
- ./scripts/init-electric-user.sh:/docker-entrypoint-initdb.d/init-electric-user.sh:ro
environment:
POSTGRES_USER: ${DB_USER:-surfsense}
POSTGRES_PASSWORD: ${DB_PASSWORD:-surfsense}
POSTGRES_DB: ${DB_NAME:-surfsense}
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
command: postgres -c config_file=/etc/postgresql/postgresql.conf
restart: unless-stopped
healthcheck:
@ -72,8 +69,6 @@ services:
PYTHONPATH: /app
UVICORN_LOOP: asyncio
UNSTRUCTURED_HAS_PATCHED_LOOP: "1"
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
NEXT_FRONTEND_URL: ${NEXT_FRONTEND_URL:-http://localhost:${FRONTEND_PORT:-3929}}
SEARXNG_DEFAULT_HOST: ${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
# Daytona Sandbox uncomment and set credentials to enable cloud code execution
@ -112,8 +107,6 @@ services:
REDIS_APP_URL: ${REDIS_URL:-redis://redis:6379/0}
CELERY_TASK_DEFAULT_QUEUE: surfsense
PYTHONPATH: /app
ELECTRIC_DB_USER: ${ELECTRIC_DB_USER:-electric}
ELECTRIC_DB_PASSWORD: ${ELECTRIC_DB_PASSWORD:-electric_password}
SEARXNG_DEFAULT_HOST: ${SEARXNG_DEFAULT_HOST:-http://searxng:8080}
SERVICE_ROLE: worker
depends_on:
@ -165,42 +158,20 @@ services:
# - celery_worker
# restart: unless-stopped
electric:
image: electricsql/electric:1.4.10
ports:
- "${ELECTRIC_PORT:-5929}:3000"
environment:
DATABASE_URL: ${ELECTRIC_DATABASE_URL:-postgresql://${ELECTRIC_DB_USER:-electric}:${ELECTRIC_DB_PASSWORD:-electric_password}@${DB_HOST:-db}:${DB_PORT:-5432}/${DB_NAME:-surfsense}?sslmode=${DB_SSLMODE:-disable}}
ELECTRIC_INSECURE: "true"
ELECTRIC_WRITE_TO_PG_MODE: direct
restart: unless-stopped
depends_on:
db:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000/v1/health"]
interval: 10s
timeout: 5s
retries: 5
frontend:
image: ghcr.io/modsetter/surfsense-web:${SURFSENSE_VERSION:-latest}
ports:
- "${FRONTEND_PORT:-3929}:3000"
environment:
NEXT_PUBLIC_FASTAPI_BACKEND_URL: ${NEXT_PUBLIC_FASTAPI_BACKEND_URL:-http://localhost:${BACKEND_PORT:-8929}}
NEXT_PUBLIC_ELECTRIC_URL: ${NEXT_PUBLIC_ELECTRIC_URL:-http://localhost:${ELECTRIC_PORT:-5929}}
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE: ${AUTH_TYPE:-LOCAL}
NEXT_PUBLIC_ETL_SERVICE: ${ETL_SERVICE:-DOCLING}
NEXT_PUBLIC_DEPLOYMENT_MODE: ${DEPLOYMENT_MODE:-self-hosted}
NEXT_PUBLIC_ELECTRIC_AUTH_MODE: ${NEXT_PUBLIC_ELECTRIC_AUTH_MODE:-insecure}
labels:
- "com.centurylinklabs.watchtower.enable=true"
depends_on:
backend:
condition: service_healthy
electric:
condition: service_healthy
restart: unless-stopped
volumes:

View file

@ -1,38 +0,0 @@
#!/bin/sh
# Creates the Electric SQL replication user on first DB initialization.
# Idempotent — safe to run alongside Alembic migration 66.
set -e
ELECTRIC_DB_USER="${ELECTRIC_DB_USER:-electric}"
ELECTRIC_DB_PASSWORD="${ELECTRIC_DB_PASSWORD:-electric_password}"
echo "Creating Electric SQL replication user: $ELECTRIC_DB_USER"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_user WHERE usename = '$ELECTRIC_DB_USER') THEN
CREATE USER $ELECTRIC_DB_USER WITH REPLICATION PASSWORD '$ELECTRIC_DB_PASSWORD';
END IF;
END
\$\$;
GRANT CONNECT ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
GRANT CREATE ON DATABASE $POSTGRES_DB TO $ELECTRIC_DB_USER;
GRANT USAGE ON SCHEMA public TO $ELECTRIC_DB_USER;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO $ELECTRIC_DB_USER;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO $ELECTRIC_DB_USER;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO $ELECTRIC_DB_USER;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO $ELECTRIC_DB_USER;
DO \$\$
BEGIN
IF NOT EXISTS (SELECT FROM pg_publication WHERE pubname = 'electric_publication_default') THEN
CREATE PUBLICATION electric_publication_default;
END IF;
END
\$\$;
EOSQL
echo "Electric SQL user '$ELECTRIC_DB_USER' and publication created successfully"

View file

@ -109,7 +109,6 @@ $Files = @(
@{ Src = "docker/docker-compose.yml"; Dest = "docker-compose.yml" }
@{ Src = "docker/.env.example"; Dest = ".env.example" }
@{ Src = "docker/postgresql.conf"; Dest = "postgresql.conf" }
@{ Src = "docker/scripts/init-electric-user.sh"; Dest = "scripts/init-electric-user.sh" }
@{ Src = "docker/scripts/migrate-database.ps1"; Dest = "scripts/migrate-database.ps1" }
@{ Src = "docker/searxng/settings.yml"; Dest = "searxng/settings.yml" }
@{ Src = "docker/searxng/limiter.toml"; Dest = "searxng/limiter.toml" }

View file

@ -108,7 +108,6 @@ FILES=(
"docker/docker-compose.yml:docker-compose.yml"
"docker/.env.example:.env.example"
"docker/postgresql.conf:postgresql.conf"
"docker/scripts/init-electric-user.sh:scripts/init-electric-user.sh"
"docker/scripts/migrate-database.sh:scripts/migrate-database.sh"
"docker/searxng/settings.yml:searxng/settings.yml"
"docker/searxng/limiter.toml:searxng/limiter.toml"
@ -122,7 +121,6 @@ for entry in "${FILES[@]}"; do
|| error "Failed to download ${dest}. Check your internet connection and try again."
done
chmod +x "${INSTALL_DIR}/scripts/init-electric-user.sh"
chmod +x "${INSTALL_DIR}/scripts/migrate-database.sh"
success "All files downloaded to ${INSTALL_DIR}/"

View file

@ -17,10 +17,6 @@ REDIS_APP_URL=redis://localhost:6379/0
# Only uncomment if running the backend outside Docker (e.g. uvicorn on host).
# SEARXNG_DEFAULT_HOST=http://localhost:8888
#Electric(for migrations only)
ELECTRIC_DB_USER=electric
ELECTRIC_DB_PASSWORD=electric_password
# Periodic task interval
# # Run every minute (default)
# SCHEDULE_CHECKER_INTERVAL=1m

View file

@ -25,13 +25,6 @@ database_url = os.getenv("DATABASE_URL")
if database_url:
config.set_main_option("sqlalchemy.url", database_url)
# Electric SQL user credentials - centralized configuration for migrations
# These are used by migrations that set up Electric SQL replication
config.set_main_option("electric_db_user", os.getenv("ELECTRIC_DB_USER", "electric"))
config.set_main_option(
"electric_db_password", os.getenv("ELECTRIC_DB_PASSWORD", "electric_password")
)
# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:

View file

@ -2,10 +2,6 @@ NEXT_PUBLIC_FASTAPI_BACKEND_URL=http://localhost:8000
NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=LOCAL or GOOGLE
NEXT_PUBLIC_ETL_SERVICE=UNSTRUCTURED or LLAMACLOUD or DOCLING
# Electric SQL
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
# Contact Form Vars - OPTIONAL
DATABASE_URL=postgresql://postgres:[YOUR-PASSWORD]@db.sdsf.supabase.co:5432/postgres

View file

@ -35,15 +35,11 @@ RUN corepack enable pnpm
ARG NEXT_PUBLIC_FASTAPI_BACKEND_URL=__NEXT_PUBLIC_FASTAPI_BACKEND_URL__
ARG NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__
ARG NEXT_PUBLIC_ETL_SERVICE=__NEXT_PUBLIC_ETL_SERVICE__
ARG NEXT_PUBLIC_ELECTRIC_URL=__NEXT_PUBLIC_ELECTRIC_URL__
ARG NEXT_PUBLIC_ELECTRIC_AUTH_MODE=__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__
ARG NEXT_PUBLIC_DEPLOYMENT_MODE=__NEXT_PUBLIC_DEPLOYMENT_MODE__
ENV NEXT_PUBLIC_FASTAPI_BACKEND_URL=$NEXT_PUBLIC_FASTAPI_BACKEND_URL
ENV NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE=$NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE
ENV NEXT_PUBLIC_ETL_SERVICE=$NEXT_PUBLIC_ETL_SERVICE
ENV NEXT_PUBLIC_ELECTRIC_URL=$NEXT_PUBLIC_ELECTRIC_URL
ENV NEXT_PUBLIC_ELECTRIC_AUTH_MODE=$NEXT_PUBLIC_ELECTRIC_AUTH_MODE
ENV NEXT_PUBLIC_DEPLOYMENT_MODE=$NEXT_PUBLIC_DEPLOYMENT_MODE
COPY --from=deps /app/node_modules ./node_modules

View file

@ -3,7 +3,6 @@ import "./globals.css";
import { RootProvider } from "fumadocs-ui/provider/next";
import { Roboto } from "next/font/google";
import { AnnouncementToastProvider } from "@/components/announcements/AnnouncementToastProvider";
import { ElectricProvider } from "@/components/providers/ElectricProvider";
import { GlobalLoadingProvider } from "@/components/providers/GlobalLoadingProvider";
import { I18nProvider } from "@/components/providers/I18nProvider";
import { PostHogProvider } from "@/components/providers/PostHogProvider";
@ -141,9 +140,7 @@ export default function RootLayout({
>
<RootProvider>
<ReactQueryClientProvider>
<ElectricProvider>
<GlobalLoadingProvider>{children}</GlobalLoadingProvider>
</ElectricProvider>
</ReactQueryClientProvider>
<Toaster />
<AnnouncementToastProvider />

View file

@ -212,12 +212,6 @@ export default function sitemap(): MetadataRoute.Sitemap {
priority: 0.8,
},
// How-to documentation
{
url: "https://www.surfsense.com/docs/how-to/electric-sql",
lastModified,
changeFrequency: "daily",
priority: 0.8,
},
{
url: "https://www.surfsense.com/docs/how-to/realtime-collaboration",
lastModified,

View file

@ -16,7 +16,6 @@ import {
} from "@/components/ui/dropdown-menu";
import { Spinner } from "@/components/ui/spinner";
import { logout } from "@/lib/auth-utils";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
export function UserDropdown({
@ -39,14 +38,6 @@ export function UserDropdown({
trackLogout();
resetUser();
// Best-effort cleanup of Electric SQL / PGlite
// Even if this fails, login-time cleanup will handle it
try {
await cleanupElectric();
} catch (err) {
console.warn("[Logout] Electric cleanup failed (will be handled on next login):", err);
}
// Revoke refresh token on server and clear all tokens from localStorage
await logout();

View file

@ -51,7 +51,6 @@ import { notificationsApiService } from "@/lib/apis/notifications-api.service";
import { searchSpacesApiService } from "@/lib/apis/search-spaces-api.service";
import { logout } from "@/lib/auth-utils";
import { deleteThread, fetchThreads, updateThread } from "@/lib/chat/thread-persistence";
import { cleanupElectric } from "@/lib/electric/client";
import { resetUser, trackLogout } from "@/lib/posthog/events";
import { cacheKeys } from "@/lib/query-client/cache-keys";
import { MorePagesDialog } from "@/components/settings/more-pages-dialog";
@ -159,8 +158,6 @@ export function LayoutDataProvider({ searchSpaceId, children }: LayoutDataProvid
// Search space dialog state
const [isCreateSearchSpaceDialogOpen, setIsCreateSearchSpaceDialogOpen] = useState(false);
// Per-tab inbox hooks — each has independent API loading, pagination,
// and Electric live queries. The Electric sync shape is shared (client-level cache).
const userId = user?.id ? String(user.id) : null;
const numericSpaceId = Number(searchSpaceId) || null;
@ -607,14 +604,6 @@ export function LayoutDataProvider({ searchSpaceId, children }: LayoutDataProvid
trackLogout();
resetUser();
// Best-effort cleanup of Electric SQL / PGlite
// Even if this fails, login-time cleanup will handle it
try {
await cleanupElectric();
} catch (err) {
console.warn("[Logout] Electric cleanup failed (will be handled on next login):", err);
}
// Revoke refresh token on server and clear all tokens from localStorage
await logout();

View file

@ -1,116 +0,0 @@
"use client";
import { useAtomValue } from "jotai";
import { usePathname } from "next/navigation";
import { useEffect, useRef, useState } from "react";
import { currentUserAtom } from "@/atoms/user/user-query.atoms";
import { useGlobalLoadingEffect } from "@/hooks/use-global-loading";
import { getBearerToken } from "@/lib/auth-utils";
import {
cleanupElectric,
type ElectricClient,
initElectric,
isElectricInitialized,
} from "@/lib/electric/client";
import { ElectricContext } from "@/lib/electric/context";
interface ElectricProviderProps {
children: React.ReactNode;
}
/**
* Initializes user-specific PGlite database with Electric SQL sync.
* Handles user isolation, cleanup, and re-initialization on user change.
*/
export function ElectricProvider({ children }: ElectricProviderProps) {
const [electricClient, setElectricClient] = useState<ElectricClient | null>(null);
const [error, setError] = useState<Error | null>(null);
const {
data: user,
isSuccess: isUserLoaded,
isError: isUserError,
} = useAtomValue(currentUserAtom);
const previousUserIdRef = useRef<string | null>(null);
const initializingRef = useRef(false);
const pathname = usePathname();
useEffect(() => {
if (typeof window === "undefined") return;
// No user logged in - cleanup if previous user existed
if (!isUserLoaded || !user?.id) {
if (previousUserIdRef.current && isElectricInitialized()) {
console.log("[ElectricProvider] User logged out, cleaning up...");
cleanupElectric().then(() => {
previousUserIdRef.current = null;
setElectricClient(null);
});
}
return;
}
const userId = String(user.id);
// Skip if already initialized for this user or currently initializing
if ((electricClient && previousUserIdRef.current === userId) || initializingRef.current) {
return;
}
initializingRef.current = true;
let mounted = true;
async function init() {
try {
console.log(`[ElectricProvider] Initializing for user: ${userId}`);
const client = await initElectric(userId);
if (mounted) {
previousUserIdRef.current = userId;
setElectricClient(client);
setError(null);
console.log(`[ElectricProvider] ✅ Ready for user: ${userId}`);
}
} catch (err) {
console.error("[ElectricProvider] Failed to initialize:", err);
if (mounted) {
setError(err instanceof Error ? err : new Error("Failed to initialize Electric SQL"));
setElectricClient(null);
}
} finally {
if (mounted) {
initializingRef.current = false;
}
}
}
init();
return () => {
mounted = false;
};
}, [user?.id, isUserLoaded, electricClient]);
const hasToken = typeof window !== "undefined" && !!getBearerToken();
// Only block UI on dashboard routes; public pages render immediately
const requiresElectricLoading = pathname?.startsWith("/dashboard");
const shouldShowLoading =
hasToken && isUserLoaded && !!user?.id && !electricClient && !error && requiresElectricLoading;
useGlobalLoadingEffect(shouldShowLoading);
// Render immediately for unauthenticated users or failed user queries
if (!hasToken || !isUserLoaded || !user?.id || isUserError) {
return <ElectricContext.Provider value={null}>{children}</ElectricContext.Provider>;
}
// Render with null context while initializing
if (!electricClient && !error) {
return <ElectricContext.Provider value={null}>{children}</ElectricContext.Provider>;
}
if (error) {
console.warn("[ElectricProvider] Initialization failed, sync may not work:", error.message);
}
return <ElectricContext.Provider value={electricClient}>{children}</ElectricContext.Provider>;
}

View file

@ -25,6 +25,5 @@ The following `.env` variables are **only used by the dev compose file** (they h
| `NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE` | Frontend build arg for auth type | `LOCAL` |
| `NEXT_PUBLIC_ETL_SERVICE` | Frontend build arg for ETL service | `DOCLING` |
| `NEXT_PUBLIC_DEPLOYMENT_MODE` | Frontend build arg for deployment mode | `self-hosted` |
| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | Frontend build arg for Electric auth | `insecure` |
In the production compose file, the `NEXT_PUBLIC_*` frontend variables are automatically derived from `AUTH_TYPE`, `ETL_SERVICE`, and the port settings. In the dev compose file, they are passed as build args since the frontend is built from source.

View file

@ -18,8 +18,6 @@ After starting, access SurfSense at:
- **Frontend**: [http://localhost:3929](http://localhost:3929)
- **Backend API**: [http://localhost:8929](http://localhost:8929)
- **API Docs**: [http://localhost:8929/docs](http://localhost:8929/docs)
- **Electric SQL**: [http://localhost:5929](http://localhost:5929)
---
## Configuration
@ -50,7 +48,6 @@ All configuration lives in a single `docker/.env` file (or `surfsense/.env` if y
|----------|-------------|---------|
| `FRONTEND_PORT` | Frontend service port | `3929` |
| `BACKEND_PORT` | Backend API service port | `8929` |
| `ELECTRIC_PORT` | Electric SQL service port | `5929` |
### Custom Domain / Reverse Proxy
@ -61,7 +58,6 @@ Only set these if serving SurfSense on a real domain via a reverse proxy (Caddy,
| `NEXT_FRONTEND_URL` | Public frontend URL (e.g. `https://app.yourdomain.com`) |
| `BACKEND_URL` | Public backend URL for OAuth callbacks (e.g. `https://api.yourdomain.com`) |
| `NEXT_PUBLIC_FASTAPI_BACKEND_URL` | Backend URL used by the frontend (e.g. `https://api.yourdomain.com`) |
| `NEXT_PUBLIC_ELECTRIC_URL` | Electric SQL URL used by the frontend (e.g. `https://electric.yourdomain.com`) |
### Database
@ -77,14 +73,6 @@ Defaults work out of the box. Change for security in production.
| `DB_SSLMODE` | SSL mode: `disable`, `require`, `verify-ca`, `verify-full` | `disable` |
| `DATABASE_URL` | Full connection URL override. Use for managed databases (RDS, Supabase, etc.) | *(built from above)* |
### Electric SQL
| Variable | Description | Default |
|----------|-------------|---------|
| `ELECTRIC_DB_USER` | Replication user for Electric SQL | `electric` |
| `ELECTRIC_DB_PASSWORD` | Replication password for Electric SQL | `electric_password` |
| `ELECTRIC_DATABASE_URL` | Full connection URL override for Electric. Set to `host.docker.internal` when pointing at a local Postgres instance | *(built from above)* |
### Authentication
| Variable | Description |
@ -148,7 +136,6 @@ Uncomment the connectors you want to use. Redirect URIs follow the pattern `http
| `backend` | FastAPI application server |
| `celery_worker` | Background task processing (document indexing, etc.) |
| `celery_beat` | Periodic task scheduler (connector sync) |
| `electric` | Electric SQL (real-time sync for the frontend) |
| `frontend` | Next.js web application |
All services start automatically with `docker compose up -d`.
@ -165,7 +152,6 @@ docker compose logs -f
# View logs for a specific service
docker compose logs -f backend
docker compose logs -f electric
# Stop all services
docker compose down
@ -183,6 +169,4 @@ docker compose down -v
- **Ports already in use**: Change the relevant `*_PORT` variable in `.env` and restart.
- **Permission errors on Linux**: You may need to prefix `docker` commands with `sudo`.
- **Electric SQL not connecting**: Check `docker compose logs electric`. If it shows `domain does not exist: db`, ensure `ELECTRIC_DATABASE_URL` is not set to a stale value in `.env`.
- **Real-time updates not working in browser**: Open DevTools → Console and look for `[Electric]` errors. Check that `NEXT_PUBLIC_ELECTRIC_URL` matches the running Electric SQL address.
- **Line ending issues on Windows**: Run `git config --global core.autocrlf true` before cloning.

View file

@ -38,4 +38,3 @@ After starting, access SurfSense at:
- **Frontend**: [http://localhost:3929](http://localhost:3929)
- **Backend API**: [http://localhost:8929](http://localhost:8929)
- **API Docs**: [http://localhost:8929/docs](http://localhost:8929/docs)
- **Electric SQL**: [http://localhost:5929](http://localhost:5929)

View file

@ -1,226 +0,0 @@
---
title: Electric SQL
description: Setting up Electric SQL for real-time data synchronization in SurfSense
---
[Electric SQL](https://electric-sql.com/) enables real-time data synchronization in SurfSense, providing instant updates for inbox items, document indexing status, and connector sync progress without manual refresh. The frontend uses [PGlite](https://pglite.dev/) (a lightweight PostgreSQL in the browser) to maintain a local database that syncs with the backend via Electric SQL.
## What does Electric SQL do?
When you index documents or receive inbox updates, Electric SQL pushes updates to your browser in real-time. The data flows like this:
1. Backend writes data to PostgreSQL
2. Electric SQL detects changes and streams them to the frontend
3. PGlite (running in your browser) receives and stores the data locally in IndexedDB
4. Your UI updates instantly without refreshing
This means:
- **Inbox updates appear instantly** - No need to refresh the page
- **Document indexing progress updates live** - Watch your documents get processed
- **Connector status syncs automatically** - See when connectors finish syncing
- **Offline support** - PGlite caches data locally, so previously loaded data remains accessible
## Docker Setup
- The `docker-compose.yml` includes the Electric SQL service, pre-configured to connect to the Docker-managed `db` container.
- No additional configuration is required. Electric SQL works with the Docker PostgreSQL instance out of the box.
## Manual Setup (Development Only)
This section is intended for local development environments. Follow the steps below based on your PostgreSQL setup.
### Step 1: Configure Environment Variables
Ensure your environment files are configured. If you haven't set up SurfSense yet, follow the [Manual Installation Guide](/docs/manual-installation) first.
For Electric SQL, verify these variables are set:
**Backend (`surfsense_backend/.env`):**
```bash
ELECTRIC_DB_USER=electric
ELECTRIC_DB_PASSWORD=electric_password
```
**Frontend (`surfsense_web/.env`):**
```bash
NEXT_PUBLIC_ELECTRIC_URL=http://localhost:5133
NEXT_PUBLIC_ELECTRIC_AUTH_MODE=insecure
```
Next, choose the option that matches your PostgreSQL setup:
---
### Option A: Using Docker PostgreSQL
If you're using the Docker-managed PostgreSQL instance, no extra configuration is needed. Just start the services using the development compose file (which exposes the PostgreSQL port to your host machine):
```bash
docker compose -f docker-compose.dev.yml up -d db electric
```
Then run the database migration, start the backend, and launch the frontend:
```bash
cd surfsense_backend
uv run alembic upgrade head
uv run main.py
```
In a separate terminal, start the frontend:
```bash
cd surfsense_web
pnpm run dev
```
Electric SQL is now configured and connected to your Docker PostgreSQL database.
---
### Option B: Using Local PostgreSQL
If you're using a local PostgreSQL installation (e.g. Postgres.app on macOS), follow these steps:
**1. Enable logical replication in PostgreSQL:**
Open your `postgresql.conf` file:
```bash
# Common locations:
# macOS (Postgres.app): ~/Library/Application Support/Postgres/var-17/postgresql.conf
# macOS (Homebrew): /opt/homebrew/var/postgresql@17/postgresql.conf
# Linux: /etc/postgresql/17/main/postgresql.conf
sudo vim /path/to/postgresql.conf
```
Add the following settings:
```ini
# Required for Electric SQL
wal_level = logical
max_replication_slots = 10
max_wal_senders = 10
```
After saving, restart PostgreSQL for the settings to take effect.
**2. Create the Electric replication user:**
Connect to your local database as a superuser and run:
```sql
CREATE USER electric WITH REPLICATION PASSWORD 'electric_password';
GRANT CONNECT ON DATABASE surfsense TO electric;
GRANT CREATE ON DATABASE surfsense TO electric;
GRANT USAGE ON SCHEMA public TO electric;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO electric;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO electric;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO electric;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON SEQUENCES TO electric;
CREATE PUBLICATION electric_publication_default;
```
**3. Set `ELECTRIC_DATABASE_URL` in `docker/.env`:**
Uncomment and update this line to point Electric at your local Postgres via `host.docker.internal` (the hostname Docker containers use to reach the host machine):
```bash
ELECTRIC_DATABASE_URL=postgresql://electric:electric_password@host.docker.internal:5432/surfsense?sslmode=disable
```
**4. Start Electric SQL only (skip the Docker `db` container):**
```bash
docker compose -f docker-compose.dev.yml up -d --no-deps electric
```
The `--no-deps` flag starts only the `electric` service without starting the Docker-managed `db` container.
**5. Run database migration and start the backend:**
```bash
cd surfsense_backend
uv run alembic upgrade head
uv run main.py
```
In a separate terminal, start the frontend:
```bash
cd surfsense_web
pnpm run dev
```
Electric SQL is now configured and connected to your local PostgreSQL database.
## Environment Variables Reference
**Required for manual setup:**
| Variable | Location | Description | Default |
|----------|----------|-------------|---------|
| `ELECTRIC_DB_USER` | `surfsense_backend/.env` | Database user for Electric replication | `electric` |
| `ELECTRIC_DB_PASSWORD` | `surfsense_backend/.env` | Database password for Electric replication | `electric_password` |
| `NEXT_PUBLIC_ELECTRIC_URL` | `surfsense_web/.env` | Electric SQL server URL (PGlite connects to this) | `http://localhost:5133` |
| `NEXT_PUBLIC_ELECTRIC_AUTH_MODE` | `surfsense_web/.env` | Authentication mode (`insecure` for dev, `secure` for production) | `insecure` |
**Optional / Docker-only:**
| Variable | Location | Description | Default |
|----------|----------|-------------|---------|
| `ELECTRIC_PORT` | `docker/.env` | Port to expose Electric SQL on the host | `5133` (dev), `5929` (production) |
| `ELECTRIC_DATABASE_URL` | `docker/.env` | Full connection URL override for Electric. Only needed for Option B (local Postgres via `host.docker.internal`) | *(built from above defaults)* |
## Verify Setup
To verify Electric SQL is running correctly:
```bash
curl http://localhost:5133/v1/health
```
You should receive:
```json
{"status":"active"}
```
## Troubleshooting
### Electric SQL Server Not Starting
**Check PostgreSQL settings:**
- Ensure `wal_level = logical` is set
- Verify the Electric user has replication permissions
- Check database connectivity from Electric container
### Real-time Updates Not Working
1. Open browser DevTools → Console
2. Look for errors containing `[Electric]`
3. Check Network tab for WebSocket connections to the Electric URL
### Connection Refused Errors
- Verify Electric SQL server is running: `docker ps | grep electric`
- Check the `NEXT_PUBLIC_ELECTRIC_URL` matches your Electric server address
- For Docker setups, ensure the frontend can reach the Electric container
### Data Not Syncing
- Check Electric SQL logs: `docker compose logs electric`
- Verify PostgreSQL replication is working
- Ensure the Electric user has proper table permissions
### PGlite/IndexedDB Issues
If data appears stale or corrupted in the browser:
1. Open browser DevTools → Application → IndexedDB
2. Delete databases starting with `surfsense-`
3. Refresh the page - PGlite will recreate the local database and resync

View file

@ -8,11 +8,6 @@ import { Card, Cards } from 'fumadocs-ui/components/card';
Practical guides to help you get the most out of SurfSense.
<Cards>
<Card
title="Electric SQL"
description="Setting up Electric SQL for real-time data synchronization"
href="/docs/how-to/electric-sql"
/>
<Card
title="Realtime Collaboration"
description="Invite teammates, share chats, and collaborate in realtime"

View file

@ -1,6 +1,6 @@
{
"title": "How to",
"pages": ["electric-sql", "realtime-collaboration", "web-search"],
"pages": ["realtime-collaboration", "web-search"],
"icon": "Compass",
"defaultOpen": false
}

View file

@ -73,8 +73,6 @@ Edit the `.env` file and set the following variables:
| AUTH_TYPE | Authentication method: `GOOGLE` for OAuth with Google, `LOCAL` for email/password authentication |
| GOOGLE_OAUTH_CLIENT_ID | (Optional) Client ID from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
| GOOGLE_OAUTH_CLIENT_SECRET | (Optional) Client secret from Google Cloud Console (required if AUTH_TYPE=GOOGLE) |
| ELECTRIC_DB_USER | (Optional) PostgreSQL username for Electric-SQL connection (default: `electric`) |
| ELECTRIC_DB_PASSWORD | (Optional) PostgreSQL password for Electric-SQL connection (default: `electric_password`) |
| EMBEDDING_MODEL | Name of the embedding model (e.g., `sentence-transformers/all-MiniLM-L6-v2`, `openai://text-embedding-ada-002`) |
| RERANKERS_ENABLED | (Optional) Enable or disable document reranking for improved search results (e.g., `TRUE` or `FALSE`, default: `FALSE`) |
| RERANKERS_MODEL_NAME | Name of the reranker model (e.g., `ms-marco-MiniLM-L-12-v2`) (required if RERANKERS_ENABLED=TRUE) |
@ -410,8 +408,6 @@ Edit the `.env` file and set:
| NEXT_PUBLIC_FASTAPI_BACKEND_URL | Backend URL (e.g., `http://localhost:8000`) |
| NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE | Same value as set in backend AUTH_TYPE i.e `GOOGLE` for OAuth with Google, `LOCAL` for email/password authentication |
| NEXT_PUBLIC_ETL_SERVICE | Document parsing service (should match backend ETL_SERVICE): `UNSTRUCTURED`, `LLAMACLOUD`, or `DOCLING` - affects supported file formats in upload interface |
| NEXT_PUBLIC_ELECTRIC_URL | URL for Electric-SQL service (e.g., `http://localhost:5133`) |
| NEXT_PUBLIC_ELECTRIC_AUTH_MODE | Electric-SQL authentication mode (default: `insecure`) |
### 2. Install Dependencies

View file

@ -17,14 +17,12 @@ const replacements = [
"__NEXT_PUBLIC_FASTAPI_BACKEND_URL__",
process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL || "http://localhost:8000",
],
["__NEXT_PUBLIC_ELECTRIC_URL__", process.env.NEXT_PUBLIC_ELECTRIC_URL || "http://localhost:5133"],
[
"__NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE__",
process.env.NEXT_PUBLIC_FASTAPI_BACKEND_AUTH_TYPE || "LOCAL",
],
["__NEXT_PUBLIC_ETL_SERVICE__", process.env.NEXT_PUBLIC_ETL_SERVICE || "DOCLING"],
["__NEXT_PUBLIC_DEPLOYMENT_MODE__", process.env.NEXT_PUBLIC_DEPLOYMENT_MODE || "self-hosted"],
["__NEXT_PUBLIC_ELECTRIC_AUTH_MODE__", process.env.NEXT_PUBLIC_ELECTRIC_AUTH_MODE || "insecure"],
];
let filesProcessed = 0;

View file

@ -1,20 +0,0 @@
/**
 * Resolve the bearer token used to authenticate Electric SQL requests.
 *
 * In "insecure" mode (local development) no token is required, so an empty
 * string is returned. Otherwise the token persisted by the app's auth flow
 * is read from localStorage; server-side rendering has no storage, so it
 * also yields an empty string.
 */
export async function getElectricAuthToken(): Promise<string> {
	const insecure = process.env.NEXT_PUBLIC_ELECTRIC_AUTH_MODE === "insecure";
	const inBrowser = typeof window !== "undefined";
	if (!insecure && inBrowser) {
		return localStorage.getItem("surfsense_bearer_token") ?? "";
	}
	return "";
}

View file

@ -1,62 +0,0 @@
import type { MutableRefObject } from "react";
/**
 * Extract the newest `created_at` timestamp from a list of items, or null
 * when the list is empty.
 *
 * Comparison happens on parsed epoch milliseconds rather than on the raw
 * strings: the API (Python isoformat, "+00:00" suffix) and Electric/PGlite
 * ("Z" suffix, variable fractional-second precision) emit different string
 * formats, so lexicographic order is not chronological order.
 */
export function getNewestTimestamp<T extends { created_at: string }>(items: T[]): string | null {
	if (items.length === 0) return null;
	let newest = items[0].created_at;
	let newestMs = new Date(newest).getTime();
	for (const item of items.slice(1)) {
		const ms = new Date(item.created_at).getTime();
		if (ms > newestMs) {
			newestMs = ms;
			newest = item.created_at;
		}
	}
	return newest;
}
/**
 * Identify genuinely new items from an Electric live query callback.
 *
 * On Electric's first callback, ALL live IDs are snapshotted into
 * `baselineRef` as the baseline. Items already in the baseline stay hidden
 * (they belong to pages beyond the API's first page and will appear via
 * scroll pagination). Items created in the timing gap between the API fetch
 * and Electric's first callback are rescued by the `newestApiTimestamp`
 * cutoff: their `created_at` is newer than anything the API returned, so
 * they pass through even though they sit in the baseline.
 *
 * Accepted items are added to the baseline so each is reported only once.
 */
export function filterNewElectricItems<T extends { id: number; created_at: string }>(
	validItems: T[],
	liveIds: Set<number>,
	prevIds: Set<number>,
	baselineRef: { current: Set<number> | null },
	newestApiTimestamp: string | null
): T[] {
	// First callback: snapshot every live ID as the baseline.
	const baseline = (baselineRef.current ??= new Set(liveIds));
	const cutoffMs = newestApiTimestamp === null ? null : new Date(newestApiTimestamp).getTime();
	const fresh = validItems.filter((item) => {
		if (prevIds.has(item.id)) return false;
		if (!baseline.has(item.id)) return true;
		// Baseline item, but created after everything the API returned:
		// it arrived in the fetch/first-callback timing gap — surface it.
		return cutoffMs !== null && new Date(item.created_at).getTime() > cutoffMs;
	});
	for (const item of fresh) {
		baseline.add(item.id);
	}
	return fresh;
}

View file

@ -1,848 +0,0 @@
/**
* Electric SQL client setup for ElectricSQL 1.x with PGlite
*
* USER-SPECIFIC DATABASE ARCHITECTURE:
* - Each user gets their own IndexedDB database: idb://surfsense-{userId}-v{version}
* - On login: cleanup databases from other users, then initialize current user's DB
* - On logout: best-effort cleanup (not relied upon)
*
* This ensures:
* 1. Complete user isolation (data can never leak between users)
* 2. Self-healing on login (stale databases are cleaned up)
* 3. Works even if logout cleanup fails
*/
import { PGlite, type Transaction } from "@electric-sql/pglite";
import { live } from "@electric-sql/pglite/live";
import { electricSync } from "@electric-sql/pglite-sync";
// Debug logging helpers - chatty in development, silent in production builds.
const IS_DEV = process.env.NODE_ENV === "development";

/** console.log passthrough that is a no-op outside development. */
function debugLog(...args: unknown[]) {
	if (!IS_DEV) return;
	console.log(...args);
}

/** console.warn passthrough that is a no-op outside development. */
function debugWarn(...args: unknown[]) {
	if (!IS_DEV) return;
	console.warn(...args);
}
// Types

/** Handle to one user's local Electric/PGlite stack. */
export interface ElectricClient {
	// The user's local PGlite database (IndexedDB-backed in the browser).
	db: PGlite;
	// The user this client was initialized for.
	userId: string;
	// Start (or reuse) a shape sync for one table; see syncShape in initElectric.
	syncShape: (options: SyncShapeOptions) => Promise<SyncHandle>;
}

/** Parameters describing an Electric "shape" (a filtered view of one table). */
export interface SyncShapeOptions {
	table: string;
	// Optional SQL WHERE clause restricting which rows are synced locally.
	where?: string;
	// Optional subset of columns to sync (all columns when omitted).
	columns?: string[];
	// Primary key column(s); syncShape defaults this to ["id"].
	primaryKey?: string[];
}

/** Live handle to an active shape sync. */
export interface SyncHandle {
	unsubscribe: () => void;
	readonly isUpToDate: boolean;
	// The stream property contains the ShapeStreamInterface from pglite-sync
	stream?: unknown;
	// Promise that resolves when initial sync is complete
	initialSyncPromise?: Promise<void>;
}

// Singleton state - now tracks the user ID
let electricClient: ElectricClient | null = null;
let currentUserId: string | null = null;
let isInitializing = false;
let initPromise: Promise<ElectricClient> | null = null;
// Cache for sync handles to prevent duplicate subscriptions (memory optimization)
const activeSyncHandles = new Map<string, SyncHandle>();
// Track pending sync operations to prevent race conditions
// If a sync is in progress, subsequent calls will wait for it instead of starting a new one
const pendingSyncs = new Map<string, Promise<SyncHandle>>();
// Version for sync state - increment this to force fresh sync when Electric config changes
// (bumping it changes the IndexedDB name, so stale local data is discarded on next login)
// v2: user-specific database architecture
// v3: consistent cutoff date for sync+queries, visibility refresh support
// v4: heartbeat-based stale notification detection with updated_at tracking
// v5: fixed duplicate key errors, stable cutoff dates, onMustRefetch handler,
//     real-time documents table with title/created_by_id/status columns,
//     consolidated single documents sync, pending state for document queue visibility
// v6: added enable_summary column to search_source_connectors
// v7: fixed connector-popup using invalid category for useInbox
const SYNC_VERSION = 7;
// Database name prefix for identifying SurfSense databases
const DB_PREFIX = "surfsense-";
// Get Electric URL from environment.
// The env override only applies in the browser; SSR always uses the default.
function getElectricUrl(): string {
	const fallback = "http://localhost:5133";
	if (typeof window === "undefined") return fallback;
	return process.env.NEXT_PUBLIC_ELECTRIC_URL || fallback;
}
/**
 * Get the IndexedDB location for a specific user's local database.
 * Embedding both the user id and SYNC_VERSION in the name isolates users
 * from each other and lets a version bump force a clean resync.
 */
function getDbName(userId: string): string {
	return "idb://".concat(DB_PREFIX, userId, "-v", String(SYNC_VERSION));
}
/**
 * Login-time hygiene: delete every SurfSense IndexedDB database that does
 * NOT belong to `currentUserId` at the current SYNC_VERSION (i.e. other
 * users' databases and stale versions of this user's database).
 *
 * Best-effort only: `indexedDB.databases()` is not available in every
 * browser, and deletions are fire-and-forget.
 */
async function cleanupOtherUserDatabases(currentUserId: string): Promise<void> {
	if (typeof window === "undefined" || !window.indexedDB) {
		return;
	}
	// The only database we keep: "surfsense-{userId}-v{version}".
	const keepSuffix = `${DB_PREFIX}${currentUserId}-v${SYNC_VERSION}`;
	try {
		// Enumeration is not supported in all browsers; silently skip if absent.
		if (typeof window.indexedDB.databases !== "function") return;
		const databases = await window.indexedDB.databases();
		for (const { name } of databases) {
			if (!name) continue;
			// Only touch SurfSense-owned databases.
			if (!name.includes("surfsense")) continue;
			// PGlite stores under a "/pglite/" path prefix, so match on the
			// suffix rather than the full name.
			if (name.endsWith(keepSuffix)) {
				debugLog(`[Electric] Keeping current database: ${name}`);
				continue;
			}
			// Everything else is stale: another user's data or an old version.
			try {
				debugLog(`[Electric] Deleting stale database: ${name}`);
				window.indexedDB.deleteDatabase(name);
			} catch (deleteErr) {
				debugWarn(`[Electric] Failed to delete database ${name}:`, deleteErr);
			}
		}
	} catch (err) {
		// indexedDB.databases() failed - that's okay, login cleanup is best-effort.
		debugWarn("[Electric] Could not enumerate databases for cleanup:", err);
	}
}
/**
* Initialize the Electric SQL client for a specific user
*
* KEY BEHAVIORS:
* 1. If already initialized for the SAME user, returns existing client
* 2. If initialized for a DIFFERENT user, closes old client and creates new one
* 3. On first init, cleans up databases from other users
*
* @param userId - The current user's ID (required)
*/
export async function initElectric(userId: string): Promise<ElectricClient> {
if (!userId) {
throw new Error("userId is required for Electric initialization");
}
// If already initialized for this user, return existing client
if (electricClient && currentUserId === userId) {
return electricClient;
}
// If initialized for a different user, close the old client first
if (electricClient && currentUserId !== userId) {
debugLog(`[Electric] User changed from ${currentUserId} to ${userId}, reinitializing...`);
await cleanupElectric();
}
// If already initializing, wait for it
if (isInitializing && initPromise) {
return initPromise;
}
isInitializing = true;
currentUserId = userId;
initPromise = (async () => {
try {
// STEP 1: Clean up databases from other users (login-time cleanup)
debugLog("[Electric] Cleaning up databases from other users...");
await cleanupOtherUserDatabases(userId);
// STEP 2: Create user-specific PGlite database
const dbName = getDbName(userId);
debugLog(`[Electric] Initializing database: ${dbName}`);
const db = await PGlite.create({
dataDir: dbName,
relaxedDurability: true,
extensions: {
// Enable debug mode in electricSync only in development
electric: electricSync({ debug: process.env.NODE_ENV === "development" }),
live, // Enable live queries for real-time updates
},
});
// STEP 3: Create the notifications table schema in PGlite
// This matches the backend schema
await db.exec(`
CREATE TABLE IF NOT EXISTS notifications (
id INTEGER PRIMARY KEY,
user_id TEXT NOT NULL,
search_space_id INTEGER,
type TEXT NOT NULL,
title TEXT NOT NULL,
message TEXT NOT NULL,
read BOOLEAN NOT NULL DEFAULT FALSE,
metadata JSONB DEFAULT '{}',
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ
);
CREATE INDEX IF NOT EXISTS idx_notifications_user_id ON notifications(user_id);
CREATE INDEX IF NOT EXISTS idx_notifications_read ON notifications(read);
`);
// Create the search_source_connectors table schema in PGlite
// This matches the backend schema
await db.exec(`
CREATE TABLE IF NOT EXISTS search_source_connectors (
id INTEGER PRIMARY KEY,
search_space_id INTEGER NOT NULL,
user_id TEXT NOT NULL,
connector_type TEXT NOT NULL,
name TEXT NOT NULL,
is_indexable BOOLEAN NOT NULL DEFAULT FALSE,
last_indexed_at TIMESTAMPTZ,
config JSONB DEFAULT '{}',
periodic_indexing_enabled BOOLEAN NOT NULL DEFAULT FALSE,
indexing_frequency_minutes INTEGER,
next_scheduled_at TIMESTAMPTZ,
enable_summary BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_connectors_search_space_id ON search_source_connectors(search_space_id);
CREATE INDEX IF NOT EXISTS idx_connectors_type ON search_source_connectors(connector_type);
CREATE INDEX IF NOT EXISTS idx_connectors_user_id ON search_source_connectors(user_id);
`);
// Create the documents table schema in PGlite
// Sync columns needed for real-time table display (lightweight - no content/metadata)
await db.exec(`
CREATE TABLE IF NOT EXISTS documents (
id INTEGER PRIMARY KEY,
search_space_id INTEGER NOT NULL,
document_type TEXT NOT NULL,
title TEXT NOT NULL DEFAULT '',
created_by_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
status JSONB DEFAULT '{"state": "ready"}'::jsonb
);
CREATE INDEX IF NOT EXISTS idx_documents_search_space_id ON documents(search_space_id);
CREATE INDEX IF NOT EXISTS idx_documents_type ON documents(document_type);
CREATE INDEX IF NOT EXISTS idx_documents_search_space_type ON documents(search_space_id, document_type);
CREATE INDEX IF NOT EXISTS idx_documents_status ON documents((status->>'state'));
`);
await db.exec(`
CREATE TABLE IF NOT EXISTS chat_comment_mentions (
id INTEGER PRIMARY KEY,
comment_id INTEGER NOT NULL,
mentioned_user_id TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_chat_comment_mentions_user_id ON chat_comment_mentions(mentioned_user_id);
CREATE INDEX IF NOT EXISTS idx_chat_comment_mentions_comment_id ON chat_comment_mentions(comment_id);
`);
// Create chat_comments table for live comment sync
await db.exec(`
CREATE TABLE IF NOT EXISTS chat_comments (
id INTEGER PRIMARY KEY,
message_id INTEGER NOT NULL,
thread_id INTEGER NOT NULL,
parent_id INTEGER,
author_id TEXT,
content TEXT NOT NULL,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_chat_comments_thread_id ON chat_comments(thread_id);
CREATE INDEX IF NOT EXISTS idx_chat_comments_message_id ON chat_comments(message_id);
CREATE INDEX IF NOT EXISTS idx_chat_comments_parent_id ON chat_comments(parent_id);
`);
// Create new_chat_messages table for live message sync
await db.exec(`
CREATE TABLE IF NOT EXISTS new_chat_messages (
id INTEGER PRIMARY KEY,
thread_id INTEGER NOT NULL,
role TEXT NOT NULL,
content JSONB NOT NULL,
author_id TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_new_chat_messages_thread_id ON new_chat_messages(thread_id);
CREATE INDEX IF NOT EXISTS idx_new_chat_messages_created_at ON new_chat_messages(created_at);
`);
const electricUrl = getElectricUrl();
// STEP 4: Create the client wrapper
electricClient = {
db,
userId,
syncShape: async (options: SyncShapeOptions): Promise<SyncHandle> => {
const { table, where, columns, primaryKey = ["id"] } = options;
// Create cache key for this sync shape
const cacheKey = `${table}_${where || "all"}_${columns?.join(",") || "all"}`;
// Check if we already have an active sync for this shape (memory optimization)
const existingHandle = activeSyncHandles.get(cacheKey);
if (existingHandle) {
debugLog(`[Electric] Reusing existing sync handle for: ${cacheKey}`);
return existingHandle;
}
// Check if there's already a pending sync for this shape (prevent race condition)
const pendingSync = pendingSyncs.get(cacheKey);
if (pendingSync) {
debugLog(`[Electric] Waiting for pending sync to complete: ${cacheKey}`);
return pendingSync;
}
// Create and track the sync promise to prevent race conditions
const syncPromise = (async (): Promise<SyncHandle> => {
// Build params for the shape request
// Electric SQL expects params as URL query parameters
const params: Record<string, string> = { table };
// Validate and fix WHERE clause to ensure string literals are properly quoted
let validatedWhere = where;
if (where) {
// Check if where uses positional parameters
if (where.includes("$1")) {
// Extract the value from the where clause if it's embedded
// For now, we'll use the where clause as-is and let Electric handle it
params.where = where;
validatedWhere = where;
} else {
// Validate that string literals are properly quoted
// Count single quotes - should be even (pairs) for properly quoted strings
const singleQuoteCount = (where.match(/'/g) || []).length;
if (singleQuoteCount % 2 !== 0) {
// Odd number of quotes means unterminated string literal
debugWarn("Where clause has unmatched quotes, fixing:", where);
// Add closing quote at the end
validatedWhere = `${where}'`;
params.where = validatedWhere;
} else {
// Use the where clause directly (already formatted)
params.where = where;
validatedWhere = where;
}
}
}
if (columns) params.columns = columns.join(",");
debugLog("[Electric] Syncing shape with params:", params);
debugLog("[Electric] Electric URL:", `${electricUrl}/v1/shape`);
debugLog("[Electric] Where clause:", where, "Validated:", validatedWhere);
try {
// Debug: Test Electric SQL connection directly first (DEV ONLY - skipped in production)
if (process.env.NODE_ENV === "development") {
const testUrl = `${electricUrl}/v1/shape?table=${table}&offset=-1${validatedWhere ? `&where=${encodeURIComponent(validatedWhere)}` : ""}`;
debugLog("[Electric] Testing Electric SQL directly:", testUrl);
try {
const testResponse = await fetch(testUrl);
const testHeaders = {
handle: testResponse.headers.get("electric-handle"),
offset: testResponse.headers.get("electric-offset"),
upToDate: testResponse.headers.get("electric-up-to-date"),
};
debugLog("[Electric] Direct Electric SQL response headers:", testHeaders);
const testData = await testResponse.json();
debugLog(
"[Electric] Direct Electric SQL data count:",
Array.isArray(testData) ? testData.length : "not array",
testData
);
} catch (testErr) {
console.error("[Electric] Direct Electric SQL test failed:", testErr);
}
}
// Use PGlite's electric sync plugin to sync the shape
// According to Electric SQL docs, the shape config uses params for table, where, columns
// Note: mapColumns is OPTIONAL per pglite-sync types.ts
// Create a promise that resolves when initial sync is complete
// Using recommended approach: check isUpToDate immediately, watch stream, shorter timeout
// IMPORTANT: We don't unsubscribe from the stream - it must stay active for real-time updates
let syncResolved = false;
// Initialize with no-op functions to satisfy TypeScript
let resolveInitialSync: () => void = () => {};
let rejectInitialSync: (error: Error) => void = () => {};
const initialSyncPromise = new Promise<void>((resolve, reject) => {
resolveInitialSync = () => {
if (!syncResolved) {
syncResolved = true;
// DON'T unsubscribe from stream - it needs to stay active for real-time updates
resolve();
}
};
rejectInitialSync = (error: Error) => {
if (!syncResolved) {
syncResolved = true;
// DON'T unsubscribe from stream even on error - let Electric handle it
reject(error);
}
};
// Shorter timeout (5 seconds) as fallback
setTimeout(() => {
if (!syncResolved) {
debugWarn(
`[Electric] ⚠️ Sync timeout for ${table} - checking isUpToDate one more time...`
);
// Check isUpToDate one more time before resolving
// This will be checked after shape is created
setTimeout(() => {
if (!syncResolved) {
debugWarn(
`[Electric] ⚠️ Sync timeout for ${table} - resolving anyway after 5s`
);
resolveInitialSync();
}
}, 100);
}
}, 5000);
});
// ROOT CAUSE FIX: The duplicate key errors were caused by unstable cutoff dates
// in use-inbox.ts generating different sync keys on each render.
// That's now fixed (rounded to midnight UTC in getSyncCutoffDate).
// We can safely use shapeKey for fast incremental sync.
const shapeKey = `${userId}_v${SYNC_VERSION}_${table}_${where?.replace(/[^a-zA-Z0-9]/g, "_") || "all"}`;
// Type assertion to PGlite with electric extension
const pgWithElectric = db as unknown as {
electric: {
syncShapeToTable: (
config: Record<string, unknown>
) => Promise<{ unsubscribe: () => void; isUpToDate: boolean; stream: unknown }>;
};
};
const shapeConfig = {
shape: {
url: `${electricUrl}/v1/shape`,
params: {
table,
...(validatedWhere ? { where: validatedWhere } : {}),
...(columns ? { columns: columns.join(",") } : {}),
},
},
table,
primaryKey,
shapeKey, // Re-enabled for fast incremental sync (root cause in use-inbox.ts is fixed)
onInitialSync: () => {
debugLog(
`[Electric] ✅ Initial sync complete for ${table} - data should now be in PGlite`
);
resolveInitialSync();
},
onError: (error: Error) => {
console.error(`[Electric] ❌ Shape sync error for ${table}:`, error);
console.error(
"[Electric] Error details:",
JSON.stringify(error, Object.getOwnPropertyNames(error))
);
rejectInitialSync(error);
},
// Handle must-refetch: clear table data before Electric re-inserts from scratch
// This prevents "duplicate key" errors when the shape is invalidated
onMustRefetch: async (tx: Transaction) => {
debugLog(
`[Electric] ⚠️ Must refetch triggered for ${table} - clearing existing data`
);
try {
// Delete rows matching the shape's WHERE clause
// If no WHERE clause, delete all rows from the table
if (validatedWhere) {
// Parse the WHERE clause to build a DELETE statement
// The WHERE clause is already validated and formatted
await tx.exec(`DELETE FROM ${table} WHERE ${validatedWhere}`);
debugLog(`[Electric] 🗑️ Cleared ${table} rows matching: ${validatedWhere}`);
} else {
// No WHERE clause means we're syncing the entire table
await tx.exec(`DELETE FROM ${table}`);
debugLog(`[Electric] 🗑️ Cleared all rows from ${table}`);
}
} catch (cleanupError) {
console.error(
`[Electric] ❌ Failed to clear ${table} during must-refetch:`,
cleanupError
);
// Re-throw to let Electric handle the error
throw cleanupError;
}
},
};
debugLog("[Electric] syncShapeToTable config:", JSON.stringify(shapeConfig, null, 2));
let shape: { unsubscribe: () => void; isUpToDate: boolean; stream: unknown };
try {
shape = await pgWithElectric.electric.syncShapeToTable(shapeConfig);
} catch (syncError) {
// Handle "Already syncing" error - pglite-sync might not have fully cleaned up yet
const errorMessage =
syncError instanceof Error ? syncError.message : String(syncError);
if (errorMessage.includes("Already syncing")) {
debugWarn(
`[Electric] Already syncing ${table}, waiting for existing sync to settle...`
);
// Wait a short time for pglite-sync to settle
await new Promise((resolve) => setTimeout(resolve, 100));
// Check if an active handle now exists (another sync might have completed)
const existingHandle = activeSyncHandles.get(cacheKey);
if (existingHandle) {
debugLog(`[Electric] Found existing handle after waiting: ${cacheKey}`);
return existingHandle;
}
// Retry once after waiting
debugLog(`[Electric] Retrying sync for ${table}...`);
try {
shape = await pgWithElectric.electric.syncShapeToTable(shapeConfig);
} catch (retryError) {
const retryMessage =
retryError instanceof Error ? retryError.message : String(retryError);
if (retryMessage.includes("Already syncing")) {
// Still syncing - create a placeholder handle that indicates the table is being synced
debugWarn(`[Electric] ${table} still syncing, creating placeholder handle`);
const placeholderHandle: SyncHandle = {
unsubscribe: () => {
debugLog(`[Electric] Placeholder unsubscribe for: ${cacheKey}`);
activeSyncHandles.delete(cacheKey);
},
get isUpToDate() {
return false; // We don't know the real state
},
stream: undefined,
initialSyncPromise: Promise.resolve(), // Already syncing means data should be coming
};
activeSyncHandles.set(cacheKey, placeholderHandle);
return placeholderHandle;
}
throw retryError;
}
} else {
throw syncError;
}
}
if (!shape) {
throw new Error("syncShapeToTable returned undefined");
}
// Log the actual shape result structure
debugLog("[Electric] Shape sync result (initial):", {
hasUnsubscribe: typeof shape?.unsubscribe === "function",
isUpToDate: shape?.isUpToDate,
hasStream: !!shape?.stream,
streamType: typeof shape?.stream,
});
// Recommended Approach Step 1: Check isUpToDate immediately
if (shape.isUpToDate) {
debugLog(
`[Electric] ✅ Sync already up-to-date for ${table} (resuming from previous state)`
);
resolveInitialSync();
} else {
// Recommended Approach Step 2: Subscribe to stream and watch for "up-to-date" message
if (shape?.stream) {
const stream = shape.stream as any;
debugLog("[Electric] Shape stream details:", {
shapeHandle: stream?.shapeHandle,
lastOffset: stream?.lastOffset,
isUpToDate: stream?.isUpToDate,
error: stream?.error,
hasSubscribe: typeof stream?.subscribe === "function",
hasUnsubscribe: typeof stream?.unsubscribe === "function",
});
// Subscribe to the stream to watch for "up-to-date" control message
// NOTE: We keep this subscription active - don't unsubscribe!
// The stream is what Electric SQL uses for real-time updates
if (typeof stream?.subscribe === "function") {
debugLog(
"[Electric] Subscribing to shape stream to watch for up-to-date message..."
);
// Subscribe but don't store unsubscribe - we want it to stay active
stream.subscribe((messages: unknown[]) => {
// Continue receiving updates even after sync is resolved
if (!syncResolved) {
debugLog(
"[Electric] 🔵 Shape stream received messages:",
messages?.length || 0
);
}
// Check if any message indicates sync is complete
if (messages && messages.length > 0) {
for (const message of messages) {
const msg = message as any;
// Check for "up-to-date" control message
if (
msg?.headers?.control === "up-to-date" ||
msg?.headers?.electric_up_to_date === "true" ||
(typeof msg === "object" && "up-to-date" in msg)
) {
if (!syncResolved) {
debugLog(`[Electric] ✅ Received up-to-date message for ${table}`);
resolveInitialSync();
}
// Continue listening for real-time updates - don't return!
}
}
if (!syncResolved && messages.length > 0) {
debugLog(
"[Electric] First message:",
JSON.stringify(messages[0], null, 2)
);
}
}
// Also check stream's isUpToDate property after receiving messages
if (!syncResolved && stream?.isUpToDate) {
debugLog(`[Electric] ✅ Stream isUpToDate is true for ${table}`);
resolveInitialSync();
}
});
// Also check stream's isUpToDate property immediately
if (stream?.isUpToDate) {
debugLog(`[Electric] ✅ Stream isUpToDate is true immediately for ${table}`);
resolveInitialSync();
}
}
// Also poll isUpToDate periodically as a backup (every 200ms)
const pollInterval = setInterval(() => {
if (syncResolved) {
clearInterval(pollInterval);
return;
}
if (shape.isUpToDate || stream?.isUpToDate) {
debugLog(`[Electric] ✅ Sync completed (detected via polling) for ${table}`);
clearInterval(pollInterval);
resolveInitialSync();
}
}, 200);
// Clean up polling when promise resolves
initialSyncPromise.finally(() => {
clearInterval(pollInterval);
});
} else {
debugWarn(
`[Electric] ⚠️ No stream available for ${table}, relying on callback and timeout`
);
}
}
// Create the sync handle with proper cleanup
const syncHandle: SyncHandle = {
unsubscribe: () => {
debugLog(`[Electric] Unsubscribing from: ${cacheKey}`);
// Remove from cache first
activeSyncHandles.delete(cacheKey);
// Then unsubscribe from the shape
if (shape && typeof shape.unsubscribe === "function") {
shape.unsubscribe();
}
},
// Use getter to always return current state
get isUpToDate() {
return shape?.isUpToDate ?? false;
},
stream: shape?.stream,
initialSyncPromise, // Expose promise so callers can wait for sync
};
// Cache the sync handle for reuse (memory optimization)
activeSyncHandles.set(cacheKey, syncHandle);
debugLog(
`[Electric] Cached sync handle for: ${cacheKey} (total cached: ${activeSyncHandles.size})`
);
return syncHandle;
} catch (error) {
console.error("[Electric] Failed to sync shape:", error);
// Check if Electric SQL server is reachable
try {
const response = await fetch(`${electricUrl}/v1/shape?table=${table}&offset=-1`, {
method: "GET",
});
debugLog(
"[Electric] Electric SQL server response:",
response.status,
response.statusText
);
if (!response.ok) {
console.error("[Electric] Electric SQL server error:", await response.text());
}
} catch (fetchError) {
console.error("[Electric] Cannot reach Electric SQL server:", fetchError);
console.error("[Electric] Make sure Electric SQL is running at:", electricUrl);
}
throw error;
}
})();
// Track the sync promise to prevent concurrent syncs for the same shape
pendingSyncs.set(cacheKey, syncPromise);
// Clean up the pending sync when done (whether success or failure)
syncPromise.finally(() => {
pendingSyncs.delete(cacheKey);
debugLog(`[Electric] Pending sync removed for: ${cacheKey}`);
});
return syncPromise;
},
};
debugLog(`[Electric] ✅ Initialized successfully for user: ${userId}`);
return electricClient;
} catch (error) {
console.error("[Electric] Failed to initialize:", error);
// Reset state on failure
electricClient = null;
currentUserId = null;
throw error;
} finally {
isInitializing = false;
}
})();
return initPromise;
}
/**
 * Tear down the Electric singleton: unsubscribe every active shape sync,
 * close the PGlite connection, reset module state, and (best-effort)
 * delete the user's IndexedDB database.
 *
 * Invoked on logout and whenever initElectric() switches to a different
 * user. Deletion of the on-disk database is not relied upon — login-time
 * cleanup in cleanupOtherUserDatabases() is the real guarantee.
 */
export async function cleanupElectric(): Promise<void> {
	if (!electricClient) {
		return;
	}
	const userIdToClean = currentUserId;
	debugLog(`[Electric] Cleaning up for user: ${userIdToClean}`);
	debugLog(`[Electric] Unsubscribing from ${activeSyncHandles.size} active sync handles`);
	// Snapshot the keys first: unsubscribe() removes entries from the map
	// while we iterate.
	for (const key of Array.from(activeSyncHandles.keys())) {
		try {
			activeSyncHandles.get(key)?.unsubscribe();
		} catch (err) {
			debugWarn(`[Electric] Failed to unsubscribe from ${key}:`, err);
		}
	}
	// Ensure both caches are empty even if some unsubscribes failed.
	activeSyncHandles.clear();
	pendingSyncs.clear();
	try {
		await electricClient.db.close();
		debugLog("[Electric] Database closed");
	} catch (error) {
		console.error("[Electric] Error closing database:", error);
	}
	// Reset singleton state so a later initElectric() starts fresh.
	electricClient = null;
	currentUserId = null;
	isInitializing = false;
	initPromise = null;
	// Best-effort: drop the user's on-disk database.
	if (typeof window !== "undefined" && window.indexedDB && userIdToClean) {
		try {
			const dbName = `${DB_PREFIX}${userIdToClean}-v${SYNC_VERSION}`;
			window.indexedDB.deleteDatabase(dbName);
			debugLog(`[Electric] Deleted database: ${dbName}`);
		} catch (err) {
			debugWarn("[Electric] Failed to delete database:", err);
		}
	}
	debugLog("[Electric] Cleanup complete");
}
/**
 * Get the Electric client, throwing if initElectric() has not completed.
 */
export function getElectric(): ElectricClient {
	if (electricClient) {
		return electricClient;
	}
	throw new Error("Electric not initialized. Call initElectric(userId) first.");
}
/**
 * Check whether Electric is initialized — optionally for one specific user
 * rather than for any user at all.
 */
export function isElectricInitialized(userId?: string): boolean {
	if (electricClient === null) return false;
	return !userId || currentUserId === userId;
}
/**
 * Get the current user ID that Electric is initialized for
 * (null when no client is active)
 */
export function getCurrentElectricUserId(): string | null {
	return currentUserId;
}
/**
 * Get the PGlite database instance
 * Returns null when Electric has not been initialized yet.
 */
export function getDb(): PGlite | null {
	return electricClient?.db ?? null;
}

View file

@ -1,36 +0,0 @@
"use client";
import { createContext, useContext } from "react";
import type { ElectricClient } from "./client";
/**
 * Context for sharing the Electric SQL client across the app
 *
 * Holds null until ElectricProvider finishes initialization for the
 * authenticated user.
 *
 * This ensures:
 * 1. Single initialization point (ElectricProvider only)
 * 2. No race conditions (hooks wait for context)
 * 3. Clean cleanup (ElectricProvider manages lifecycle)
 */
export const ElectricContext = createContext<ElectricClient | null>(null);
/**
 * Hook to get the Electric client from context
 * Returns null if Electric is not initialized yet (e.g. rendered outside
 * ElectricProvider, or before the provider completed initialization)
 */
export function useElectricClient(): ElectricClient | null {
	return useContext(ElectricContext);
}
/**
 * Hook to get the Electric client, throwing if not available.
 * Use only where Electric is guaranteed to be initialized (authenticated
 * routes rendered inside ElectricProvider).
 */
export function useElectricClientOrThrow(): ElectricClient {
	const client = useContext(ElectricContext);
	if (client) {
		return client;
	}
	throw new Error(
		"Electric client not available. Make sure you're inside ElectricProvider and user is authenticated."
	);
}

View file

@ -27,10 +27,6 @@
"@assistant-ui/react-ai-sdk": "^1.1.20",
"@assistant-ui/react-markdown": "^0.11.9",
"@babel/standalone": "^7.29.2",
"@electric-sql/client": "^1.4.0",
"@electric-sql/pglite": "^0.3.14",
"@electric-sql/pglite-sync": "^0.4.0",
"@electric-sql/react": "^1.0.26",
"@hookform/resolvers": "^5.2.2",
"@number-flow/react": "^0.5.10",
"@platejs/autoformat": "^52.0.11",