import { Notice, TFile, type App, type CachedMetadata, type TAbstractFile } from "obsidian";
import {
	AuthError,
	PermanentError,
	type SurfSenseApiClient,
	TransientError,
	VaultNotRegisteredError,
} from "./api-client";
import { isExcluded, isFolderFiltered } from "./excludes";
import { buildNotePayload } from "./payload";
import { type BatchResult, PersistentQueue } from "./queue";
import type {
	HealthResponse,
	ManifestEntry,
	NotePayload,
	QueueItem,
	StatusKind,
	StatusState,
} from "./types";

/**
 * Owner of "what does the vault look like vs the server" reasoning.
 *
 * Start order: connect (or fall back to /health) → drain queue → reconcile →
 * subscribe events. Reconcile no-ops if last run was < RECONCILE_MIN_INTERVAL_MS ago.
 */

export interface SyncEngineDeps {
	app: App;
	apiClient: SurfSenseApiClient;
	queue: PersistentQueue;
	getSettings: () => SyncEngineSettings;
	saveSettings: (mut: (s: SyncEngineSettings) => void) => Promise<void>;
	setStatus: (s: StatusState) => void;
	onCapabilities: (caps: string[]) => void;
	/** Fired when the adaptive backoff multiplier may have changed; main.ts uses it to reschedule. */
	onReconcileBackoffChanged?: () => void;
}
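
// Typical wiring from a plugin entry point (a hedged sketch, assuming an
// Obsidian `Plugin` subclass's onload; `registerEvent` and these event names
// are standard Obsidian API, the deps object mirrors the interface above):
//
//   const engine = new SyncEngine({ app: this.app, apiClient, queue,
//     getSettings, saveSettings, setStatus, onCapabilities });
//   this.registerEvent(this.app.vault.on("create", (f) => engine.onCreate(f)));
//   this.registerEvent(this.app.vault.on("modify", (f) => engine.onModify(f)));
//   this.registerEvent(this.app.vault.on("delete", (f) => engine.onDelete(f)));
//   this.registerEvent(this.app.vault.on("rename", (f, old) => engine.onRename(f, old)));
//   this.registerEvent(
//     this.app.metadataCache.on("changed", (f, d, c) => engine.onMetadataChanged(f, d, c)),
//   );
//   await engine.start();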

export interface SyncEngineSettings {
	vaultId: string;
	connectorId: number | null;
	searchSpaceId: number | null;
	includeFolders: string[];
	excludeFolders: string[];
	excludePatterns: string[];
	includeAttachments: boolean;
	lastReconcileAt: number | null;
	lastSyncAt: number | null;
	filesSynced: number;
	tombstones: Record<string, number>;
}
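
// A plausible fresh-install value, for orientation only (these defaults are
// illustrative assumptions, not this plugin's authoritative settings):
//   {
//     vaultId: "<generated-uuid>", connectorId: null, searchSpaceId: null,
//     includeFolders: [], excludeFolders: [], excludePatterns: [],
//     includeAttachments: false, lastReconcileAt: null, lastSyncAt: null,
//     filesSynced: 0, tombstones: {},
//   }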

export const RECONCILE_MIN_INTERVAL_MS = 5 * 60 * 1000;
const TOMBSTONE_TTL_MS = 24 * 60 * 60 * 1000; // 1 day
const PENDING_DEBOUNCE_MS = 1500;

export class SyncEngine {
	private readonly deps: SyncEngineDeps;
	private capabilities: string[] = [];
	private pendingMdEdits = new Map<string, ReturnType<typeof setTimeout>>();
	/** Consecutive reconciles that found no work; powers the adaptive interval. */
	private idleReconcileStreak = 0;
	/** 2^streak is capped at this value (e.g. 8 → max ×8 backoff). */
	private readonly maxBackoffMultiplier = 8;

	constructor(deps: SyncEngineDeps) {
		this.deps = deps;
	}

	/** Returns the next-tick interval given the user's base, scaled by the idle streak. */
	getReconcileBackoffMs(baseMs: number): number {
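		// Worked example, assuming the user's base is 5 minutes: streak 0 → 5 min,
		// 1 → 10 min, 2 → 20 min, 3 and beyond → 40 min, because 2^streak is
		// clamped at maxBackoffMultiplier (8).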
		const multiplier = Math.min(2 ** this.idleReconcileStreak, this.maxBackoffMultiplier);
		return baseMs * multiplier;
	}

	getCapabilities(): readonly string[] {
		return this.capabilities;
	}

	supports(capability: string): boolean {
		return this.capabilities.includes(capability);
	}

	/** Run the onload sequence described in this file's docstring. */
	async start(): Promise<void> {
		this.setStatus("syncing", "Connecting to SurfSense…");

		const settings = this.deps.getSettings();
		if (!settings.searchSpaceId) {
			// No target yet — bare /health probe still surfaces auth/network errors.
			try {
				const health = await this.deps.apiClient.health();
				this.applyHealth(health);
			} catch (err) {
				this.handleStartupError(err);
				return;
			}
			this.setStatus("idle", "Pick a search space in settings to start syncing.");
			return;
		}

		// Re-announce so the backend sees the latest vault_name + last_connect_at.
		// flushQueue owns the connectorId gate, so a failed connect here still
		// leaves the queue stable for the next trigger.
		await this.ensureConnected();

		await this.flushQueue();
		await this.maybeReconcile();
		this.setStatus(this.queueStatusKind(), undefined);
	}

	/** Public entry point used after settings save to (re)connect the vault. */
	async ensureConnected(): Promise<void> {
		const settings = this.deps.getSettings();
		if (!settings.searchSpaceId) {
			this.setStatus("idle", "Pick a search space in settings.");
			return;
		}
		try {
			const resp = await this.deps.apiClient.connect({
				searchSpaceId: settings.searchSpaceId,
				vaultId: settings.vaultId,
				vaultName: this.deps.app.vault.getName(),
			});
			this.applyHealth(resp);
			await this.deps.saveSettings((s) => {
				s.connectorId = resp.connector_id;
			});
		} catch (err) {
			this.handleStartupError(err);
		}
	}

	applyHealth(h: HealthResponse): void {
		this.capabilities = Array.isArray(h.capabilities) ? [...h.capabilities] : [];
		this.deps.onCapabilities(this.capabilities);
	}

	// ---- vault event handlers --------------------------------------------

	onCreate(file: TAbstractFile): void {
		if (!this.shouldTrack(file)) return;
		const settings = this.deps.getSettings();
		if (this.isExcluded(file.path, settings)) return;
		this.resetIdleStreak();
		if (this.isMarkdown(file)) {
			this.scheduleMdUpsert(file.path);
			return;
		}
		this.deps.queue.enqueueUpsert(file.path);
	}

	onModify(file: TAbstractFile): void {
		if (!this.shouldTrack(file)) return;
		const settings = this.deps.getSettings();
		if (this.isExcluded(file.path, settings)) return;
		this.resetIdleStreak();
		if (this.isMarkdown(file)) {
			// Defer to metadataCache.changed so payload fields are fresh.
			this.scheduleMdUpsert(file.path);
			return;
		}
		this.deps.queue.enqueueUpsert(file.path);
	}

	onDelete(file: TAbstractFile): void {
		if (!this.shouldTrack(file)) return;
		this.resetIdleStreak();
		this.deps.queue.enqueueDelete(file.path);
		void this.deps.saveSettings((s) => {
			s.tombstones[file.path] = Date.now();
		});
	}

	onRename(file: TAbstractFile, oldPath: string): void {
		if (!this.shouldTrack(file)) return;
		this.resetIdleStreak();
		const settings = this.deps.getSettings();
		if (this.isExcluded(file.path, settings)) {
			// Renamed into an excluded area: drop the old server-side copy.
			this.deps.queue.enqueueDelete(oldPath);
			void this.deps.saveSettings((s) => {
				s.tombstones[oldPath] = Date.now();
			});
			return;
		}
		this.deps.queue.enqueueRename(oldPath, file.path);
	}

	onMetadataChanged(file: TFile, _data: string, _cache: CachedMetadata): void {
		if (!this.shouldTrack(file)) return;
		const settings = this.deps.getSettings();
		if (this.isExcluded(file.path, settings)) return;
		if (!this.isMarkdown(file)) return;
		// Cancel any deferred upsert and enqueue with fresh metadata now.
		const pending = this.pendingMdEdits.get(file.path);
		if (pending) {
			clearTimeout(pending);
			this.pendingMdEdits.delete(file.path);
		}
		this.deps.queue.enqueueUpsert(file.path);
	}

	private scheduleMdUpsert(path: string): void {
		const existing = this.pendingMdEdits.get(path);
		if (existing) clearTimeout(existing);
		this.pendingMdEdits.set(
			path,
			setTimeout(() => {
				this.pendingMdEdits.delete(path);
				this.deps.queue.enqueueUpsert(path);
			}, PENDING_DEBOUNCE_MS),
		);
	}

	// ---- queue draining ---------------------------------------------------

	async flushQueue(): Promise<void> {
		if (this.deps.queue.size === 0) return;
		// Shared gate for every flush trigger so the first /sync can't race /connect.
		if (!this.deps.getSettings().connectorId) {
			await this.ensureConnected();
			if (!this.deps.getSettings().connectorId) return;
		}
		this.setStatus("syncing", `Syncing ${this.deps.queue.size} item(s)…`);
		const summary = await this.deps.queue.drain({
			processBatch: (batch) => this.processBatch(batch),
		});
		if (summary.acked > 0) {
			await this.deps.saveSettings((s) => {
				s.lastSyncAt = Date.now();
				s.filesSynced = (s.filesSynced ?? 0) + summary.acked;
			});
		}
		this.setStatus(this.queueStatusKind(), this.statusDetail());
	}

	private async processBatch(batch: QueueItem[]): Promise<BatchResult> {
		const settings = this.deps.getSettings();
		const upserts = batch.filter((b): b is QueueItem & { op: "upsert" } => b.op === "upsert");
		const renames = batch.filter((b): b is QueueItem & { op: "rename" } => b.op === "rename");
		const deletes = batch.filter((b): b is QueueItem & { op: "delete" } => b.op === "delete");

		const acked: QueueItem[] = [];
		const retry: QueueItem[] = [];
		const dropped: QueueItem[] = [];

		// Renames first so paths line up server-side before content upserts.
		// Per-item server errors go to retry; "missing" is treated as success.
		if (renames.length > 0) {
			try {
				const resp = await this.deps.apiClient.renameBatch({
					vaultId: settings.vaultId,
					renames: renames.map((r) => ({ oldPath: r.oldPath, newPath: r.newPath })),
				});
				// NUL can't appear in a vault path, so it's a safe composite-key separator.
				const failed = new Set(
					resp.failed.map((f) => `${f.oldPath}\u0000${f.newPath}`),
				);
				for (const r of renames) {
					if (failed.has(`${r.oldPath}\u0000${r.newPath}`)) retry.push(r);
					else acked.push(r);
				}
			} catch (err) {
				if (await this.handleVaultNotRegistered(err)) {
					retry.push(...renames);
				} else {
					const verdict = this.classify(err);
					if (verdict === "stop")
						return { acked, retry: [...retry, ...renames], dropped, stop: true };
					if (verdict === "retry") retry.push(...renames);
					else dropped.push(...renames);
				}
			}
		}

		if (deletes.length > 0) {
			try {
				const resp = await this.deps.apiClient.deleteBatch({
					vaultId: settings.vaultId,
					paths: deletes.map((d) => d.path),
				});
				const failed = new Set(resp.failed);
				for (const d of deletes) {
					if (failed.has(d.path)) retry.push(d);
					else acked.push(d);
				}
			} catch (err) {
				if (await this.handleVaultNotRegistered(err)) {
					retry.push(...deletes);
				} else {
					const verdict = this.classify(err);
					if (verdict === "stop")
						return { acked, retry: [...retry, ...deletes], dropped, stop: true };
					if (verdict === "retry") retry.push(...deletes);
					else dropped.push(...deletes);
				}
			}
		}

		if (upserts.length > 0) {
			const payloads: NotePayload[] = [];
			for (const item of upserts) {
				const file = this.deps.app.vault.getAbstractFileByPath(item.path);
				if (!file || !isTFile(file)) {
					// File vanished; treat as ack (delete will follow if user removed it).
					acked.push(item);
					continue;
				}
				try {
					const payload = this.isMarkdown(file)
						? await buildNotePayload(this.deps.app, file, settings.vaultId)
						: await this.buildBinaryPayload(file, settings.vaultId);
					payloads.push(payload);
				} catch (err) {
					console.error("SurfSense: failed to build payload", item.path, err);
					retry.push(item);
				}
			}

			if (payloads.length > 0) {
				try {
					const resp = await this.deps.apiClient.syncBatch({
						vaultId: settings.vaultId,
						notes: payloads,
					});
					// Per-note failures retry; the queue's maxAttempts eventually drops poison pills.
					const failed = new Set(resp.failed);
					for (const item of upserts) {
						// Skip items the payload-build loop already settled (vanished →
						// acked, build error → retry); re-pushing them would double-count
						// acks in the drain summary.
						if (retry.includes(item) || acked.includes(item)) continue;
						if (failed.has(item.path)) retry.push(item);
						else acked.push(item);
					}
				} catch (err) {
					// Only re-dispatch items the payload-build loop hasn't settled yet.
					const unsettled = upserts.filter(
						(u) => !retry.includes(u) && !acked.includes(u),
					);
					if (await this.handleVaultNotRegistered(err)) {
						retry.push(...unsettled);
					} else {
						const verdict = this.classify(err);
						if (verdict === "stop")
							return { acked, retry: [...retry, ...unsettled], dropped, stop: true };
						if (verdict === "retry") retry.push(...unsettled);
						else dropped.push(...unsettled);
					}
				}
			}
		}

		return { acked, retry, dropped, stop: false };
	}

	private async buildBinaryPayload(file: TFile, vaultId: string): Promise<NotePayload> {
		// Plain attachments don't go through buildNotePayload (no markdown
		// metadata to extract). We still need a stable hash + file stat so
		// the backend can de-dupe and the manifest diff still works.
		const buf = await this.deps.app.vault.readBinary(file);
		const digest = await crypto.subtle.digest("SHA-256", buf);
		const hash = bufferToHex(digest);
		return {
			vault_id: vaultId,
			path: file.path,
			name: file.basename,
			extension: file.extension,
			content: "",
			frontmatter: {},
			tags: [],
			headings: [],
			resolved_links: [],
			unresolved_links: [],
			embeds: [],
			aliases: [],
			content_hash: hash,
			size: file.stat.size,
			mtime: file.stat.mtime,
			ctime: file.stat.ctime,
			is_binary: true,
		};
	}

	// ---- reconcile --------------------------------------------------------

	async maybeReconcile(force = false): Promise<void> {
		const settings = this.deps.getSettings();
		if (!settings.connectorId) return;
		if (!force && settings.lastReconcileAt) {
			if (Date.now() - settings.lastReconcileAt < RECONCILE_MIN_INTERVAL_MS) return;
		}

		this.setStatus("syncing", "Reconciling vault with server…");
		try {
			const manifest = await this.deps.apiClient.getManifest(settings.vaultId);
			const remote = manifest.items ?? {};
			const enqueued = this.diffAndQueue(settings, remote);
			await this.deps.saveSettings((s) => {
				s.lastReconcileAt = Date.now();
				s.tombstones = pruneTombstones(s.tombstones);
			});
			this.updateIdleStreak(enqueued);
			await this.flushQueue();
		} catch (err) {
			this.classifyAndStatus(err, "Reconcile failed");
		}
	}

	/**
	 * Compare local vault to server manifest and enqueue diffs.
	 *
	 * Performance: short-circuits on `mtime + size` for every file. We trust the
	 * pair as a "no change" signal because (a) content edits move mtime, and
	 * (b) same-mtime/different-content requires deliberate filesystem trickery.
	 * False positives (mtime moved, content identical) collapse to a no-op
	 * upsert on the server via its `content_hash` check. Net effect: zero disk
	 * reads on idle reconciles.
	 *
	 * Returns the number of items enqueued so the caller can drive the
	 * adaptive backoff.
	 */
	private diffAndQueue(
		settings: SyncEngineSettings,
		remote: Record<string, ManifestEntry>,
	): number {
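		// Per-file decision, summarising the branches below:
		//   no remote entry                           → upsert
		//   local mtime ≤ remote + 1s AND sizes equal → skip
		//   anything else (including missing size)    → upsert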
		const localFiles = this.deps.app.vault.getFiles().filter((f) => {
			if (!this.shouldTrack(f)) return false;
			if (this.isExcluded(f.path, settings)) return false;
			return true;
		});
		const localPaths = new Set(localFiles.map((f) => f.path));
		let enqueued = 0;

		for (const file of localFiles) {
			const remoteEntry = remote[file.path];
			if (!remoteEntry) {
				this.deps.queue.enqueueUpsert(file.path);
				enqueued++;
				continue;
			}
			const remoteMtimeMs = toMillis(remoteEntry.mtime);
			const mtimeMatches = file.stat.mtime <= remoteMtimeMs + 1000;
			// Older server rows lack `size`; treat as "unknown" → fall through to upsert.
			const sizeMatches =
				typeof remoteEntry.size === "number" && file.stat.size === remoteEntry.size;
			if (mtimeMatches && sizeMatches) continue;
			this.deps.queue.enqueueUpsert(file.path);
			enqueued++;
		}

		// Remote-only → delete, but only if NOT a fresh tombstone (which
		// the queue will deliver) and NOT a path we already plan to upsert.
		for (const path of Object.keys(remote)) {
			if (localPaths.has(path)) continue;
			const tombstone = settings.tombstones[path];
			if (tombstone && Date.now() - tombstone < TOMBSTONE_TTL_MS) continue;
			this.deps.queue.enqueueDelete(path);
			enqueued++;
		}

		return enqueued;
	}

	/** Bump (idle) or reset (active) the streak; notify only when the cap-aware multiplier changes. */
	private updateIdleStreak(enqueued: number): void {
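		// Worked example with the cap at 8 (log2 = 3): a streak moving 2 → 3
		// changes the capped value and notifies; 3 → 4 stays capped at 3, so a
		// maxed-out backoff produces no reschedule churn.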
		const previousStreak = this.idleReconcileStreak;
		if (enqueued === 0) this.idleReconcileStreak++;
		else this.idleReconcileStreak = 0;
		const cap = Math.log2(this.maxBackoffMultiplier);
		const cappedPrev = Math.min(previousStreak, cap);
		const cappedNow = Math.min(this.idleReconcileStreak, cap);
		if (cappedPrev !== cappedNow) this.deps.onReconcileBackoffChanged?.();
	}

	/** Vault edit happened — drop back to the base interval immediately. */
	private resetIdleStreak(): void {
		if (this.idleReconcileStreak === 0) return;
		this.idleReconcileStreak = 0;
		this.deps.onReconcileBackoffChanged?.();
	}

	// ---- status helpers ---------------------------------------------------

	private setStatus(kind: StatusKind, detail?: string): void {
		this.deps.setStatus({ kind, detail, queueDepth: this.deps.queue.size });
	}

	private queueStatusKind(): StatusKind {
		if (this.deps.queue.size > 0) return "queued";
		return "idle";
	}

	private statusDetail(): string | undefined {
		const settings = this.deps.getSettings();
		if (settings.lastSyncAt) {
			return `Last sync ${formatRelative(settings.lastSyncAt)}`;
		}
		return undefined;
	}

	private handleStartupError(err: unknown): void {
		if (err instanceof AuthError) {
			this.setStatus("auth-error", err.message);
			return;
		}
		if (err instanceof TransientError) {
			this.setStatus("offline", err.message);
			return;
		}
		this.setStatus("error", (err as Error).message ?? "Unknown error");
	}

	/** Re-connect on VAULT_NOT_REGISTERED so the next drain sees the new row. */
	private async handleVaultNotRegistered(err: unknown): Promise<boolean> {
		if (!(err instanceof VaultNotRegisteredError)) return false;
		console.warn("SurfSense: vault not registered, re-connecting before retry", err);
		await this.ensureConnected();
		return true;
	}

	private classify(err: unknown): "ack" | "retry" | "drop" | "stop" {
		if (err instanceof AuthError) {
			this.setStatus("auth-error", err.message);
			return "stop";
		}
		if (err instanceof TransientError) {
			this.setStatus("offline", err.message);
			return "stop";
		}
		if (err instanceof PermanentError) {
			console.warn("SurfSense: permanent error, dropping batch", err);
			new Notice(`SurfSense: ${err.message}`);
			return "drop";
		}
		console.error("SurfSense: unknown error", err);
		return "retry";
	}

	private classifyAndStatus(err: unknown, prefix: string): void {
		this.classify(err);
		this.setStatus(this.queueStatusKind(), `${prefix}: ${(err as Error).message}`);
	}

	// ---- predicates -------------------------------------------------------

	private shouldTrack(file: TAbstractFile): boolean {
		if (!isTFile(file)) return false;
		const settings = this.deps.getSettings();
		if (!settings.includeAttachments && !this.isMarkdown(file)) return false;
		return true;
	}

	private isExcluded(path: string, settings: SyncEngineSettings): boolean {
		if (isFolderFiltered(path, settings.includeFolders, settings.excludeFolders)) {
			return true;
		}
		return isExcluded(path, settings.excludePatterns);
	}

	private isMarkdown(file: TAbstractFile): boolean {
		return isTFile(file) && file.extension.toLowerCase() === "md";
	}
}

function isTFile(f: TAbstractFile): f is TFile {
	return f instanceof TFile;
}

function bufferToHex(buf: ArrayBuffer): string {
	const view = new Uint8Array(buf);
	let hex = "";
	for (let i = 0; i < view.length; i++) hex += (view[i] ?? 0).toString(16).padStart(2, "0");
	return hex;
}

function formatRelative(ts: number): string {
	const diff = Date.now() - ts;
	if (diff < 60_000) return "just now";
	if (diff < 3600_000) return `${Math.round(diff / 60_000)}m ago`;
	if (diff < 86_400_000) return `${Math.round(diff / 3600_000)}h ago`;
	return `${Math.round(diff / 86_400_000)}d ago`;
}

/** Manifest mtimes are Pydantic-serialised ISO strings; vault stats are epoch ms. Normalise to ms. */
function toMillis(value: number | string | Date): number {
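	// e.g. toMillis(1713550000000) stays 1713550000000 (already epoch ms), and an
	// ISO string goes through Date.parse. Unparsable values return 0, which reads
	// upstream as "remote is ancient" and safely falls through to an upsert.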
	if (typeof value === "number") return value;
	if (value instanceof Date) return value.getTime();
	const parsed = Date.parse(value);
	return Number.isFinite(parsed) ? parsed : 0;
}

function pruneTombstones(tombstones: Record<string, number>): Record<string, number> {
	const out: Record<string, number> = {};
	const cutoff = Date.now() - TOMBSTONE_TTL_MS;
	for (const [k, v] of Object.entries(tombstones)) {
		if (v >= cutoff) out[k] = v;
	}
	return out;
}