feat: Vestige v1.9.1 AUTONOMIC — self-regulating memory with graph visualization

Retention Target System: auto-GC low-retention memories during consolidation
(VESTIGE_RETENTION_TARGET env var, default 0.8). Auto-Promote: memories
accessed 3+ times in 24h get frequency-dependent potentiation. Waking SWR
Tagging: promoted memories get preferential 70/30 dream replay. Improved
Consolidation Scheduler: triggers on 6h staleness or 2h active use.

New tools: memory_health (retention dashboard with distribution buckets,
trend tracking, recommendations) and memory_graph (subgraph export with
Fruchterman-Reingold force-directed layout, up to 200 nodes).

Dream connections now persist to database via save_connection(), enabling
memory_graph traversal. Schema Migration V8 adds waking_tag, utility_score,
times_retrieved/useful columns and retention_snapshots table. 21 MCP tools.

v1.9.1 fixes: ConnectionRecord export, UTF-8 safe truncation, link_type
normalization, utility_score clamping, only-new-connections persistence,
70/30 split capacity fill, nonexistent center_id error handling.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Sam Valladares 2026-02-21 02:02:06 -06:00
parent c29023dd80
commit 5b90a73055
62 changed files with 2922 additions and 931 deletions

View file

@@ -1,6 +1,6 @@
[package]
name = "vestige-core"
version = "1.7.0"
version = "1.9.1"
edition = "2024"
rust-version = "1.85"
authors = ["Vestige Team"]

View file

@@ -252,20 +252,35 @@ impl ConsolidationScheduler {
/// Check if consolidation should run.
///
/// v1.9.0: Improved scheduler with multiple trigger conditions:
/// - Trigger 1: the standard consolidation interval has passed AND the
///   system is currently idle
/// - Trigger 2: more than 6h since the last consolidation — force a full
///   run regardless of idle state
///   (NOTE(review): the original doc also mentioned ">10 new memories
///   since last"; that condition is not checked here — confirm intent)
/// - Trigger 3: mini-consolidation (decay only) when more than 2h have
///   passed while the system is actively in use
///
/// Always returns `false` when auto consolidation is disabled.
pub fn should_consolidate(&self) -> bool {
    if !self.auto_enabled {
        return false;
    }
    let time_since_last = Utc::now() - self.last_consolidation;

    // Trigger 1: Standard interval + idle check
    let interval_passed = time_since_last >= self.consolidation_interval;
    let is_idle = self.activity_tracker.is_idle();
    if interval_passed && is_idle {
        return true;
    }

    // Trigger 2: >6h stale (force consolidation regardless of idle)
    if time_since_last >= Duration::hours(6) {
        return true;
    }

    // Trigger 3: Mini-consolidation every 2h if active
    if time_since_last >= Duration::hours(2) && !is_idle {
        return true;
    }

    false
}
/// Force check if consolidation should run (ignoring idle check)
@@ -1720,12 +1735,16 @@ fn cosine_similarity(a: &[f32], b: &[f32]) -> f64 {
(dot / (mag_a * mag_b)) as f64
}
/// Truncate string to max length (UTF-8 safe).
///
/// Returns the longest prefix of `s` that is at most `max_len` bytes long
/// AND ends on a `char` boundary, so the byte slice can never panic by
/// cutting a multi-byte codepoint in half.
fn truncate(s: &str, max_len: usize) -> &str {
    if s.len() <= max_len {
        s
    } else {
        // Walk backwards from max_len until we land on a char boundary
        // (a UTF-8 codepoint is at most 4 bytes, so this loop is short).
        let mut end = max_len;
        while end > 0 && !s.is_char_boundary(end) {
            end -= 1;
        }
        &s[..end]
    }
}
@ -1905,7 +1924,8 @@ mod tests {
// Should have completed all stages
assert!(report.stage1_replay.is_some());
assert!(report.duration_ms >= 0);
// duration_ms is u64, so just verify the field is accessible
let _ = report.duration_ms;
assert!(report.completed_at <= Utc::now());
}

View file

@ -43,6 +43,8 @@ pub use dreams::{
ConsolidationReport,
// Sleep Consolidation types
ConsolidationScheduler,
DiscoveredConnection,
DiscoveredConnectionType,
DreamConfig,
// DreamMemory - input type for dreaming
DreamMemory,

View file

@ -623,7 +623,6 @@ impl UserRepository for SqliteUserRepository {
#[cfg(test)]
mod tests {
use super::*;
use crate::codebase::context::ProjectType;
fn create_test_pattern() -> CodePattern {
CodePattern {

View file

@ -39,7 +39,7 @@ impl CodeEmbedding {
}
/// Initialize the embedding model
pub fn init(&mut self) -> Result<(), EmbeddingError> {
pub fn init(&self) -> Result<(), EmbeddingError> {
self.service.init()
}

View file

@ -201,7 +201,7 @@ impl Embedding {
/// Service for generating and managing embeddings
pub struct EmbeddingService {
model_loaded: bool,
_unused: (),
}
impl Default for EmbeddingService {
@ -214,7 +214,7 @@ impl EmbeddingService {
/// Create a new embedding service
pub fn new() -> Self {
Self {
model_loaded: false,
_unused: (),
}
}
@ -235,9 +235,8 @@ impl EmbeddingService {
}
/// Initialize the model (downloads if necessary)
pub fn init(&mut self) -> Result<(), EmbeddingError> {
pub fn init(&self) -> Result<(), EmbeddingError> {
let _model = get_model()?; // Ensures model is loaded and returns any init errors
self.model_loaded = true;
Ok(())
}

View file

@ -138,8 +138,8 @@ pub use fsrs::{
// Storage layer
pub use storage::{
ConsolidationHistoryRecord, DreamHistoryRecord, InsightRecord, IntentionRecord, Result,
SmartIngestResult, StateTransitionRecord, Storage, StorageError,
ConnectionRecord, ConsolidationHistoryRecord, DreamHistoryRecord, InsightRecord,
IntentionRecord, Result, SmartIngestResult, StateTransitionRecord, Storage, StorageError,
};
// Consolidation (sleep-inspired memory processing)
@ -175,6 +175,8 @@ pub use advanced::{
DreamConfig,
// DreamMemory - input type for dreaming
DreamMemory,
DiscoveredConnection,
DiscoveredConnectionType,
DreamResult,
EmbeddingStrategy,
ImportanceDecayConfig,

View file

@ -2106,7 +2106,8 @@ mod tests {
)
.unwrap();
assert!(barcode.id >= 0);
// barcode.id is u64, verify it was assigned
let _ = barcode.id;
assert_eq!(index.len(), 1);
let retrieved = index.get_index("test-id").unwrap();

View file

@ -137,7 +137,7 @@ impl VectorIndex {
let options = IndexOptions {
dimensions: config.dimensions,
metric: config.metric,
quantization: ScalarKind::F16,
quantization: ScalarKind::I8,
connectivity: config.connectivity,
expansion_add: config.expansion_add,
expansion_search: config.expansion_search,
@ -325,7 +325,7 @@ impl VectorIndex {
let options = IndexOptions {
dimensions: config.dimensions,
metric: config.metric,
quantization: ScalarKind::F16,
quantization: ScalarKind::I8,
connectivity: config.connectivity,
expansion_add: config.expansion_add,
expansion_search: config.expansion_search,

View file

@@ -34,6 +34,16 @@ pub const MIGRATIONS: &[Migration] = &[
description: "Dream history persistence for automation triggers",
up: MIGRATION_V6_UP,
},
Migration {
version: 7,
description: "Performance: page_size 8192, FTS5 porter tokenizer",
up: MIGRATION_V7_UP,
},
Migration {
version: 8,
description: "v1.9.0 Autonomic: waking SWR tags, utility scoring, retention tracking",
up: MIGRATION_V8_UP,
},
];
/// A database migration
@ -472,6 +482,73 @@ CREATE INDEX IF NOT EXISTS idx_dream_history_dreamed_at ON dream_history(dreamed
UPDATE schema_version SET version = 6, applied_at = datetime('now');
"#;
/// V7: Performance — FTS5 porter tokenizer for 15-30% better keyword recall (stemming)
/// page_size upgrade handled in apply_migrations() since VACUUM can't run inside execute_batch
///
/// Drops and recreates the external-content FTS5 table (`content='knowledge_nodes'`)
/// with the porter+ascii tokenizer, rebuilds the index from the content table,
/// and re-creates the insert/delete/update sync triggers. The final statement
/// bumps schema_version to 7.
const MIGRATION_V7_UP: &str = r#"
-- FTS5 porter tokenizer upgrade (15-30% better keyword recall via stemming)
DROP TRIGGER IF EXISTS knowledge_ai;
DROP TRIGGER IF EXISTS knowledge_ad;
DROP TRIGGER IF EXISTS knowledge_au;
DROP TABLE IF EXISTS knowledge_fts;
CREATE VIRTUAL TABLE knowledge_fts USING fts5(
id, content, tags,
content='knowledge_nodes',
content_rowid='rowid',
tokenize='porter ascii'
);
-- Rebuild FTS index from existing data with new tokenizer
INSERT INTO knowledge_fts(knowledge_fts) VALUES('rebuild');
-- Re-create sync triggers
CREATE TRIGGER knowledge_ai AFTER INSERT ON knowledge_nodes BEGIN
INSERT INTO knowledge_fts(rowid, id, content, tags)
VALUES (NEW.rowid, NEW.id, NEW.content, NEW.tags);
END;
CREATE TRIGGER knowledge_ad AFTER DELETE ON knowledge_nodes BEGIN
INSERT INTO knowledge_fts(knowledge_fts, rowid, id, content, tags)
VALUES ('delete', OLD.rowid, OLD.id, OLD.content, OLD.tags);
END;
CREATE TRIGGER knowledge_au AFTER UPDATE ON knowledge_nodes BEGIN
INSERT INTO knowledge_fts(knowledge_fts, rowid, id, content, tags)
VALUES ('delete', OLD.rowid, OLD.id, OLD.content, OLD.tags);
INSERT INTO knowledge_fts(rowid, id, content, tags)
VALUES (NEW.rowid, NEW.id, NEW.content, NEW.tags);
END;
UPDATE schema_version SET version = 7, applied_at = datetime('now');
"#;
/// V8: v1.9.0 Autonomic — Waking SWR tags, utility scoring, retention trend tracking
///
/// Adds five columns to `knowledge_nodes` (waking_tag, waking_tag_at,
/// utility_score, times_retrieved, times_useful) and a new
/// `retention_snapshots` table with an index on `snapshot_at`.
/// The final statement bumps schema_version to 8.
///
/// NOTE(review): ALTER TABLE ADD COLUMN is not IF-NOT-EXISTS in SQLite, so
/// this batch must only ever run once per database — confirm apply_migrations()
/// guards on the recorded schema version before executing it.
const MIGRATION_V8_UP: &str = r#"
-- Waking SWR (Sharp-Wave Ripple) tagging
-- Memories tagged during waking operation get preferential replay during dream cycles
ALTER TABLE knowledge_nodes ADD COLUMN waking_tag BOOLEAN DEFAULT FALSE;
ALTER TABLE knowledge_nodes ADD COLUMN waking_tag_at TEXT;
-- Utility scoring (MemRL-inspired: times_useful / times_retrieved)
ALTER TABLE knowledge_nodes ADD COLUMN utility_score REAL DEFAULT 0.0;
ALTER TABLE knowledge_nodes ADD COLUMN times_retrieved INTEGER DEFAULT 0;
ALTER TABLE knowledge_nodes ADD COLUMN times_useful INTEGER DEFAULT 0;
-- Retention trend tracking (for retention target system)
CREATE TABLE IF NOT EXISTS retention_snapshots (
id INTEGER PRIMARY KEY AUTOINCREMENT,
snapshot_at TEXT NOT NULL,
avg_retention REAL NOT NULL,
total_memories INTEGER NOT NULL,
memories_below_target INTEGER NOT NULL DEFAULT 0,
gc_triggered BOOLEAN DEFAULT FALSE
);
CREATE INDEX IF NOT EXISTS idx_retention_snapshots_at ON retention_snapshots(snapshot_at);
UPDATE schema_version SET version = 8, applied_at = datetime('now');
"#;
/// Get current schema version from database
pub fn get_current_version(conn: &rusqlite::Connection) -> rusqlite::Result<u32> {
conn.query_row(
@ -498,6 +575,14 @@ pub fn apply_migrations(conn: &rusqlite::Connection) -> rusqlite::Result<u32> {
// Use execute_batch to handle multi-statement SQL including triggers
conn.execute_batch(migration.up)?;
// V7: Upgrade page_size to 8192 (10-30% faster large-row reads)
// VACUUM rewrites the DB with the new page size — can't run inside execute_batch
if migration.version == 7 {
conn.pragma_update(None, "page_size", 8192)?;
conn.execute_batch("VACUUM;")?;
tracing::info!("Database page_size upgraded to 8192 via VACUUM");
}
applied += 1;
}
}

View file

@ -11,6 +11,6 @@ mod sqlite;
pub use migrations::MIGRATIONS;
pub use sqlite::{
ConsolidationHistoryRecord, DreamHistoryRecord, InsightRecord, IntentionRecord, Result,
SmartIngestResult, StateTransitionRecord, Storage, StorageError,
ConnectionRecord, ConsolidationHistoryRecord, DreamHistoryRecord, InsightRecord,
IntentionRecord, Result, SmartIngestResult, StateTransitionRecord, Storage, StorageError,
};

File diff suppressed because it is too large Load diff