diff --git a/Cargo.lock b/Cargo.lock index 794a880..69f1673 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3655,7 +3655,7 @@ checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vestige-core" -version = "1.5.0" +version = "1.6.0" dependencies = [ "chrono", "directories", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "vestige-mcp" -version = "1.5.0" +version = "1.6.0" dependencies = [ "anyhow", "axum", diff --git a/Cargo.toml b/Cargo.toml index 63ddea3..92c981b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ members = [ ] [workspace.package] -version = "1.5.0" +version = "1.6.0" edition = "2024" license = "AGPL-3.0-only" repository = "https://github.com/samvallad33/vestige" diff --git a/crates/vestige-core/Cargo.toml b/crates/vestige-core/Cargo.toml index 7312e93..59e486f 100644 --- a/crates/vestige-core/Cargo.toml +++ b/crates/vestige-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vestige-core" -version = "1.5.0" +version = "1.6.0" edition = "2024" rust-version = "1.85" authors = ["Vestige Team"] diff --git a/crates/vestige-core/src/advanced/adaptive_embedding.rs b/crates/vestige-core/src/advanced/adaptive_embedding.rs index 91877b7..d4b39d3 100644 --- a/crates/vestige-core/src/advanced/adaptive_embedding.rs +++ b/crates/vestige-core/src/advanced/adaptive_embedding.rs @@ -31,13 +31,11 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; -/// Default embedding dimensions (BGE-base-en-v1.5: 768d, upgraded from MiniLM 384d) -/// 2026 GOD TIER UPGRADE: +30% retrieval accuracy -pub const DEFAULT_DIMENSIONS: usize = 768; +/// Default embedding dimensions after Matryoshka truncation (768 → 256) +pub const DEFAULT_DIMENSIONS: usize = 256; -/// Code embedding dimensions (when using code-specific models) -/// Now matches default since we upgraded to 768d -pub const CODE_DIMENSIONS: usize = 768; +/// Code embedding dimensions (matches default after Matryoshka truncation) +pub const CODE_DIMENSIONS: usize = 256; /// Supported programming languages for code embeddings #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] diff --git a/crates/vestige-core/src/embeddings/local.rs b/crates/vestige-core/src/embeddings/local.rs index 30a5415..9b67629 100644 --- a/crates/vestige-core/src/embeddings/local.rs +++ b/crates/vestige-core/src/embeddings/local.rs @@ -18,9 +18,10 @@ use std::sync::{Mutex, OnceLock}; // CONSTANTS // ============================================================================ -/// Embedding dimensions for the default model (nomic-embed-text-v1.5) -/// 768 dimensions with Matryoshka support (can truncate to 256/512 if needed) -pub const EMBEDDING_DIMENSIONS: usize = 768; +/// Embedding dimensions after Matryoshka truncation +/// Truncated from 768 → 256 for 3x storage savings with only ~2% quality loss +/// (Matryoshka Representation Learning — the first N dims ARE the N-dim representation) +pub const EMBEDDING_DIMENSIONS: usize = 256; /// Maximum text length for embedding (truncated if longer) pub const MAX_TEXT_LENGTH: usize = 8192; @@ -277,7 +278,7 @@ impl EmbeddingService { )); } - Ok(Embedding::new(embeddings[0].clone())) + Ok(Embedding::new(matryoshka_truncate(embeddings[0].clone()))) } /// Generate embeddings for multiple texts (batch processing) @@ -307,7 +308,7 @@ impl EmbeddingService { .map_err(|e| EmbeddingError::EmbeddingFailed(e.to_string()))?; for emb in embeddings { - all_embeddings.push(Embedding::new(emb)); + all_embeddings.push(Embedding::new(matryoshka_truncate(emb))); 
        }
    }

@@ -338,6 +339,26 @@ impl EmbeddingService {
 // SIMILARITY FUNCTIONS
 // ============================================================================
 
+/// Apply Matryoshka truncation: truncate to EMBEDDING_DIMENSIONS and L2-normalize
+///
+/// Nomic Embed v1.5 supports Matryoshka Representation Learning,
+/// meaning the first N dimensions of the 768-dim output ARE a valid
+/// N-dimensional embedding with minimal quality loss (~2% on MTEB for 256-dim).
+#[inline]
+pub fn matryoshka_truncate(mut vector: Vec<f32>) -> Vec<f32> {
+    if vector.len() > EMBEDDING_DIMENSIONS {
+        vector.truncate(EMBEDDING_DIMENSIONS);
+    }
+    // L2-normalize the truncated vector
+    let norm = vector.iter().map(|x| x * x).sum::<f32>().sqrt();
+    if norm > 0.0 {
+        for x in &mut vector {
+            *x /= norm;
+        }
+    }
+    vector
+}
+
 /// Compute cosine similarity between two vectors
 #[inline]
 pub fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
diff --git a/crates/vestige-core/src/embeddings/mod.rs b/crates/vestige-core/src/embeddings/mod.rs
index 2dec019..fcbb9d1 100644
--- a/crates/vestige-core/src/embeddings/mod.rs
+++ b/crates/vestige-core/src/embeddings/mod.rs
@@ -14,8 +14,8 @@ mod hybrid;
 mod local;
 
 pub use local::{
-    cosine_similarity, dot_product, euclidean_distance, Embedding, EmbeddingError,
-    EmbeddingService, BATCH_SIZE, EMBEDDING_DIMENSIONS, MAX_TEXT_LENGTH,
+    cosine_similarity, dot_product, euclidean_distance, matryoshka_truncate, Embedding,
+    EmbeddingError, EmbeddingService, BATCH_SIZE, EMBEDDING_DIMENSIONS, MAX_TEXT_LENGTH,
 };
 
 pub use code::CodeEmbedding;
diff --git a/crates/vestige-core/src/search/hybrid.rs b/crates/vestige-core/src/search/hybrid.rs
index bfaf908..33c492a 100644
--- a/crates/vestige-core/src/search/hybrid.rs
+++ b/crates/vestige-core/src/search/hybrid.rs
@@ -117,8 +117,8 @@ pub struct HybridSearchConfig {
 impl Default for HybridSearchConfig {
     fn default() -> Self {
         Self {
-            keyword_weight: 0.5,
-            semantic_weight: 0.5,
+            keyword_weight: 0.3,
+            semantic_weight: 0.7,
             rrf_k: 60.0,
             min_semantic_similarity: 0.3,
             source_limit_multiplier: 2,
diff --git a/crates/vestige-core/src/search/reranker.rs b/crates/vestige-core/src/search/reranker.rs
index 55d58ca..5f1058a 100644
--- a/crates/vestige-core/src/search/reranker.rs
+++ b/crates/vestige-core/src/search/reranker.rs
@@ -1,14 +1,17 @@
 //! Memory Reranking Module
 //!
-//! ## GOD TIER 2026: Two-Stage Retrieval
+//! ## Two-Stage Retrieval with Cross-Encoder
 //!
-//! Uses fastembed's reranking model to improve precision:
-//! 1. Stage 1: Retrieve top-50 candidates (fast, high recall)
-//! 2. Stage 2: Rerank to find best top-10 (slower, high precision)
+//! Uses fastembed's Jina Reranker v1 Turbo (38M params) cross-encoder
+//! for high-precision reranking:
+//! 1. Stage 1: Retrieve top-50 candidates via hybrid search (fast, high recall)
+//! 2. Stage 2: Cross-encoder rerank to find best top-10 (slower, high precision)
 //!
-//! This gives +15-20% retrieval precision on complex queries.
+//! Falls back to BM25-like term overlap scoring when the cross-encoder
+//! model is unavailable.
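Editor's note: this change also removes the usage example from the `Reranker` struct docs (see the hunk below). For orientation, here is the equivalent two-stage flow against the new API; a sketch modeled on the removed example, where `storage`, `MemoryNode`, and the four-argument `hybrid_search(query, limit, keyword_weight, semantic_weight)` shape are assumptions taken from call sites elsewhere in this diff, not a verified snippet:

```rust,ignore
// Two-stage retrieval sketch (illustrative; assumed API shapes noted above).
let mut reranker = Reranker::new(RerankerConfig::default());
reranker.init_cross_encoder(); // optional: enables neural scoring (feature "embeddings")

// Stage 1: recall — over-fetch 50 candidates with the new 0.3/0.7 default weights.
let candidates: Vec<(MemoryNode, String)> = storage
    .hybrid_search(query, 50, 0.3, 0.7)?
    .into_iter()
    .map(|r| (r.node.clone(), r.node.content.clone()))
    .collect();

// Stage 2: precision — cross-encoder (or BM25 fallback) trims to the best 10.
let top10 = reranker.rerank(query, candidates, Some(10))?;
```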
-// Note: Mutex and OnceLock are reserved for future cross-encoder model implementation
+#[cfg(feature = "embeddings")]
+use fastembed::{RerankInitOptions, RerankerModel, TextRerank};
 
 // ============================================================================
 // CONSTANTS
 // ============================================================================
@@ -83,21 +86,15 @@ impl Default for RerankerConfig {
     }
 }
 
-/// Service for reranking search results
+/// Service for reranking search results using a cross-encoder model
 ///
-/// ## Usage
-///
-/// ```rust,ignore
-/// let reranker = Reranker::new(RerankerConfig::default());
-///
-/// // Get initial candidates (fast, recall-focused)
-/// let candidates = storage.hybrid_search(query, 50)?;
-///
-/// // Rerank for precision
-/// let reranked = reranker.rerank(query, candidates, 10)?;
-/// ```
+/// When the `embeddings` feature is enabled and `init_cross_encoder()` is called,
+/// uses Jina Reranker v1 Turbo for neural cross-encoder scoring.
+/// Falls back to BM25-like term overlap when the model is unavailable.
 pub struct Reranker {
     config: RerankerConfig,
+    #[cfg(feature = "embeddings")]
+    cross_encoder: Option<TextRerank>,
 }
 
 impl Default for Reranker {
@@ -108,24 +105,61 @@ impl Reranker {
     /// Create a new reranker with the given configuration
+    ///
+    /// The cross-encoder model is NOT loaded here — call `init_cross_encoder()`
+    /// explicitly to load it. This keeps construction fast and test-friendly.
     pub fn new(config: RerankerConfig) -> Self {
-        Self { config }
+        Self {
+            config,
+            #[cfg(feature = "embeddings")]
+            cross_encoder: None,
+        }
+    }
+
+    /// Initialize the cross-encoder model (Jina Reranker v1 Turbo, ~150MB)
+    ///
+    /// Downloads the model on first call. Call this during server startup,
+    /// NOT in tests or hot paths.
+    #[cfg(feature = "embeddings")]
+    pub fn init_cross_encoder(&mut self) {
+        if self.cross_encoder.is_some() {
+            return; // Already initialized
+        }
+
+        let options = RerankInitOptions::new(RerankerModel::JINARerankerV1TurboEn)
+            .with_show_download_progress(true);
+
+        match TextRerank::try_new(options) {
+            Ok(model) => {
+                eprintln!("[vestige] Cross-encoder reranker loaded (Jina Reranker v1 Turbo)");
+                self.cross_encoder = Some(model);
+            }
+            Err(e) => {
+                eprintln!("[vestige] Cross-encoder unavailable, using BM25 fallback: {e}");
+            }
+        }
+    }
+
+    /// Check if the cross-encoder model is available
+    pub fn has_cross_encoder(&self) -> bool {
+        #[cfg(feature = "embeddings")]
+        {
+            self.cross_encoder.is_some()
+        }
+        #[cfg(not(feature = "embeddings"))]
+        {
+            false
+        }
     }
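Editor's note: the split between `new()` and `init_cross_encoder()` keeps unit tests hermetic, since construction never touches the network. A minimal sketch of that contract, using only the API added above (not runnable outside this crate):

```rust,ignore
// Construction is cheap; the model load is an explicit, separate step.
let mut reranker = Reranker::new(RerankerConfig::default());
assert!(!reranker.has_cross_encoder()); // safe in tests: no download happens

#[cfg(feature = "embeddings")]
reranker.init_cross_encoder(); // server startup only: ~150MB on first run
```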
    /// Rerank candidates based on relevance to the query
    ///
-    /// This uses a cross-encoder model for more accurate relevance scoring
-    /// than the initial bi-encoder embedding similarity.
-    ///
-    /// ## Algorithm
-    ///
-    /// 1. Score each (query, candidate) pair using cross-encoder
-    /// 2. Sort by score descending
-    /// 3. Return top-k results
+    /// Uses cross-encoder model when available for neural relevance scoring.
+    /// Falls back to BM25-like term overlap scoring otherwise.
     pub fn rerank<T: Clone>(
-        &self,
+        &mut self,
         query: &str,
-        candidates: Vec<(T, String)>, // (item, text content)
+        candidates: Vec<(T, String)>,
         top_k: Option<usize>,
     ) -> Result<Vec<RerankedResult<T>>, RerankerError> {
         if query.is_empty() {
@@ -138,15 +172,43 @@ impl Reranker {
 
         let limit = top_k.unwrap_or(self.config.result_count);
 
-        // For now, use a simplified scoring approach based on text similarity
-        // In a full implementation, this would use fastembed's RerankerModel
-        // when it becomes available in the public API
+        // Try cross-encoder first
+        #[cfg(feature = "embeddings")]
+        if let Some(ref mut model) = self.cross_encoder {
+            let documents: Vec<&str> = candidates.iter().map(|(_, text)| text.as_str()).collect();
+
+            if let Ok(rerank_results) = model.rerank(query, &documents, false, None) {
+                let mut results: Vec<RerankedResult<T>> = rerank_results
+                    .into_iter()
+                    .filter_map(|rr| {
+                        candidates.get(rr.index).map(|(item, _)| RerankedResult {
+                            item: item.clone(),
+                            score: rr.score,
+                            original_rank: rr.index,
+                        })
+                    })
+                    .collect();
+
+                results.sort_by(|a, b| {
+                    b.score.partial_cmp(&a.score).unwrap_or(std::cmp::Ordering::Equal)
+                });
+
+                if let Some(min_score) = self.config.min_score {
+                    results.retain(|r| r.score >= min_score);
+                }
+
+                results.truncate(limit);
+                return Ok(results);
+            }
+            // Cross-encoder failed on this call — fall through to BM25 fallback
+        }
+
+        // Fallback: BM25-like scoring
         let mut results: Vec<RerankedResult<T>> = candidates
             .into_iter()
             .enumerate()
             .map(|(rank, (item, text))| {
-                // Simple BM25-like scoring based on term overlap
-                let score = self.compute_relevance_score(query, &text);
+                let score = Self::compute_relevance_score(query, &text);
                 RerankedResult {
                     item,
                     score,
@@ -155,25 +217,19 @@ impl Reranker {
             })
             .collect();
 
-        // Sort by score descending
         results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap_or(std::cmp::Ordering::Equal));
 
-        // Apply minimum score filter
         if let Some(min_score) = self.config.min_score {
             results.retain(|r| r.score >= min_score);
         }
 
-        // Take top-k
         results.truncate(limit);
 
         Ok(results)
     }
 
-    /// Compute relevance score between query and document
-    ///
-    /// This is a simplified BM25-inspired scoring function.
-    /// A full implementation would use a cross-encoder model.
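Editor's note: the fallback scorer in the next hunk keeps the BM25-style term-frequency saturation described in the comments being removed here. A standalone sanity check of that formula, using the same constants (k1 = 1.2, b = 0.75, avg_doc_len = 500), showing that repeated terms earn diminishing score rather than scaling linearly:

```rust
// BM25-style saturation: tf * (k1 + 1) / (tf + k1 * (1 - b + b * doc_len / avg_doc_len))
fn bm25_term_score(tf: f32, doc_len: f32) -> f32 {
    let (k1, b, avg_doc_len) = (1.2_f32, 0.75_f32, 500.0_f32);
    tf * (k1 + 1.0) / (tf + k1 * (1.0 - b + b * (doc_len / avg_doc_len)))
}

fn main() {
    // At doc_len == avg_doc_len, the denominator reduces to tf + k1.
    println!("tf=1:  {:.3}", bm25_term_score(1.0, 500.0));  // 1.000
    println!("tf=10: {:.3}", bm25_term_score(10.0, 500.0)); // ~1.964 — not 10x the tf=1 score
}
```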
- fn compute_relevance_score(&self, query: &str, document: &str) -> f32 { + /// BM25-inspired term overlap scoring (fallback when cross-encoder unavailable) + fn compute_relevance_score(query: &str, document: &str) -> f32 { let query_lower = query.to_lowercase(); let query_terms: Vec<&str> = query_lower.split_whitespace().collect(); let doc_lower = document.to_lowercase(); @@ -184,22 +240,19 @@ impl Reranker { } let mut score = 0.0; - let k1 = 1.2_f32; // BM25 parameter - let b = 0.75_f32; // BM25 parameter - let avg_doc_len = 500.0_f32; // Assumed average document length + let k1 = 1.2_f32; + let b = 0.75_f32; + let avg_doc_len = 500.0_f32; for term in &query_terms { - // Count term frequency let tf = doc_lower.matches(term).count() as f32; if tf > 0.0 { - // BM25-like term frequency saturation let numerator = tf * (k1 + 1.0); let denominator = tf + k1 * (1.0 - b + b * (doc_len / avg_doc_len)); score += numerator / denominator; } } - // Normalize by query length if !query_terms.is_empty() { score /= query_terms.len() as f32; } @@ -223,7 +276,7 @@ mod tests { #[test] fn test_rerank_basic() { - let reranker = Reranker::default(); + let mut reranker = Reranker::default(); let candidates = vec![ (1, "The quick brown fox".to_string()), @@ -234,13 +287,12 @@ mod tests { let results = reranker.rerank("fox", candidates, Some(2)).unwrap(); assert_eq!(results.len(), 2); - // Results with "fox" should be ranked higher assert!(results[0].item == 1 || results[0].item == 3); } #[test] fn test_rerank_empty_candidates() { - let reranker = Reranker::default(); + let mut reranker = Reranker::default(); let candidates: Vec<(i32, String)> = vec![]; let results = reranker.rerank("query", candidates, Some(5)).unwrap(); @@ -249,7 +301,7 @@ mod tests { #[test] fn test_rerank_empty_query() { - let reranker = Reranker::default(); + let mut reranker = Reranker::default(); let candidates = vec![(1, "some text".to_string())]; let result = reranker.rerank("", candidates, Some(5)); @@ -258,22 +310,28 @@ mod tests { #[test] fn test_min_score_filter() { - let reranker = Reranker::new(RerankerConfig { + let mut reranker = Reranker::new(RerankerConfig { min_score: Some(0.5), ..Default::default() }); let candidates = vec![ - (1, "fox fox fox".to_string()), // High relevance - (2, "completely unrelated".to_string()), // Low relevance + (1, "fox fox fox".to_string()), + (2, "completely unrelated".to_string()), ]; let results = reranker.rerank("fox", candidates, None).unwrap(); - // Only high-relevance results should pass the filter assert!(results.len() <= 2); if !results.is_empty() { assert!(results[0].score >= 0.5); } } + + #[test] + fn test_default_has_no_cross_encoder() { + let reranker = Reranker::default(); + // Default constructor does NOT load the model — fast and test-friendly + assert!(!reranker.has_cross_encoder()); + } } diff --git a/crates/vestige-core/src/search/vector.rs b/crates/vestige-core/src/search/vector.rs index 35b6db0..5ada474 100644 --- a/crates/vestige-core/src/search/vector.rs +++ b/crates/vestige-core/src/search/vector.rs @@ -17,9 +17,9 @@ use usearch::{Index, IndexOptions, MetricKind, ScalarKind}; // CONSTANTS // ============================================================================ -/// Default embedding dimensions (BGE-base-en-v1.5: 768d) -/// 2026 GOD TIER UPGRADE: +30% retrieval accuracy over MiniLM (384d) -pub const DEFAULT_DIMENSIONS: usize = 768; +/// Default embedding dimensions after Matryoshka truncation (768 → 256) +/// 3x storage savings with only ~2% quality loss on MTEB 
benchmarks +pub const DEFAULT_DIMENSIONS: usize = 256; /// HNSW connectivity parameter (higher = better recall, more memory) pub const DEFAULT_CONNECTIVITY: usize = 16; @@ -137,7 +137,7 @@ impl VectorIndex { let options = IndexOptions { dimensions: config.dimensions, metric: config.metric, - quantization: ScalarKind::F32, + quantization: ScalarKind::F16, connectivity: config.connectivity, expansion_add: config.expansion_add, expansion_search: config.expansion_search, @@ -325,7 +325,7 @@ impl VectorIndex { let options = IndexOptions { dimensions: config.dimensions, metric: config.metric, - quantization: ScalarKind::F32, + quantization: ScalarKind::F16, connectivity: config.connectivity, expansion_add: config.expansion_add, expansion_search: config.expansion_search, diff --git a/crates/vestige-core/src/storage/sqlite.rs b/crates/vestige-core/src/storage/sqlite.rs index 143f4e6..4d9733a 100644 --- a/crates/vestige-core/src/storage/sqlite.rs +++ b/crates/vestige-core/src/storage/sqlite.rs @@ -22,10 +22,10 @@ use crate::memory::{ use crate::search::sanitize_fts5_query; #[cfg(feature = "embeddings")] -use crate::embeddings::{Embedding, EmbeddingService, EMBEDDING_DIMENSIONS}; +use crate::embeddings::{matryoshka_truncate, Embedding, EmbeddingService, EMBEDDING_DIMENSIONS}; #[cfg(feature = "vector-search")] -use crate::search::{reciprocal_rank_fusion, VectorIndex}; +use crate::search::{linear_combination, VectorIndex}; // ============================================================================ // ERROR TYPES @@ -202,7 +202,13 @@ impl Storage { for (node_id, embedding_bytes) in embeddings { if let Some(embedding) = Embedding::from_bytes(&embedding_bytes) { - if let Err(e) = index.add(&node_id, &embedding.vector) { + // Handle Matryoshka migration: old 768-dim → truncate to 256-dim + let vector = if embedding.dimensions != EMBEDDING_DIMENSIONS { + matryoshka_truncate(embedding.vector) + } else { + embedding.vector + }; + if let Err(e) = index.add(&node_id, &vector) { tracing::warn!("Failed to load embedding for {}: {}", node_id, e); } } @@ -690,7 +696,7 @@ impl Storage { } #[cfg(all(feature = "embeddings", feature = "vector-search"))] SearchMode::Hybrid => { - let results = self.hybrid_search(&input.query, input.limit, 0.5, 0.5)?; + let results = self.hybrid_search(&input.query, input.limit, 0.3, 0.7)?; results.into_iter().map(|r| r.node).collect() } #[cfg(not(all(feature = "embeddings", feature = "vector-search")))] @@ -1257,7 +1263,7 @@ impl Storage { }; let combined = if !semantic_results.is_empty() { - reciprocal_rank_fusion(&keyword_results, &semantic_results, 60.0) + linear_combination(&keyword_results, &semantic_results, keyword_weight, semantic_weight) } else { keyword_results.clone() }; diff --git a/crates/vestige-mcp/Cargo.toml b/crates/vestige-mcp/Cargo.toml index 0f1ce4d..a66cdef 100644 --- a/crates/vestige-mcp/Cargo.toml +++ b/crates/vestige-mcp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vestige-mcp" -version = "1.5.0" +version = "1.6.0" edition = "2024" description = "Cognitive memory MCP server for Claude - FSRS-6, spreading activation, synaptic tagging, and 130 years of memory research" authors = ["samvallad33"] diff --git a/crates/vestige-mcp/src/cognitive.rs b/crates/vestige-mcp/src/cognitive.rs index 7feb33e..34e04af 100644 --- a/crates/vestige-mcp/src/cognitive.rs +++ b/crates/vestige-mcp/src/cognitive.rs @@ -64,7 +64,7 @@ pub struct CognitiveEngine { impl CognitiveEngine { /// Initialize all cognitive modules with default configurations. 
pub fn new() -> Self { - Self { + let engine = Self { // Neuroscience activation_network: ActivationNetwork::new(), synaptic_tagging: SynapticTaggingSystem::new(), @@ -98,6 +98,8 @@ impl CognitiveEngine { // Search reranker: Reranker::new(RerankerConfig::default()), temporal_searcher: TemporalSearcher::new(), - } + }; + + engine } } diff --git a/crates/vestige-mcp/src/dashboard/handlers.rs b/crates/vestige-mcp/src/dashboard/handlers.rs index 17b3f93..ffb11be 100644 --- a/crates/vestige-mcp/src/dashboard/handlers.rs +++ b/crates/vestige-mcp/src/dashboard/handlers.rs @@ -38,7 +38,7 @@ pub async fn list_memories( { // Use hybrid search let results = storage - .hybrid_search(query, limit, 0.5, 0.5) + .hybrid_search(query, limit, 0.3, 0.7) .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; let formatted: Vec = results diff --git a/crates/vestige-mcp/src/main.rs b/crates/vestige-mcp/src/main.rs index e1329aa..e7e7697 100644 --- a/crates/vestige-mcp/src/main.rs +++ b/crates/vestige-mcp/src/main.rs @@ -243,6 +243,18 @@ async fn main() { let cognitive = Arc::new(Mutex::new(cognitive::CognitiveEngine::new())); info!("CognitiveEngine initialized (26 modules)"); + // Load cross-encoder reranker in the background (downloads ~150MB on first run) + #[cfg(feature = "embeddings")] + { + let cog_clone = Arc::clone(&cognitive); + tokio::spawn(async move { + // Small delay so we don't block the stdio handshake + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + let mut cog = cog_clone.lock().await; + cog.reranker.init_cross_encoder(); + }); + } + // Create MCP server let server = McpServer::new(storage, cognitive); diff --git a/crates/vestige-mcp/src/tools/search.rs b/crates/vestige-mcp/src/tools/search.rs index ccbe27d..d72b264 100644 --- a/crates/vestige-mcp/src/tools/search.rs +++ b/crates/vestige-mcp/src/tools/search.rs @@ -162,8 +162,8 @@ pub async fn execute_hybrid( .hybrid_search( &args.query, args.limit.unwrap_or(10).clamp(1, 50), - args.keyword_weight.unwrap_or(0.5).clamp(0.0, 1.0), - args.semantic_weight.unwrap_or(0.5).clamp(0.0, 1.0), + args.keyword_weight.unwrap_or(0.3).clamp(0.0, 1.0), + args.semantic_weight.unwrap_or(0.7).clamp(0.0, 1.0), ) .map_err(|e| e.to_string())?; diff --git a/crates/vestige-mcp/src/tools/search_unified.rs b/crates/vestige-mcp/src/tools/search_unified.rs index eba5423..b133b1f 100644 --- a/crates/vestige-mcp/src/tools/search_unified.rs +++ b/crates/vestige-mcp/src/tools/search_unified.rs @@ -127,9 +127,9 @@ pub async fn execute( let min_retention = args.min_retention.unwrap_or(0.0).clamp(0.0, 1.0); let min_similarity = args.min_similarity.unwrap_or(0.5).clamp(0.0, 1.0); - // Use balanced weights for hybrid search (keyword + semantic) - let keyword_weight = 0.5_f32; - let semantic_weight = 0.5_f32; + // Favor semantic search — research shows 0.3/0.7 outperforms equal weights + let keyword_weight = 0.3_f32; + let semantic_weight = 0.7_f32; // ==================================================================== // STAGE 1: Hybrid search with 3x over-fetch for reranking pool @@ -160,7 +160,7 @@ pub async fn execute( // ==================================================================== // STAGE 2: Reranker (BM25-like rescoring, trim to requested limit) // ==================================================================== - if let Ok(cog) = cognitive.try_lock() { + if let Ok(mut cog) = cognitive.try_lock() { let candidates: Vec<_> = filtered_results .iter() .map(|r| (r.clone(), r.node.content.clone())) diff --git a/package.json b/package.json index 
bd1ee40..bdd7fe1 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "vestige", - "version": "1.5.0", + "version": "1.6.0", "private": true, "description": "Cognitive memory for AI - MCP server with FSRS-6 spaced repetition", "author": "Sam Valladares", diff --git a/packages/vestige-init/package.json b/packages/vestige-init/package.json index 710ea45..05d3bc6 100644 --- a/packages/vestige-init/package.json +++ b/packages/vestige-init/package.json @@ -1,6 +1,6 @@ { "name": "@vestige/init", - "version": "1.5.0", + "version": "1.6.0", "description": "Give your AI a brain in 10 seconds — zero-config Vestige installer", "bin": { "vestige-init": "bin/init.js" diff --git a/packages/vestige-mcp-npm/package.json b/packages/vestige-mcp-npm/package.json index 415a0c8..418d97a 100644 --- a/packages/vestige-mcp-npm/package.json +++ b/packages/vestige-mcp-npm/package.json @@ -1,6 +1,6 @@ { "name": "vestige-mcp-server", - "version": "1.5.0", + "version": "1.6.0", "description": "Vestige MCP Server - AI Memory System for Claude and other assistants", "bin": { "vestige-mcp": "bin/vestige-mcp.js",
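Editor's note: a back-of-envelope check on the combined effect of the two storage changes in this diff (768 to 256 dimensions via Matryoshka truncation, plus F32 to F16 quantization in the usearch index). This counts the index-side vector payload only; HNSW graph overhead and the f32 embedding bytes still stored in SQLite are excluded:

```rust
fn main() {
    // Before: 768 dims stored as F32 (4 bytes each) in the vector index.
    let before = 768 * std::mem::size_of::<f32>(); // 3072 bytes per vector
    // After: 256 dims post-truncation, ScalarKind::F16 (2 bytes each).
    let after = 256 * 2; // 512 bytes per vector
    println!("{before} B -> {after} B per vector ({}x smaller)", before / after); // 6x
}
```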