mirror of
https://github.com/samvallad33/vestige.git
synced 2026-05-09 07:42:37 +02:00
First AI memory system to model forgetting as a neuroscience-grounded PROCESS rather than passive decay. Adds the `suppress` MCP tool (#24), Rac1 cascade worker, migration V10, and dashboard forgetting indicators. Based on: - Anderson, Hanslmayr & Quaegebeur (2025), Nat Rev Neurosci — right lateral PFC as the domain-general inhibitory controller; SIF compounds with each stopping attempt. - Cervantes-Sandoval et al. (2020), Front Cell Neurosci PMC7477079 — Rac1 GTPase as the active synaptic destabilization mechanism. What's new: * `suppress` MCP tool — each call compounds `suppression_count` and subtracts a `0.15 × count` penalty (saturating at 80%) from retrieval scores during hybrid search. Distinct from delete (removes) and demote (one-shot). * Rac1 cascade worker — background sweep piggybacks the 6h consolidation loop, walks `memory_connections` edges from recently-suppressed seeds, applies attenuated FSRS decay to co-activated neighbors. You don't just forget Jake — you fade the café, the roommate, the birthday. * 24h labile window — reversible via `suppress({id, reverse: true})` within 24 hours. Matches Nader reconsolidation semantics. * Migration V10 — additive-only (`suppression_count`, `suppressed_at` + partial indices). All v2.0.x DBs upgrade seamlessly on first launch. * Dashboard: `ForgettingIndicator.svelte` pulses when suppressions are active. 3D graph nodes dim to 20% opacity when suppressed. New WebSocket events: `MemorySuppressed`, `MemoryUnsuppressed`, `Rac1CascadeSwept`. Heartbeat carries `suppressed_count`. * Search pipeline: SIF penalty inserted into the accessibility stage so it stacks on top of passive FSRS decay. * Tool count bumped 23 → 24. Cognitive modules 29 → 30. Memories persist — they are INHIBITED, not erased. `memory.get(id)` returns full content through any number of suppressions. The 24h labile window is a grace period for regret. 
Also fixes issue #31 (dashboard graph view buggy) as a companion UI bug discovered during the v2.0.5 audit cycle: * Root cause: node glow `SpriteMaterial` had no `map`, so `THREE.Sprite` rendered as a solid-coloured 1×1 plane. Additive blending + `UnrealBloomPass(0.8, 0.4, 0.85)` amplified the square edges into hard-edged glowing cubes. * Fix: shared 128×128 radial-gradient `CanvasTexture` singleton used as the sprite map. Retuned bloom to `(0.55, 0.6, 0.2)`. Halved fog density (0.008 → 0.0035). Edges bumped from dark navy `0x4a4a7a` to brand violet `0x8b5cf6` with higher opacity. Added explicit `scene.background` and a 2000-point starfield for depth. * 21 regression tests added in `ui-fixes.test.ts`, locking in every invariant (shared texture singleton, depthWrite:false, scale ×6, bloom magic numbers via source regex, starfield presence). Tests: 1,284 Rust (+47) + 171 Vitest (+21) = 1,455 total, 0 failed Clippy: clean across all targets, zero warnings Release binary: 22.6MB, `cargo build --release -p vestige-mcp` green Versions: workspace aligned at 2.0.5 across all 6 crates/packages Closes #31
184 lines
5.4 KiB
Rust
184 lines
5.4 KiB
Rust
//! Search Tools (Deprecated - use search_unified instead)
|
|
//!
|
|
//! Semantic and hybrid search implementations.
|
|
|
|
use serde::Deserialize;
|
|
use serde_json::Value;
|
|
use std::sync::Arc;
|
|
|
|
use vestige_core::Storage;
|
|
|
|
/// Input schema for semantic_search tool
|
|
pub fn semantic_schema() -> Value {
|
|
serde_json::json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"query": {
|
|
"type": "string",
|
|
"description": "Search query for semantic similarity"
|
|
},
|
|
"limit": {
|
|
"type": "integer",
|
|
"description": "Maximum number of results (default: 10)",
|
|
"default": 10,
|
|
"minimum": 1,
|
|
"maximum": 50
|
|
},
|
|
"min_similarity": {
|
|
"type": "number",
|
|
"description": "Minimum similarity threshold (0.0-1.0, default: 0.5)",
|
|
"default": 0.5,
|
|
"minimum": 0.0,
|
|
"maximum": 1.0
|
|
}
|
|
},
|
|
"required": ["query"]
|
|
})
|
|
}
|
|
|
|
/// Input schema for hybrid_search tool
|
|
pub fn hybrid_schema() -> Value {
|
|
serde_json::json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"query": {
|
|
"type": "string",
|
|
"description": "Search query"
|
|
},
|
|
"limit": {
|
|
"type": "integer",
|
|
"description": "Maximum number of results (default: 10)",
|
|
"default": 10,
|
|
"minimum": 1,
|
|
"maximum": 50
|
|
},
|
|
"keyword_weight": {
|
|
"type": "number",
|
|
"description": "Weight for keyword search (0.0-1.0, default: 0.5)",
|
|
"default": 0.5,
|
|
"minimum": 0.0,
|
|
"maximum": 1.0
|
|
},
|
|
"semantic_weight": {
|
|
"type": "number",
|
|
"description": "Weight for semantic search (0.0-1.0, default: 0.5)",
|
|
"default": 0.5,
|
|
"minimum": 0.0,
|
|
"maximum": 1.0
|
|
}
|
|
},
|
|
"required": ["query"]
|
|
})
|
|
}
|
|
|
|
#[derive(Debug, Deserialize)]
|
|
#[serde(rename_all = "camelCase")]
|
|
struct SemanticSearchArgs {
|
|
query: String,
|
|
limit: Option<i32>,
|
|
min_similarity: Option<f32>,
|
|
}
|
|
|
|
#[derive(Debug, Deserialize)]
|
|
#[serde(rename_all = "camelCase")]
|
|
struct HybridSearchArgs {
|
|
query: String,
|
|
limit: Option<i32>,
|
|
keyword_weight: Option<f32>,
|
|
semantic_weight: Option<f32>,
|
|
}
|
|
|
|
pub async fn execute_semantic(
|
|
storage: &Arc<Storage>,
|
|
args: Option<Value>,
|
|
) -> Result<Value, String> {
|
|
let args: SemanticSearchArgs = match args {
|
|
Some(v) => serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e))?,
|
|
None => return Err("Missing arguments".to_string()),
|
|
};
|
|
|
|
if args.query.trim().is_empty() {
|
|
return Err("Query cannot be empty".to_string());
|
|
}
|
|
|
|
// Check if embeddings are ready
|
|
if !storage.is_embedding_ready() {
|
|
return Ok(serde_json::json!({
|
|
"error": "Embedding service not ready",
|
|
"hint": "Run consolidation first to initialize embeddings, or the model may still be loading.",
|
|
}));
|
|
}
|
|
|
|
let results = storage
|
|
.semantic_search(
|
|
&args.query,
|
|
args.limit.unwrap_or(10).clamp(1, 50),
|
|
args.min_similarity.unwrap_or(0.5).clamp(0.0, 1.0),
|
|
)
|
|
.map_err(|e| e.to_string())?;
|
|
|
|
let formatted: Vec<Value> = results
|
|
.iter()
|
|
.map(|r| {
|
|
serde_json::json!({
|
|
"id": r.node.id,
|
|
"content": r.node.content,
|
|
"similarity": r.similarity,
|
|
"nodeType": r.node.node_type,
|
|
"tags": r.node.tags,
|
|
"retentionStrength": r.node.retention_strength,
|
|
})
|
|
})
|
|
.collect();
|
|
|
|
Ok(serde_json::json!({
|
|
"query": args.query,
|
|
"method": "semantic",
|
|
"total": formatted.len(),
|
|
"results": formatted,
|
|
}))
|
|
}
|
|
|
|
pub async fn execute_hybrid(storage: &Arc<Storage>, args: Option<Value>) -> Result<Value, String> {
|
|
let args: HybridSearchArgs = match args {
|
|
Some(v) => serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e))?,
|
|
None => return Err("Missing arguments".to_string()),
|
|
};
|
|
|
|
if args.query.trim().is_empty() {
|
|
return Err("Query cannot be empty".to_string());
|
|
}
|
|
|
|
let results = storage
|
|
.hybrid_search(
|
|
&args.query,
|
|
args.limit.unwrap_or(10).clamp(1, 50),
|
|
args.keyword_weight.unwrap_or(0.3).clamp(0.0, 1.0),
|
|
args.semantic_weight.unwrap_or(0.7).clamp(0.0, 1.0),
|
|
)
|
|
.map_err(|e| e.to_string())?;
|
|
|
|
let formatted: Vec<Value> = results
|
|
.iter()
|
|
.map(|r| {
|
|
serde_json::json!({
|
|
"id": r.node.id,
|
|
"content": r.node.content,
|
|
"combinedScore": r.combined_score,
|
|
"keywordScore": r.keyword_score,
|
|
"semanticScore": r.semantic_score,
|
|
"matchType": format!("{:?}", r.match_type),
|
|
"nodeType": r.node.node_type,
|
|
"tags": r.node.tags,
|
|
"retentionStrength": r.node.retention_strength,
|
|
})
|
|
})
|
|
.collect();
|
|
|
|
Ok(serde_json::json!({
|
|
"query": args.query,
|
|
"method": "hybrid",
|
|
"total": formatted.len(),
|
|
"results": formatted,
|
|
}))
|
|
}
|