feat: Vestige v1.2.0 — dashboard, temporal tools, maintenance tools, detail levels

Add web dashboard (axum) on port 3927 with memory browser, search, and
system stats. New MCP tools: memory_timeline, memory_changelog,
health_check, consolidate, stats, backup, export, gc. Search now supports
detail_level (brief/summary/full) to control token usage. Add backup_to()
and get_recent_state_transitions() to storage layer. Bump to v1.2.0.
This commit is contained in:
Sam Valladares 2026-02-12 04:33:05 -06:00
parent a92fb2b6ed
commit 34f5e8d52a
18 changed files with 2850 additions and 25 deletions

View file

@ -0,0 +1,191 @@
//! Memory Changelog Tool
//!
//! View audit trail of memory changes.
//! Per-memory mode: state transitions for a single memory.
//! System-wide mode: consolidations + recent state transitions.
use chrono::{DateTime, Utc};
use serde::Deserialize;
use serde_json::Value;
use std::sync::Arc;
use tokio::sync::Mutex;
use uuid::Uuid;
use vestige_core::Storage;
/// Input schema for memory_changelog tool.
///
/// All properties are optional. Omitting `memory_id` selects system-wide
/// mode (consolidations + recent transitions); supplying it scopes the
/// changelog to that one memory's audit trail.
pub fn schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {
            "memory_id": {
                "type": "string",
                "description": "Scope to a single memory's audit trail. If omitted, returns system-wide changelog."
            },
            // NOTE(review): start/end are advertised here but the handler
            // currently ignores them (see #[allow(dead_code)] on the args).
            "start": {
                "type": "string",
                "description": "Start of time range (ISO 8601). Only used in system-wide mode."
            },
            "end": {
                "type": "string",
                "description": "End of time range (ISO 8601). Only used in system-wide mode."
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of entries (default: 20, max: 100)",
                "default": 20,
                "minimum": 1,
                "maximum": 100
            }
        }
    })
}
/// Deserialized arguments for the memory_changelog tool.
///
/// `rename_all = "camelCase"` makes serde expect `"memoryId"` on the wire,
/// but the tool schema documents `"memory_id"`. Without an alias, serde
/// silently drops the unknown snake_case field and the tool falls back to
/// system-wide mode — so accept both spellings (same pattern the search
/// tool uses for `detail_level`).
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ChangelogArgs {
    #[serde(alias = "memory_id")]
    memory_id: Option<String>,
    // Accepted per the schema but not yet used by the handler.
    #[allow(dead_code)]
    start: Option<String>,
    #[allow(dead_code)]
    end: Option<String>,
    limit: Option<i32>,
}
/// Execute memory_changelog tool.
///
/// Parses the optional JSON arguments, clamps `limit` to [1, 100], then
/// dispatches: a supplied `memory_id` selects per-memory mode, otherwise
/// the system-wide changelog is produced.
pub async fn execute(
    storage: &Arc<Mutex<Storage>>,
    args: Option<Value>,
) -> Result<Value, String> {
    // Missing arguments are treated as an empty object.
    let parsed: ChangelogArgs = args
        .map(|v| serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e)))
        .transpose()?
        .unwrap_or(ChangelogArgs {
            memory_id: None,
            start: None,
            end: None,
            limit: None,
        });
    let entry_limit = parsed.limit.unwrap_or(20).clamp(1, 100);
    let guard = storage.lock().await;
    match parsed.memory_id {
        // Per-memory mode: state transitions for a specific memory.
        Some(ref id) => execute_per_memory(&guard, id, entry_limit),
        // System-wide mode: consolidations + recent transitions.
        None => execute_system_wide(&guard, entry_limit),
    }
}
/// Per-memory changelog: state transition audit trail.
///
/// Returns an error when `memory_id` is not a valid UUID or no memory with
/// that id exists; otherwise bundles the memory's context together with its
/// recorded state transitions.
fn execute_per_memory(
    storage: &Storage,
    memory_id: &str,
    limit: i32,
) -> Result<Value, String> {
    // Reject anything that is not a well-formed UUID before touching storage.
    if Uuid::parse_str(memory_id).is_err() {
        return Err(format!("Invalid memory_id '{}'. Must be a valid UUID.", memory_id));
    }
    // Fetch the memory itself so the response carries some context.
    let node = match storage.get_node(memory_id).map_err(|e| e.to_string())? {
        Some(n) => n,
        None => return Err(format!("Memory '{}' not found.", memory_id)),
    };
    // Collect and format the audit trail.
    let mut history: Vec<Value> = Vec::new();
    for t in storage
        .get_state_transitions(memory_id, limit)
        .map_err(|e| e.to_string())?
    {
        history.push(serde_json::json!({
            "fromState": t.from_state,
            "toState": t.to_state,
            "reasonType": t.reason_type,
            "reasonData": t.reason_data,
            "timestamp": t.timestamp.to_rfc3339(),
        }));
    }
    Ok(serde_json::json!({
        "tool": "memory_changelog",
        "mode": "per_memory",
        "memoryId": memory_id,
        "memoryContent": node.content,
        "memoryType": node.node_type,
        "currentRetention": node.retention_strength,
        "totalTransitions": history.len(),
        "transitions": history,
    }))
}
/// System-wide changelog: consolidations + recent state transitions.
///
/// Fetches both event sources (each capped at `limit`), merges them into a
/// single timestamped stream, sorts newest-first, and caps the merged
/// stream at `limit` again.
fn execute_system_wide(
    storage: &Storage,
    limit: i32,
) -> Result<Value, String> {
    // Get consolidation history
    let consolidations = storage
        .get_consolidation_history(limit)
        .map_err(|e| e.to_string())?;
    // Get recent state transitions across all memories
    let transitions = storage
        .get_recent_state_transitions(limit)
        .map_err(|e| e.to_string())?;
    // Build unified (timestamp, payload) list so both kinds sort together.
    let mut events: Vec<(DateTime<Utc>, Value)> = Vec::new();
    for c in &consolidations {
        events.push((
            c.completed_at,
            serde_json::json!({
                "type": "consolidation",
                "timestamp": c.completed_at.to_rfc3339(),
                "durationMs": c.duration_ms,
                "memoriesReplayed": c.memories_replayed,
                // Fixed key: was "connectionFound" — now consistent with the
                // other "connections*" keys below.
                "connectionsFound": c.connections_found,
                "connectionsStrengthened": c.connections_strengthened,
                "connectionsPruned": c.connections_pruned,
                "insightsGenerated": c.insights_generated,
            }),
        ));
    }
    for t in &transitions {
        events.push((
            t.timestamp,
            serde_json::json!({
                "type": "state_transition",
                "timestamp": t.timestamp.to_rfc3339(),
                "memoryId": t.memory_id,
                "fromState": t.from_state,
                "toState": t.to_state,
                "reasonType": t.reason_type,
                "reasonData": t.reason_data,
            }),
        ));
    }
    // Sort by timestamp descending (newest first), then cap the merged list.
    events.sort_by(|a, b| b.0.cmp(&a.0));
    events.truncate(limit as usize);
    let formatted_events: Vec<Value> = events.into_iter().map(|(_, v)| v).collect();
    Ok(serde_json::json!({
        "tool": "memory_changelog",
        "mode": "system_wide",
        "totalEvents": formatted_events.len(),
        "events": formatted_events,
    }))
}

View file

@ -0,0 +1,550 @@
//! Maintenance MCP Tools
//!
//! Exposes CLI-only operations as MCP tools so Claude can trigger them automatically:
//! health_check, consolidate, stats, backup, export, gc.
use chrono::{NaiveDate, Utc};
use serde::Deserialize;
use serde_json::Value;
use std::sync::Arc;
use tokio::sync::Mutex;
use vestige_core::Storage;
// ============================================================================
// SCHEMAS
// ============================================================================
/// Input schema for the `health_check` tool — takes no arguments.
pub fn health_check_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {}
    })
}
/// Input schema for the `consolidate` tool — takes no arguments.
pub fn consolidate_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {}
    })
}
/// Input schema for the `stats` tool — takes no arguments.
pub fn stats_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {}
    })
}
/// Input schema for the `backup` tool — takes no arguments.
pub fn backup_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {}
    })
}
/// Input schema for the `export` tool.
///
/// All properties optional; defaults to a full JSON export with a
/// timestamped filename in ~/.vestige/exports/.
pub fn export_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {
            "format": {
                "type": "string",
                "description": "Export format: 'json' (default) or 'jsonl'",
                "enum": ["json", "jsonl"],
                "default": "json"
            },
            "tags": {
                "type": "array",
                "items": { "type": "string" },
                "description": "Filter by tags (ALL must match)"
            },
            "since": {
                "type": "string",
                "description": "Only export memories created after this date (YYYY-MM-DD)"
            },
            "path": {
                "type": "string",
                "description": "Custom filename (not path). File is saved in ~/.vestige/exports/. Default: memories-{timestamp}.{format}"
            }
        }
    })
}
/// Input schema for the `gc` (garbage collection) tool.
///
/// Defaults to a dry run so a bare call never deletes anything.
pub fn gc_schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {
            "min_retention": {
                "type": "number",
                "description": "Delete memories with retention below this threshold (default: 0.1)",
                "default": 0.1,
                "minimum": 0.0,
                "maximum": 1.0
            },
            "max_age_days": {
                "type": "integer",
                "description": "Only delete memories older than this many days (optional additional filter)",
                "minimum": 1
            },
            "dry_run": {
                "type": "boolean",
                "description": "If true (default), only report what would be deleted without actually deleting",
                "default": true
            }
        }
    })
}
// ============================================================================
// EXECUTE FUNCTIONS
// ============================================================================
/// Health check tool.
///
/// Derives a coarse status from node count and average retention, computes
/// embedding coverage, and assembles human-readable warnings and
/// recommendations from the storage statistics.
pub async fn execute_health_check(
    storage: &Arc<Mutex<Storage>>,
    _args: Option<Value>,
) -> Result<Value, String> {
    let guard = storage.lock().await;
    let stats = guard.get_stats().map_err(|e| e.to_string())?;
    // Status buckets: empty → critical (<0.3) → degraded (<0.5) → healthy.
    let status = if stats.total_nodes == 0 {
        "empty"
    } else if stats.average_retention < 0.3 {
        "critical"
    } else if stats.average_retention < 0.5 {
        "degraded"
    } else {
        "healthy"
    };
    // Percentage of nodes that already have an embedding vector.
    let coverage_pct = if stats.total_nodes > 0 {
        (stats.nodes_with_embeddings as f64 / stats.total_nodes as f64) * 100.0
    } else {
        0.0
    };
    let embedding_ready = guard.is_embedding_ready();
    let mut warnings: Vec<&str> = Vec::new();
    if stats.total_nodes > 0 && stats.average_retention < 0.5 {
        warnings.push("Low average retention - consider running consolidation");
    }
    if stats.nodes_due_for_review > 10 {
        warnings.push("Many memories are due for review");
    }
    if stats.total_nodes > 0 && stats.nodes_with_embeddings == 0 {
        warnings.push("No embeddings generated - semantic search unavailable");
    }
    if coverage_pct < 50.0 && stats.total_nodes > 10 {
        warnings.push("Low embedding coverage - run consolidate to improve semantic search");
    }
    let mut recommendations: Vec<&str> = Vec::new();
    if status == "critical" {
        recommendations.push("CRITICAL: Many memories have very low retention. Review important memories.");
    }
    if stats.nodes_due_for_review > 5 {
        recommendations.push("Review due memories to strengthen retention.");
    }
    if stats.nodes_with_embeddings < stats.total_nodes {
        recommendations.push("Run 'consolidate' to generate missing embeddings.");
    }
    if stats.total_nodes > 100 && stats.average_retention < 0.7 {
        recommendations.push("Consider running periodic consolidation.");
    }
    // A clean bill of health still deserves a line of output.
    if recommendations.is_empty() && status == "healthy" {
        recommendations.push("Memory system is healthy!");
    }
    Ok(serde_json::json!({
        "tool": "health_check",
        "status": status,
        "totalMemories": stats.total_nodes,
        "dueForReview": stats.nodes_due_for_review,
        "averageRetention": stats.average_retention,
        "embeddingCoverage": format!("{:.1}%", coverage_pct),
        "embeddingReady": embedding_ready,
        "warnings": warnings,
        "recommendations": recommendations,
    }))
}
/// Consolidate tool.
///
/// Runs one consolidation pass (needs a mutable storage handle) and reports
/// the pass's metrics.
pub async fn execute_consolidate(
    storage: &Arc<Mutex<Storage>>,
    _args: Option<Value>,
) -> Result<Value, String> {
    // Scope the lock to the consolidation run itself.
    let outcome = {
        let mut guard = storage.lock().await;
        guard.run_consolidation().map_err(|e| e.to_string())?
    };
    Ok(serde_json::json!({
        "tool": "consolidate",
        "nodesProcessed": outcome.nodes_processed,
        "nodesPromoted": outcome.nodes_promoted,
        "nodesPruned": outcome.nodes_pruned,
        "decayApplied": outcome.decay_applied,
        "embeddingsGenerated": outcome.embeddings_generated,
        "durationMs": outcome.duration_ms,
    }))
}
/// Stats tool.
///
/// Reports storage-wide statistics plus a state distribution estimated from
/// a sample of up to 500 nodes, bucketed by a weighted accessibility score.
pub async fn execute_stats(
    storage: &Arc<Mutex<Storage>>,
    _args: Option<Value>,
) -> Result<Value, String> {
    let guard = storage.lock().await;
    let stats = guard.get_stats().map_err(|e| e.to_string())?;
    // Compute state distribution from a sample of nodes.
    let nodes = guard.get_all_nodes(500, 0).map_err(|e| e.to_string())?;
    let sampled = nodes.len();
    // Bucket order: [active, dormant, silent, unavailable].
    let mut buckets = [0usize; 4];
    for node in &nodes {
        // Weighted accessibility: retention 50%, retrieval 30%, storage 20%.
        let accessibility = node.retention_strength * 0.5
            + node.retrieval_strength * 0.3
            + node.storage_strength * 0.2;
        let idx = if accessibility >= 0.7 {
            0
        } else if accessibility >= 0.4 {
            1
        } else if accessibility >= 0.1 {
            2
        } else {
            3
        };
        buckets[idx] += 1;
    }
    let embedding_coverage = if stats.total_nodes > 0 {
        (stats.nodes_with_embeddings as f64 / stats.total_nodes as f64) * 100.0
    } else {
        0.0
    };
    Ok(serde_json::json!({
        "tool": "stats",
        "totalMemories": stats.total_nodes,
        "dueForReview": stats.nodes_due_for_review,
        "averageRetention": stats.average_retention,
        "averageStorageStrength": stats.average_storage_strength,
        "averageRetrievalStrength": stats.average_retrieval_strength,
        "withEmbeddings": stats.nodes_with_embeddings,
        "embeddingCoverage": format!("{:.1}%", embedding_coverage),
        "embeddingModel": stats.embedding_model,
        "oldestMemory": stats.oldest_memory.map(|dt| dt.to_rfc3339()),
        "newestMemory": stats.newest_memory.map(|dt| dt.to_rfc3339()),
        "stateDistribution": {
            "active": buckets[0],
            "dormant": buckets[1],
            "silent": buckets[2],
            "unavailable": buckets[3],
            "sampled": sampled,
        },
    }))
}
/// Backup tool.
///
/// Snapshots the database into a timestamped file under the vestige
/// backups directory via `Storage::backup_to` (VACUUM INTO — consistent
/// even with an active WAL).
pub async fn execute_backup(
    storage: &Arc<Mutex<Storage>>,
    _args: Option<Value>,
) -> Result<Value, String> {
    // Resolve <data-dir-parent>/backups and make sure it exists.
    let project_dirs = directories::ProjectDirs::from("com", "vestige", "core")
        .ok_or("Could not determine data directory")?;
    let data_dir = project_dirs.data_dir();
    let backup_dir = data_dir.parent().unwrap_or(data_dir).join("backups");
    std::fs::create_dir_all(&backup_dir)
        .map_err(|e| format!("Failed to create backup directory: {}", e))?;
    // Timestamped filename, e.g. vestige-20260212-043305.db
    let stamp = Utc::now().format("%Y%m%d-%H%M%S");
    let backup_path = backup_dir.join(format!("vestige-{}.db", stamp));
    // Hold the lock only for the duration of the copy itself.
    storage
        .lock()
        .await
        .backup_to(&backup_path)
        .map_err(|e| format!("Failed to create backup: {}", e))?;
    let size = std::fs::metadata(&backup_path).map(|m| m.len()).unwrap_or(0);
    Ok(serde_json::json!({
        "tool": "backup",
        "path": backup_path.display().to_string(),
        "sizeBytes": size,
        "timestamp": Utc::now().to_rfc3339(),
    }))
}
/// Deserialized arguments for the export tool.
///
/// All field names are single words, so `rename_all = "camelCase"` leaves
/// the wire names identical to the snake_case names in the schema.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ExportArgs {
    // Export format: "json" (default) or "jsonl".
    format: Option<String>,
    // Tags that must ALL be present on a node for it to be exported.
    tags: Option<Vec<String>>,
    // Only export memories created after this YYYY-MM-DD date.
    since: Option<String>,
    // Custom filename (never a path) inside the exports directory.
    path: Option<String>,
}
/// Export tool.
///
/// Writes filtered memories to a JSON or JSONL file under the vestige
/// exports directory. Filters: `tags` (ALL must match), `since` (created
/// strictly after midnight UTC of the given date). `path` may only be a
/// bare filename and is always joined onto the exports directory, which
/// prevents path traversal. Returns the export path, counts, and file size.
pub async fn execute_export(
    storage: &Arc<Mutex<Storage>>,
    args: Option<Value>,
) -> Result<Value, String> {
    // Missing args behave like an empty object (all defaults).
    let args: ExportArgs = match args {
        Some(v) => serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e))?,
        None => ExportArgs {
            format: None,
            tags: None,
            since: None,
            path: None,
        },
    };
    let format = args.format.unwrap_or_else(|| "json".to_string());
    if format != "json" && format != "jsonl" {
        return Err(format!("Invalid format '{}'. Must be 'json' or 'jsonl'.", format));
    }
    // Parse the optional since-date; interpreted as midnight UTC.
    let since_date = match &args.since {
        Some(date_str) => {
            let naive = NaiveDate::parse_from_str(date_str, "%Y-%m-%d")
                .map_err(|e| format!("Invalid date '{}': {}. Use YYYY-MM-DD.", date_str, e))?;
            Some(naive.and_hms_opt(0, 0, 0).unwrap().and_utc())
        }
        None => None,
    };
    let tag_filter: Vec<String> = args.tags.unwrap_or_default();
    // Fetch all nodes (capped at 100K to prevent OOM)
    let storage = storage.lock().await;
    let mut all_nodes = Vec::new();
    let page_size = 500;
    let max_nodes = 100_000;
    let mut offset = 0;
    loop {
        let batch = storage.get_all_nodes(page_size, offset).map_err(|e| e.to_string())?;
        let batch_len = batch.len();
        all_nodes.extend(batch);
        // A short page means no more rows; also stop at the safety cap.
        if batch_len < page_size as usize || all_nodes.len() >= max_nodes {
            break;
        }
        offset += page_size;
    }
    // Apply filters
    let filtered: Vec<&vestige_core::KnowledgeNode> = all_nodes
        .iter()
        .filter(|node| {
            // `since` filter: drop anything created before the cutoff.
            if since_date.as_ref().is_some_and(|since_dt| node.created_at < *since_dt) {
                return false;
            }
            // Tag filter: every requested tag must be present (AND semantics).
            if !tag_filter.is_empty() {
                for tag in &tag_filter {
                    if !node.tags.iter().any(|t| t == tag) {
                        return false;
                    }
                }
            }
            true
        })
        .collect();
    // Determine export path — always constrained to vestige exports directory
    let vestige_dir = directories::ProjectDirs::from("com", "vestige", "core")
        .ok_or("Could not determine data directory")?;
    let export_dir = vestige_dir.data_dir().parent()
        .unwrap_or(vestige_dir.data_dir())
        .join("exports");
    std::fs::create_dir_all(&export_dir)
        .map_err(|e| format!("Failed to create export directory: {}", e))?;
    let export_path = match args.path {
        Some(ref p) => {
            // Only allow a filename, not a path — prevent path traversal.
            // file_name() strips any directory components; the extra ".."
            // check is belt-and-braces on top of that.
            let filename = std::path::Path::new(p)
                .file_name()
                .ok_or("Invalid export filename: must be a simple filename, not a path")?;
            let name_str = filename.to_str().ok_or("Invalid filename encoding")?;
            if name_str.contains("..") {
                return Err("Invalid export filename: '..' not allowed".to_string());
            }
            export_dir.join(filename)
        }
        None => {
            let timestamp = Utc::now().format("%Y%m%d-%H%M%S");
            export_dir.join(format!("memories-{}.{}", timestamp, format))
        }
    };
    // Write export
    let file = std::fs::File::create(&export_path)
        .map_err(|e| format!("Failed to create export file: {}", e))?;
    let mut writer = std::io::BufWriter::new(file);
    use std::io::Write;
    match format.as_str() {
        "json" => {
            // Single pretty-printed JSON array, trailing newline.
            serde_json::to_writer_pretty(&mut writer, &filtered)
                .map_err(|e| format!("Failed to write JSON: {}", e))?;
            writer.write_all(b"\n").map_err(|e| e.to_string())?;
        }
        "jsonl" => {
            // One compact JSON object per line.
            for node in &filtered {
                serde_json::to_writer(&mut writer, node)
                    .map_err(|e| format!("Failed to write JSONL: {}", e))?;
                writer.write_all(b"\n").map_err(|e| e.to_string())?;
            }
        }
        // `format` was validated above; no other value can reach here.
        _ => unreachable!(),
    }
    writer.flush().map_err(|e| e.to_string())?;
    let file_size = std::fs::metadata(&export_path).map(|m| m.len()).unwrap_or(0);
    Ok(serde_json::json!({
        "tool": "export",
        "path": export_path.display().to_string(),
        "format": format,
        "memoriesExported": filtered.len(),
        "totalMemories": all_nodes.len(),
        "sizeBytes": file_size,
    }))
}
/// Deserialized arguments for the gc tool.
///
/// `rename_all = "camelCase"` makes serde expect "minRetention" etc. on the
/// wire, but `gc_schema()` documents the snake_case names. serde silently
/// ignores unknown fields, so without aliases a schema-conforming call would
/// have all its arguments dropped. Accept both spellings.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GcArgs {
    #[serde(alias = "min_retention")]
    min_retention: Option<f64>,
    #[serde(alias = "max_age_days")]
    max_age_days: Option<u64>,
    #[serde(alias = "dry_run")]
    dry_run: Option<bool>,
}
/// Garbage collection tool.
///
/// Finds memories whose retention is below `min_retention` (optionally also
/// older than `max_age_days`) and deletes them — unless `dry_run` (the
/// default) is set, in which case it only reports the candidates with a
/// 10-item preview sample.
pub async fn execute_gc(
    storage: &Arc<Mutex<Storage>>,
    args: Option<Value>,
) -> Result<Value, String> {
    let args: GcArgs = match args {
        Some(v) => serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e))?,
        None => GcArgs {
            min_retention: None,
            max_age_days: None,
            dry_run: None,
        },
    };
    let min_retention = args.min_retention.unwrap_or(0.1).clamp(0.0, 1.0);
    let max_age_days = args.max_age_days;
    let dry_run = args.dry_run.unwrap_or(true); // Default to dry_run for safety
    let mut storage = storage.lock().await;
    let now = Utc::now();
    // Fetch all nodes (capped at 100K to prevent OOM)
    let mut all_nodes = Vec::new();
    let page_size = 500;
    let max_nodes = 100_000;
    let mut offset = 0;
    loop {
        let batch = storage.get_all_nodes(page_size, offset).map_err(|e| e.to_string())?;
        let batch_len = batch.len();
        all_nodes.extend(batch);
        // A short page means no more rows; also stop at the safety cap.
        if batch_len < page_size as usize || all_nodes.len() >= max_nodes {
            break;
        }
        offset += page_size;
    }
    // Find candidates
    let candidates: Vec<&vestige_core::KnowledgeNode> = all_nodes
        .iter()
        .filter(|node| {
            // Keep anything at or above the retention threshold.
            if node.retention_strength >= min_retention {
                return false;
            }
            // Optional age filter: only delete sufficiently old memories.
            // A negative age (clock skew / future created_at) is never deleted.
            if let Some(max_days) = max_age_days {
                let age_days = (now - node.created_at).num_days();
                if age_days < 0 || (age_days as u64) < max_days {
                    return false;
                }
            }
            true
        })
        .collect();
    let candidate_count = candidates.len();
    // Build sample for display
    let sample: Vec<Value> = candidates
        .iter()
        .take(10)
        .map(|node| {
            let age_days = (now - node.created_at).num_days();
            // First 60 chars of content, with an ellipsis when truncated.
            let content_preview: String = {
                let preview: String = node.content.chars().take(60).collect();
                if preview.len() < node.content.len() {
                    format!("{}...", preview)
                } else {
                    preview
                }
            };
            serde_json::json!({
                // First 8 chars of the id are enough to identify it here.
                "id": &node.id[..8.min(node.id.len())],
                "retention": node.retention_strength,
                "ageDays": age_days,
                "contentPreview": content_preview,
            })
        })
        .collect();
    if dry_run {
        return Ok(serde_json::json!({
            "tool": "gc",
            "dryRun": true,
            "minRetention": min_retention,
            "maxAgeDays": max_age_days,
            "candidateCount": candidate_count,
            "totalMemories": all_nodes.len(),
            "sample": sample,
            "message": format!("{} memories would be deleted. Set dry_run=false to delete.", candidate_count),
        }));
    }
    // Perform actual deletion
    let mut deleted = 0usize;
    let mut errors = 0usize;
    // Clone the ids first: `candidates` borrows `all_nodes` immutably while
    // `delete_node` needs the mutable storage guard.
    let ids: Vec<String> = candidates.iter().map(|n| n.id.clone()).collect();
    for id in &ids {
        match storage.delete_node(id) {
            Ok(true) => deleted += 1,
            // Ok(false): node disappeared between scan and delete.
            Ok(false) => errors += 1,
            Err(_) => errors += 1,
        }
    }
    Ok(serde_json::json!({
        "tool": "gc",
        "dryRun": false,
        "minRetention": min_retention,
        "maxAgeDays": max_age_days,
        "deleted": deleted,
        "errors": errors,
        "totalBefore": all_nodes.len(),
        "totalAfter": all_nodes.len() - deleted,
    }))
}

View file

@ -14,6 +14,13 @@ pub mod memory_unified;
pub mod search_unified;
pub mod smart_ingest;
// v1.2: Temporal query tools
pub mod changelog;
pub mod timeline;
// v1.2: Maintenance tools
pub mod maintenance;
// Deprecated tools - kept for internal backwards compatibility
// These modules are intentionally unused in the public API
#[allow(dead_code)]

View file

@ -40,6 +40,12 @@ pub fn schema() -> Value {
"default": 0.5,
"minimum": 0.0,
"maximum": 1.0
},
"detail_level": {
"type": "string",
"description": "Level of detail in results. 'brief' = id/type/tags/score only (saves tokens). 'summary' = default 8-field response. 'full' = all fields including FSRS state and timestamps.",
"enum": ["brief", "summary", "full"],
"default": "summary"
}
},
"required": ["query"]
@ -53,6 +59,8 @@ struct SearchArgs {
limit: Option<i32>,
min_retention: Option<f64>,
min_similarity: Option<f32>,
#[serde(alias = "detail_level")]
detail_level: Option<String>,
}
/// Execute unified search
@ -72,6 +80,19 @@ pub async fn execute(
return Err("Query cannot be empty".to_string());
}
// Validate detail_level
let detail_level = match args.detail_level.as_deref() {
Some("brief") => "brief",
Some("full") => "full",
Some("summary") | None => "summary",
Some(invalid) => {
return Err(format!(
"Invalid detail_level '{}'. Must be 'brief', 'summary', or 'full'.",
invalid
));
}
};
// Clamp all parameters to valid ranges
let limit = args.limit.unwrap_or(10).clamp(1, 100);
let min_retention = args.min_retention.unwrap_or(0.0).clamp(0.0, 1.0);
@ -97,10 +118,10 @@ pub async fn execute(
return false;
}
// Check similarity if semantic score is available
if let Some(sem_score) = r.semantic_score {
if sem_score < min_similarity {
return false;
}
if let Some(sem_score) = r.semantic_score
&& sem_score < min_similarity
{
return false;
}
true
})
@ -111,31 +132,114 @@ pub async fn execute(
let ids: Vec<&str> = filtered_results.iter().map(|r| r.node.id.as_str()).collect();
let _ = storage.strengthen_batch_on_access(&ids); // Ignore errors, don't fail search
// Format results
// Format results based on detail_level
let formatted: Vec<Value> = filtered_results
.iter()
.map(|r| {
serde_json::json!({
"id": r.node.id,
"content": r.node.content,
"combinedScore": r.combined_score,
"keywordScore": r.keyword_score,
"semanticScore": r.semantic_score,
"nodeType": r.node.node_type,
"tags": r.node.tags,
"retentionStrength": r.node.retention_strength,
})
})
.map(|r| format_search_result(r, detail_level))
.collect();
Ok(serde_json::json!({
"query": args.query,
"method": "hybrid",
"detailLevel": detail_level,
"total": formatted.len(),
"results": formatted,
}))
}
/// Format a search result based on the requested detail level.
///
/// * `"brief"` — id/type/tags/retention/score only (token-frugal).
/// * `"full"`  — every field, including FSRS state and timestamps.
/// * anything else — the backwards-compatible 8-field summary.
fn format_search_result(r: &vestige_core::SearchResult, detail_level: &str) -> Value {
    if detail_level == "brief" {
        return serde_json::json!({
            "id": r.node.id,
            "nodeType": r.node.node_type,
            "tags": r.node.tags,
            "retentionStrength": r.node.retention_strength,
            "combinedScore": r.combined_score,
        });
    }
    if detail_level == "full" {
        return serde_json::json!({
            "id": r.node.id,
            "content": r.node.content,
            "combinedScore": r.combined_score,
            "keywordScore": r.keyword_score,
            "semanticScore": r.semantic_score,
            "nodeType": r.node.node_type,
            "tags": r.node.tags,
            "retentionStrength": r.node.retention_strength,
            "storageStrength": r.node.storage_strength,
            "retrievalStrength": r.node.retrieval_strength,
            "source": r.node.source,
            "sentimentScore": r.node.sentiment_score,
            "sentimentMagnitude": r.node.sentiment_magnitude,
            "createdAt": r.node.created_at.to_rfc3339(),
            "updatedAt": r.node.updated_at.to_rfc3339(),
            "lastAccessed": r.node.last_accessed.to_rfc3339(),
            "nextReview": r.node.next_review.map(|dt| dt.to_rfc3339()),
            "stability": r.node.stability,
            "difficulty": r.node.difficulty,
            "reps": r.node.reps,
            "lapses": r.node.lapses,
            "validFrom": r.node.valid_from.map(|dt| dt.to_rfc3339()),
            "validUntil": r.node.valid_until.map(|dt| dt.to_rfc3339()),
            "matchType": format!("{:?}", r.match_type),
        });
    }
    // "summary" (default) — backwards compatible with the pre-v1.2 response.
    serde_json::json!({
        "id": r.node.id,
        "content": r.node.content,
        "combinedScore": r.combined_score,
        "keywordScore": r.keyword_score,
        "semanticScore": r.semantic_score,
        "nodeType": r.node.node_type,
        "tags": r.node.tags,
        "retentionStrength": r.node.retention_strength,
    })
}
/// Format a KnowledgeNode based on the requested detail level.
/// Reusable across search, timeline, and other tools.
///
/// Unknown levels fall through to the "summary" shape.
pub fn format_node(node: &vestige_core::KnowledgeNode, detail_level: &str) -> Value {
    if detail_level == "brief" {
        return serde_json::json!({
            "id": node.id,
            "nodeType": node.node_type,
            "tags": node.tags,
            "retentionStrength": node.retention_strength,
        });
    }
    if detail_level == "full" {
        return serde_json::json!({
            "id": node.id,
            "content": node.content,
            "nodeType": node.node_type,
            "tags": node.tags,
            "retentionStrength": node.retention_strength,
            "storageStrength": node.storage_strength,
            "retrievalStrength": node.retrieval_strength,
            "source": node.source,
            "sentimentScore": node.sentiment_score,
            "sentimentMagnitude": node.sentiment_magnitude,
            "createdAt": node.created_at.to_rfc3339(),
            "updatedAt": node.updated_at.to_rfc3339(),
            "lastAccessed": node.last_accessed.to_rfc3339(),
            "nextReview": node.next_review.map(|dt| dt.to_rfc3339()),
            "stability": node.stability,
            "difficulty": node.difficulty,
            "reps": node.reps,
            "lapses": node.lapses,
            "validFrom": node.valid_from.map(|dt| dt.to_rfc3339()),
            "validUntil": node.valid_until.map(|dt| dt.to_rfc3339()),
        });
    }
    // "summary" (default)
    serde_json::json!({
        "id": node.id,
        "content": node.content,
        "nodeType": node.node_type,
        "tags": node.tags,
        "retentionStrength": node.retention_strength,
    })
}
// ============================================================================
// TESTS
// ============================================================================
@ -489,4 +593,113 @@ mod tests {
assert_eq!(similarity_schema["maximum"], 1.0);
assert_eq!(similarity_schema["default"], 0.5);
}
// ========================================================================
// DETAIL LEVEL TESTS
// ========================================================================
#[test]
fn test_schema_has_detail_level() {
    // The search schema must advertise detail_level with all three levels
    // and "summary" as the default.
    let schema_value = schema();
    let detail = &schema_value["properties"]["detail_level"];
    assert!(detail.is_object());
    assert_eq!(detail["default"], "summary");
    let levels = detail["enum"].as_array().unwrap();
    for expected in ["brief", "summary", "full"] {
        assert!(levels.contains(&serde_json::json!(expected)));
    }
}
// Brief mode must omit `content` while keeping the id/type/tags/score set.
#[tokio::test]
async fn test_search_detail_level_brief_excludes_content() {
    let (storage, _dir) = test_storage().await;
    ingest_test_content(&storage, "Brief mode test content for search.").await;
    let args = serde_json::json!({
        "query": "brief",
        "detail_level": "brief",
        "min_similarity": 0.0
    });
    let result = execute(&storage, Some(args)).await;
    assert!(result.is_ok());
    let value = result.unwrap();
    assert_eq!(value["detailLevel"], "brief");
    let results = value["results"].as_array().unwrap();
    // Only assert the result shape when the search actually returned rows.
    if !results.is_empty() {
        let first = &results[0];
        // Brief should NOT have content
        assert!(first.get("content").is_none() || first["content"].is_null());
        // Brief should have these fields
        assert!(first["id"].is_string());
        assert!(first["nodeType"].is_string());
        assert!(first["tags"].is_array());
        assert!(first["retentionStrength"].is_number());
        assert!(first["combinedScore"].is_number());
    }
}
// Full mode must include timestamps, strength fields, and matchType.
#[tokio::test]
async fn test_search_detail_level_full_includes_timestamps() {
    let (storage, _dir) = test_storage().await;
    ingest_test_content(&storage, "Full mode test content for search.").await;
    let args = serde_json::json!({
        "query": "full",
        "detail_level": "full",
        "min_similarity": 0.0
    });
    let result = execute(&storage, Some(args)).await;
    assert!(result.is_ok());
    let value = result.unwrap();
    assert_eq!(value["detailLevel"], "full");
    let results = value["results"].as_array().unwrap();
    // Only assert the result shape when the search actually returned rows.
    if !results.is_empty() {
        let first = &results[0];
        // Full should have timestamps
        assert!(first["createdAt"].is_string());
        assert!(first["updatedAt"].is_string());
        assert!(first["content"].is_string());
        assert!(first["storageStrength"].is_number());
        assert!(first["retrievalStrength"].is_number());
        assert!(first["matchType"].is_string());
    }
}
// Omitting detail_level must behave exactly like "summary" (the
// backwards-compatible default): content present, timestamps absent.
#[tokio::test]
async fn test_search_detail_level_default_is_summary() {
    let (storage, _dir) = test_storage().await;
    ingest_test_content(&storage, "Default detail level test content.").await;
    let args = serde_json::json!({
        "query": "default",
        "min_similarity": 0.0
    });
    let result = execute(&storage, Some(args)).await;
    assert!(result.is_ok());
    let value = result.unwrap();
    assert_eq!(value["detailLevel"], "summary");
    let results = value["results"].as_array().unwrap();
    // Only assert the result shape when the search actually returned rows.
    if !results.is_empty() {
        let first = &results[0];
        // Summary should have content but not timestamps
        assert!(first["content"].is_string());
        assert!(first["id"].is_string());
        assert!(first.get("createdAt").is_none() || first["createdAt"].is_null());
    }
}
// An unrecognized detail_level must be rejected with a descriptive error,
// not silently coerced to the default.
#[tokio::test]
async fn test_search_detail_level_invalid_fails() {
    let (storage, _dir) = test_storage().await;
    let args = serde_json::json!({
        "query": "test",
        "detail_level": "invalid_level"
    });
    let result = execute(&storage, Some(args)).await;
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("Invalid detail_level"));
}
}

View file

@ -0,0 +1,184 @@
//! Memory Timeline Tool
//!
//! Browse memories chronologically. Returns memories in a time range,
//! grouped by day. Defaults to last 7 days.
use chrono::{DateTime, NaiveDate, Utc};
use serde::Deserialize;
use serde_json::Value;
use std::collections::BTreeMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use vestige_core::Storage;
use super::search_unified::format_node;
/// Input schema for memory_timeline tool.
///
/// All properties optional; the handler defaults to the last 7 days,
/// limit 50, detail level "summary".
pub fn schema() -> Value {
    serde_json::json!({
        "type": "object",
        "properties": {
            "start": {
                "type": "string",
                "description": "Start of time range (ISO 8601 date or datetime). Default: 7 days ago."
            },
            "end": {
                "type": "string",
                "description": "End of time range (ISO 8601 date or datetime). Default: now."
            },
            "node_type": {
                "type": "string",
                "description": "Filter by node type (e.g. 'fact', 'concept', 'decision')"
            },
            "tags": {
                "type": "array",
                "items": { "type": "string" },
                "description": "Filter by tags (ANY match)"
            },
            "limit": {
                "type": "integer",
                "description": "Maximum number of memories to return (default: 50, max: 200)",
                "default": 50,
                "minimum": 1,
                "maximum": 200
            },
            "detail_level": {
                "type": "string",
                "description": "Level of detail: 'brief', 'summary' (default), or 'full'",
                "enum": ["brief", "summary", "full"],
                "default": "summary"
            }
        }
    })
}
/// Deserialized arguments for the memory_timeline tool.
///
/// `rename_all = "camelCase"` makes serde expect camelCase wire names, but
/// the tool schema documents snake_case. `detail_level` already carried an
/// alias; `node_type` needs the same treatment, otherwise a
/// schema-conforming `"node_type"` argument is silently ignored (serde
/// drops unknown fields by default).
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TimelineArgs {
    start: Option<String>,
    end: Option<String>,
    #[serde(alias = "node_type")]
    node_type: Option<String>,
    tags: Option<Vec<String>>,
    limit: Option<i32>,
    #[serde(alias = "detail_level")]
    detail_level: Option<String>,
}
/// Parse an ISO 8601 date or datetime string into a DateTime<Utc>.
/// Supports both `2026-02-01` and `2026-02-01T00:00:00Z` formats; a bare
/// date is interpreted as midnight UTC.
fn parse_datetime(s: &str) -> Result<DateTime<Utc>, String> {
    // Full RFC 3339 timestamp?
    match DateTime::parse_from_rfc3339(s) {
        Ok(dt) => Ok(dt.with_timezone(&Utc)),
        // Fall back to date-only (YYYY-MM-DD).
        Err(_) => match NaiveDate::parse_from_str(s, "%Y-%m-%d") {
            Ok(date) => date
                .and_hms_opt(0, 0, 0)
                .map(|dt| dt.and_utc())
                .ok_or_else(|| format!("Invalid date: {}", s)),
            Err(_) => Err(format!(
                "Invalid date/datetime '{}'. Use ISO 8601 format: YYYY-MM-DD or YYYY-MM-DDTHH:MM:SSZ",
                s
            )),
        },
    }
}
/// Execute memory_timeline tool.
///
/// Queries memories in the requested time range (default: last 7 days),
/// applies optional node-type and tag (ANY-match) filters, groups the
/// results by creation day, and returns the days newest-first.
pub async fn execute(
    storage: &Arc<Mutex<Storage>>,
    args: Option<Value>,
) -> Result<Value, String> {
    let args: TimelineArgs = match args {
        Some(v) => serde_json::from_value(v).map_err(|e| format!("Invalid arguments: {}", e))?,
        None => TimelineArgs {
            start: None,
            end: None,
            node_type: None,
            tags: None,
            limit: None,
            detail_level: None,
        },
    };
    // Validate detail_level — reject unknown values rather than defaulting.
    let detail_level = match args.detail_level.as_deref() {
        Some("brief") => "brief",
        Some("full") => "full",
        Some("summary") | None => "summary",
        Some(invalid) => {
            return Err(format!(
                "Invalid detail_level '{}'. Must be 'brief', 'summary', or 'full'.",
                invalid
            ));
        }
    };
    // Parse time range; defaults: [now - 7 days, now].
    let now = Utc::now();
    let start = match &args.start {
        Some(s) => Some(parse_datetime(s)?),
        None => Some(now - chrono::Duration::days(7)),
    };
    let end = match &args.end {
        Some(e) => Some(parse_datetime(e)?),
        None => Some(now),
    };
    let limit = args.limit.unwrap_or(50).clamp(1, 200);
    let storage = storage.lock().await;
    // Query memories in time range
    let mut results = storage
        .query_time_range(start, end, limit)
        .map_err(|e| e.to_string())?;
    // Post-query filters (applied after the limit — the filtered response
    // can therefore contain fewer than `limit` matching memories).
    if let Some(ref node_type) = args.node_type {
        results.retain(|n| n.node_type == *node_type);
    }
    if let Some(tags) = args.tags.as_ref().filter(|t| !t.is_empty()) {
        results.retain(|n| tags.iter().any(|t| n.tags.contains(t)));
    }
    // Group by day of creation; BTreeMap keeps the days ordered.
    let mut by_day: BTreeMap<NaiveDate, Vec<Value>> = BTreeMap::new();
    for node in &results {
        let date = node.created_at.date_naive();
        by_day
            .entry(date)
            .or_default()
            .push(format_node(node, detail_level));
    }
    // Build timeline (newest first) — BTreeMap iterates ascending, so reverse.
    let timeline: Vec<Value> = by_day
        .into_iter()
        .rev()
        .map(|(date, memories)| {
            serde_json::json!({
                "date": date.to_string(),
                "count": memories.len(),
                "memories": memories,
            })
        })
        .collect();
    let total = results.len();
    let days = timeline.len();
    Ok(serde_json::json!({
        "tool": "memory_timeline",
        "range": {
            "start": start.map(|dt| dt.to_rfc3339()),
            "end": end.map(|dt| dt.to_rfc3339()),
        },
        "detailLevel": detail_level,
        "totalMemories": total,
        "days": days,
        "timeline": timeline,
    }))
}