feat: Vestige v1.7.0 — 18 tools, automation triggers, SQLite perf

Tool consolidation: 23 → 18 tools
- ingest merged into smart_ingest (single + batch mode)
- session_checkpoint merged into smart_ingest batch (items param)
- promote_memory/demote_memory merged into memory(action=promote/demote)
- health_check/stats merged into system_status

Automation triggers in system_status:
- lastDreamTimestamp, savesSinceLastDream, lastBackupTimestamp,
  lastConsolidationTimestamp — enables Claude to conditionally
  trigger dream/backup/gc/find_duplicates at session start
- Migration v6: dream_history table (dreams were in-memory only)
- DreamHistoryRecord struct + save/query methods
- Dream persistence in dream.rs (non-fatal on failure)

SQLite performance:
- PRAGMA mmap_size = 256MB (2-5x read speedup)
- PRAGMA journal_size_limit = 64MB (prevents WAL bloat)
- PRAGMA optimize = 0x10002 (fresh query planner stats on connect)
- FTS5 segment merge during consolidation (20-40% keyword boost)
- PRAGMA optimize during consolidation cycle

1,152 tests passing, 0 failures, release build clean.
This commit is contained in:
Sam Valladares 2026-02-20 21:59:52 -06:00
parent 33d8b6b405
commit c29023dd80
20 changed files with 1478 additions and 168 deletions

View file

@ -4,8 +4,9 @@
use std::sync::Arc;
use tokio::sync::Mutex;
use chrono::Utc;
use crate::cognitive::CognitiveEngine;
use vestige_core::Storage;
use vestige_core::{DreamHistoryRecord, Storage};
pub fn schema() -> serde_json::Value {
serde_json::json!({
@ -31,8 +32,8 @@ pub async fn execute(
.and_then(|v| v.as_u64())
.unwrap_or(50) as usize;
let storage = storage.lock().await;
let all_nodes = storage.get_all_nodes(memory_count as i32, 0)
let storage_guard = storage.lock().await;
let all_nodes = storage_guard.get_all_nodes(memory_count as i32, 0)
.map_err(|e| format!("Failed to load memories: {}", e))?;
if all_nodes.len() < 5 {
@ -47,18 +48,36 @@ pub async fn execute(
vestige_core::DreamMemory {
id: n.id.clone(),
content: n.content.clone(),
embedding: storage.get_node_embedding(&n.id).ok().flatten(),
embedding: storage_guard.get_node_embedding(&n.id).ok().flatten(),
tags: n.tags.clone(),
created_at: n.created_at,
access_count: n.reps as u32,
}
}).collect();
// Drop storage lock before taking cognitive lock (strict ordering)
drop(storage);
drop(storage_guard);
let cog = cognitive.lock().await;
let dream_result = cog.dreamer.dream(&dream_memories).await;
let insights = cog.dreamer.synthesize_insights(&dream_memories);
drop(cog);
// Persist dream history (non-fatal on failure — dream still happened)
{
let mut storage_guard = storage.lock().await;
let record = DreamHistoryRecord {
dreamed_at: Utc::now(),
duration_ms: dream_result.duration_ms as i64,
memories_replayed: dream_memories.len() as i32,
connections_found: dream_result.new_connections_found as i32,
insights_generated: dream_result.insights_generated.len() as i32,
memories_strengthened: dream_result.memories_strengthened as i32,
memories_compressed: dream_result.memories_compressed as i32,
};
if let Err(e) = storage_guard.save_dream_history(&record) {
tracing::warn!("Failed to persist dream history: {}", e);
}
}
Ok(serde_json::json!({
"status": "dreamed",
@ -189,4 +208,28 @@ mod tests {
assert!(value["stats"]["insights_generated"].is_number());
assert!(value["stats"]["duration_ms"].is_number());
}
#[tokio::test]
async fn test_dream_persists_to_database() {
let (storage, _dir) = test_storage().await;
ingest_n_memories(&storage, 10).await;
// Before dream: no dream history
{
let s = storage.lock().await;
assert!(s.get_last_dream().unwrap().is_none());
}
let result = execute(&storage, &test_cognitive(), None).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["status"], "dreamed");
// After dream: dream history should exist
{
let s = storage.lock().await;
let last = s.get_last_dream().unwrap();
assert!(last.is_some(), "Dream should have been persisted to database");
}
}
}

View file

@ -280,7 +280,7 @@ pub async fn execute_request_feedback(
"description": "Give Claude a custom instruction (e.g., 'update this memory', 'merge with X', 'add tag Y')"
}
],
"instruction": "PRESENT THESE OPTIONS TO THE USER. If they choose A, call promote_memory. If B, call demote_memory. If C, they will provide a custom instruction - execute it (could be: update the memory content, delete it, merge it, add tags, research something, etc.)."
"instruction": "PRESENT THESE OPTIONS TO THE USER. If they choose A, call memory(action='promote'). If B, call memory(action='demote'). If C, they will provide a custom instruction - execute it (could be: update the memory content, delete it, merge it, add tags, research something, etc.)."
}))
}

View file

@ -1,7 +1,7 @@
//! Maintenance MCP Tools
//!
//! Exposes CLI-only operations as MCP tools so Claude can trigger them automatically:
//! health_check, consolidate, stats, backup, export, gc.
//! system_status, consolidate, backup, export, gc.
use chrono::{NaiveDate, Utc};
use serde::Deserialize;
@ -17,6 +17,8 @@ use vestige_core::{FSRSScheduler, MemoryLifecycle, MemoryState, Storage};
// SCHEMAS
// ============================================================================
/// Deprecated in v1.7 — use system_status_schema() instead
#[allow(dead_code)]
pub fn health_check_schema() -> Value {
serde_json::json!({
"type": "object",
@ -31,6 +33,8 @@ pub fn consolidate_schema() -> Value {
})
}
/// Deprecated in v1.7 — use system_status_schema() instead
#[allow(dead_code)]
pub fn stats_schema() -> Value {
serde_json::json!({
"type": "object",
@ -97,11 +101,203 @@ pub fn gc_schema() -> Value {
})
}
/// Combined system status schema (replaces health_check + stats in v1.7.0)
pub fn system_status_schema() -> Value {
serde_json::json!({
"type": "object",
"properties": {}
})
}
// ============================================================================
// EXECUTE FUNCTIONS
// ============================================================================
/// Health check tool
/// Combined system status tool (merges health_check + stats, v1.7.0)
///
/// Returns system health status, full statistics, FSRS preview,
/// cognitive module health, state distribution, and actionable recommendations.
pub async fn execute_system_status(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
_args: Option<Value>,
) -> Result<Value, String> {
let storage_guard = storage.lock().await;
let stats = storage_guard.get_stats().map_err(|e| e.to_string())?;
// === Health assessment ===
let status = if stats.total_nodes == 0 {
"empty"
} else if stats.average_retention < 0.3 {
"critical"
} else if stats.average_retention < 0.5 {
"degraded"
} else {
"healthy"
};
let embedding_coverage = if stats.total_nodes > 0 {
(stats.nodes_with_embeddings as f64 / stats.total_nodes as f64) * 100.0
} else {
0.0
};
let embedding_ready = storage_guard.is_embedding_ready();
let mut warnings = Vec::new();
if stats.average_retention < 0.5 && stats.total_nodes > 0 {
warnings.push("Low average retention - consider running consolidation");
}
if stats.nodes_due_for_review > 10 {
warnings.push("Many memories are due for review");
}
if stats.total_nodes > 0 && stats.nodes_with_embeddings == 0 {
warnings.push("No embeddings generated - semantic search unavailable");
}
if embedding_coverage < 50.0 && stats.total_nodes > 10 {
warnings.push("Low embedding coverage - run consolidate to improve semantic search");
}
let mut recommendations = Vec::new();
if status == "critical" {
recommendations.push("CRITICAL: Many memories have very low retention. Review important memories.");
}
if stats.nodes_due_for_review > 5 {
recommendations.push("Review due memories to strengthen retention.");
}
if stats.nodes_with_embeddings < stats.total_nodes {
recommendations.push("Run 'consolidate' to generate missing embeddings.");
}
if stats.total_nodes > 100 && stats.average_retention < 0.7 {
recommendations.push("Consider running periodic consolidation.");
}
if status == "healthy" && recommendations.is_empty() {
recommendations.push("Memory system is healthy!");
}
// === State distribution ===
let nodes = storage_guard.get_all_nodes(500, 0).map_err(|e| e.to_string())?;
let total = nodes.len();
let (active, dormant, silent, unavailable) = if total > 0 {
let mut a = 0usize;
let mut d = 0usize;
let mut s = 0usize;
let mut u = 0usize;
for node in &nodes {
let accessibility = node.retention_strength * 0.5
+ node.retrieval_strength * 0.3
+ node.storage_strength * 0.2;
if accessibility >= 0.7 {
a += 1;
} else if accessibility >= 0.4 {
d += 1;
} else if accessibility >= 0.1 {
s += 1;
} else {
u += 1;
}
}
(a, d, s, u)
} else {
(0, 0, 0, 0)
};
// === FSRS Preview ===
let scheduler = FSRSScheduler::default();
let fsrs_preview = if let Some(representative) = nodes.first() {
let mut state = scheduler.new_card();
state.difficulty = representative.difficulty;
state.stability = representative.stability;
state.reps = representative.reps;
state.lapses = representative.lapses;
state.last_review = representative.last_accessed;
let elapsed = scheduler.days_since_review(&state.last_review);
let preview = scheduler.preview_reviews(&state, elapsed);
Some(serde_json::json!({
"representativeMemoryId": representative.id,
"elapsedDays": format!("{:.1}", elapsed),
"intervalIfGood": preview.good.interval,
"intervalIfEasy": preview.easy.interval,
"intervalIfHard": preview.hard.interval,
"currentRetrievability": format!("{:.3}", preview.good.retrievability),
}))
} else {
None
};
// === Cognitive health ===
let cognitive_health = if let Ok(cog) = cognitive.try_lock() {
let activation_count = cog.activation_network.get_associations("_probe_").len();
let prediction_accuracy = cog.predictive_memory.prediction_accuracy().unwrap_or(0.0);
let scheduler_stats = cog.consolidation_scheduler.get_activity_stats();
Some(serde_json::json!({
"activationNetworkSize": activation_count,
"predictionAccuracy": format!("{:.2}", prediction_accuracy),
"modulesActive": 28,
"schedulerStats": {
"totalEvents": scheduler_stats.total_events,
"eventsPerMinute": scheduler_stats.events_per_minute,
"isIdle": scheduler_stats.is_idle,
"timeUntilNextConsolidation": format!("{:?}", cog.consolidation_scheduler.time_until_next()),
},
}))
} else {
None
};
// === Automation triggers (for conditional dream/backup/gc at session start) ===
let last_consolidation = storage_guard.get_last_consolidation().ok().flatten();
let last_dream = storage_guard.get_last_dream().ok().flatten();
let saves_since_last_dream = match &last_dream {
Some(dt) => storage_guard.count_memories_since(*dt).unwrap_or(0),
None => stats.total_nodes as i64,
};
let last_backup = Storage::get_last_backup_timestamp();
drop(storage_guard);
Ok(serde_json::json!({
"tool": "system_status",
// Health
"status": status,
"warnings": warnings,
"recommendations": recommendations,
"embeddingReady": embedding_ready,
// Stats
"totalMemories": stats.total_nodes,
"dueForReview": stats.nodes_due_for_review,
"averageRetention": stats.average_retention,
"averageStorageStrength": stats.average_storage_strength,
"averageRetrievalStrength": stats.average_retrieval_strength,
"withEmbeddings": stats.nodes_with_embeddings,
"embeddingCoverage": format!("{:.1}%", embedding_coverage),
"embeddingModel": stats.embedding_model,
"oldestMemory": stats.oldest_memory.map(|dt| dt.to_rfc3339()),
"newestMemory": stats.newest_memory.map(|dt| dt.to_rfc3339()),
// Distribution
"stateDistribution": {
"active": active,
"dormant": dormant,
"silent": silent,
"unavailable": unavailable,
"sampled": total,
},
// FSRS
"fsrsPreview": fsrs_preview,
// Cognitive
"cognitiveHealth": cognitive_health,
// Automation triggers — Claude uses these to decide when to dream/backup/gc
"automationTriggers": {
"lastDreamTimestamp": last_dream.map(|dt| dt.to_rfc3339()),
"savesSinceLastDream": saves_since_last_dream,
"lastBackupTimestamp": last_backup.map(|dt| dt.to_rfc3339()),
"lastConsolidationTimestamp": last_consolidation.map(|dt| dt.to_rfc3339()),
},
}))
}
/// Health check tool — deprecated in v1.7, use execute_system_status() instead
#[allow(dead_code)]
pub async fn execute_health_check(
storage: &Arc<Mutex<Storage>>,
_args: Option<Value>,
@ -193,7 +389,8 @@ pub async fn execute_consolidate(
}))
}
/// Stats tool
/// Stats tool — deprecated in v1.7, use execute_system_status() instead
#[allow(dead_code)]
pub async fn execute_stats(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
@ -671,3 +868,119 @@ pub async fn execute_gc(
"totalAfter": all_nodes.len() - deleted,
}))
}
// ============================================================================
// TESTS
// ============================================================================
#[cfg(test)]
mod tests {
use super::*;
use crate::cognitive::CognitiveEngine;
use tempfile::TempDir;
fn test_cognitive() -> Arc<Mutex<CognitiveEngine>> {
Arc::new(Mutex::new(CognitiveEngine::new()))
}
async fn test_storage() -> (Arc<Mutex<Storage>>, TempDir) {
let dir = TempDir::new().unwrap();
let storage = Storage::new(Some(dir.path().join("test.db"))).unwrap();
(Arc::new(Mutex::new(storage)), dir)
}
#[test]
fn test_system_status_schema() {
let schema = system_status_schema();
assert_eq!(schema["type"], "object");
}
#[tokio::test]
async fn test_system_status_empty_db() {
let (storage, _dir) = test_storage().await;
let result = execute_system_status(&storage, &test_cognitive(), None).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["tool"], "system_status");
assert_eq!(value["status"], "empty");
assert_eq!(value["totalMemories"], 0);
assert!(value["warnings"].is_array());
assert!(value["recommendations"].is_array());
}
#[tokio::test]
async fn test_system_status_with_memories() {
let (storage, _dir) = test_storage().await;
{
let mut s = storage.lock().await;
s.ingest(vestige_core::IngestInput {
content: "Test memory for status".to_string(),
node_type: "fact".to_string(),
source: None,
sentiment_score: 0.0,
sentiment_magnitude: 0.0,
tags: vec![],
valid_from: None,
valid_until: None,
}).unwrap();
}
let result = execute_system_status(&storage, &test_cognitive(), None).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["totalMemories"], 1);
assert!(value["stateDistribution"].is_object());
assert!(value["embeddingCoverage"].is_string());
}
#[tokio::test]
async fn test_system_status_has_cognitive_health() {
let (storage, _dir) = test_storage().await;
let result = execute_system_status(&storage, &test_cognitive(), None).await;
let value = result.unwrap();
assert!(value["cognitiveHealth"].is_object());
assert_eq!(value["cognitiveHealth"]["modulesActive"], 28);
}
#[tokio::test]
async fn test_system_status_has_automation_triggers() {
let (storage, _dir) = test_storage().await;
let result = execute_system_status(&storage, &test_cognitive(), None).await;
assert!(result.is_ok());
let value = result.unwrap();
let triggers = &value["automationTriggers"];
assert!(triggers.is_object(), "automationTriggers should be present");
assert!(triggers["lastDreamTimestamp"].is_null(), "No dreams yet");
assert_eq!(triggers["savesSinceLastDream"], 0, "Empty DB = 0 saves");
assert!(triggers["lastConsolidationTimestamp"].is_null(), "No consolidation yet");
// lastBackupTimestamp depends on filesystem state, just check it exists
assert!(triggers.get("lastBackupTimestamp").is_some());
}
#[tokio::test]
async fn test_system_status_automation_triggers_with_memories() {
let (storage, _dir) = test_storage().await;
{
let mut s = storage.lock().await;
for i in 0..3 {
s.ingest(vestige_core::IngestInput {
content: format!("Automation trigger test memory {}", i),
node_type: "fact".to_string(),
source: None,
sentiment_score: 0.0,
sentiment_magnitude: 0.0,
tags: vec![],
valid_from: None,
valid_until: None,
}).unwrap();
}
}
let result = execute_system_status(&storage, &test_cognitive(), None).await;
let value = result.unwrap();
let triggers = &value["automationTriggers"];
// No dream ever → savesSinceLastDream == totalMemories
assert_eq!(triggers["savesSinceLastDream"], 3);
assert!(triggers["lastDreamTimestamp"].is_null());
}
}

View file

@ -8,7 +8,8 @@ use serde_json::Value;
use std::sync::Arc;
use tokio::sync::Mutex;
use vestige_core::{MemoryState, Storage};
use crate::cognitive::CognitiveEngine;
use vestige_core::{MemoryState, Modification, OutcomeType, Storage};
// Accessibility thresholds based on retention strength
const ACCESSIBILITY_ACTIVE: f64 = 0.7;
@ -42,12 +43,16 @@ pub fn schema() -> Value {
"properties": {
"action": {
"type": "string",
"enum": ["get", "delete", "state"],
"description": "Action to perform: 'get' retrieves full memory node, 'delete' removes memory, 'state' returns accessibility state"
"enum": ["get", "delete", "state", "promote", "demote"],
"description": "Action to perform: 'get' retrieves full memory node, 'delete' removes memory, 'state' returns accessibility state, 'promote' increases retrieval strength (thumbs up), 'demote' decreases retrieval strength (thumbs down)"
},
"id": {
"type": "string",
"description": "The ID of the memory node"
},
"reason": {
"type": "string",
"description": "Why this memory is being promoted/demoted (optional, for logging). Only used with promote/demote actions."
}
},
"required": ["action", "id"]
@ -59,11 +64,13 @@ pub fn schema() -> Value {
struct MemoryArgs {
action: String,
id: String,
reason: Option<String>,
}
/// Execute the unified memory tool
pub async fn execute(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
args: Option<Value>,
) -> Result<Value, String> {
let args: MemoryArgs = match args {
@ -78,8 +85,10 @@ pub async fn execute(
"get" => execute_get(storage, &args.id).await,
"delete" => execute_delete(storage, &args.id).await,
"state" => execute_state(storage, &args.id).await,
"promote" => execute_promote(storage, cognitive, &args.id, args.reason).await,
"demote" => execute_demote(storage, cognitive, &args.id, args.reason).await,
_ => Err(format!(
"Invalid action '{}'. Must be one of: get, delete, state",
"Invalid action '{}'. Must be one of: get, delete, state, promote, demote",
args.action
)),
}
@ -186,6 +195,120 @@ async fn execute_state(storage: &Arc<Mutex<Storage>>, id: &str) -> Result<Value,
}))
}
/// Promote a memory (thumbs up) — increases retrieval strength with cognitive feedback pipeline
async fn execute_promote(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
id: &str,
reason: Option<String>,
) -> Result<Value, String> {
let storage_guard = storage.lock().await;
let before = storage_guard.get_node(id).map_err(|e| e.to_string())?
.ok_or_else(|| format!("Node not found: {}", id))?;
let node = storage_guard.promote_memory(id).map_err(|e| e.to_string())?;
drop(storage_guard);
// Cognitive feedback pipeline
if let Ok(mut cog) = cognitive.try_lock() {
cog.reward_signal.record_outcome(id, OutcomeType::Helpful);
cog.importance_tracker.on_retrieved(id, true);
if cog.reconsolidation.is_labile(id) {
cog.reconsolidation.apply_modification(
id,
Modification::StrengthenConnection {
target_memory_id: id.to_string(),
boost: 0.2,
},
);
}
}
Ok(serde_json::json!({
"success": true,
"action": "promoted",
"nodeId": node.id,
"reason": reason,
"changes": {
"retrievalStrength": {
"before": before.retrieval_strength,
"after": node.retrieval_strength,
"delta": "+0.20"
},
"retentionStrength": {
"before": before.retention_strength,
"after": node.retention_strength,
"delta": "+0.10"
},
"stability": {
"before": before.stability,
"after": node.stability,
"multiplier": "1.5x"
}
},
"message": format!("Memory promoted. It will now surface more often in searches. Retrieval: {:.2} -> {:.2}",
before.retrieval_strength, node.retrieval_strength),
}))
}
/// Demote a memory (thumbs down) — decreases retrieval strength with cognitive feedback pipeline
async fn execute_demote(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
id: &str,
reason: Option<String>,
) -> Result<Value, String> {
let storage_guard = storage.lock().await;
let before = storage_guard.get_node(id).map_err(|e| e.to_string())?
.ok_or_else(|| format!("Node not found: {}", id))?;
let node = storage_guard.demote_memory(id).map_err(|e| e.to_string())?;
drop(storage_guard);
// Cognitive feedback pipeline
if let Ok(mut cog) = cognitive.try_lock() {
cog.reward_signal.record_outcome(id, OutcomeType::NotHelpful);
cog.importance_tracker.on_retrieved(id, false);
if cog.reconsolidation.is_labile(id) {
cog.reconsolidation.apply_modification(
id,
Modification::AddContext {
context: "User reported this memory was wrong/unhelpful".to_string(),
},
);
}
}
Ok(serde_json::json!({
"success": true,
"action": "demoted",
"nodeId": node.id,
"reason": reason,
"changes": {
"retrievalStrength": {
"before": before.retrieval_strength,
"after": node.retrieval_strength,
"delta": "-0.30"
},
"retentionStrength": {
"before": before.retention_strength,
"after": node.retention_strength,
"delta": "-0.15"
},
"stability": {
"before": before.stability,
"after": node.stability,
"multiplier": "0.5x"
}
},
"message": format!("Memory demoted. Better alternatives will now surface instead. Retrieval: {:.2} -> {:.2}",
before.retrieval_strength, node.retrieval_strength),
"note": "Memory is NOT deleted - it remains searchable but ranks lower."
}))
}
#[cfg(test)]
mod tests {
use super::*;
@ -218,11 +341,21 @@ mod tests {
let schema = schema();
assert!(schema["properties"]["action"].is_object());
assert!(schema["properties"]["id"].is_object());
assert!(schema["properties"]["reason"].is_object());
assert_eq!(schema["required"], serde_json::json!(["action", "id"]));
// Verify all 5 actions are in enum
let actions = schema["properties"]["action"]["enum"].as_array().unwrap();
assert_eq!(actions.len(), 5);
assert!(actions.contains(&serde_json::json!("promote")));
assert!(actions.contains(&serde_json::json!("demote")));
}
// === INTEGRATION TESTS ===
fn test_cognitive() -> Arc<Mutex<CognitiveEngine>> {
Arc::new(Mutex::new(CognitiveEngine::new()))
}
async fn test_storage() -> (Arc<Mutex<Storage>>, tempfile::TempDir) {
let dir = tempfile::TempDir::new().unwrap();
let storage = Storage::new(Some(dir.path().join("test.db"))).unwrap();
@ -249,7 +382,7 @@ mod tests {
#[tokio::test]
async fn test_missing_args_fails() {
let (storage, _dir) = test_storage().await;
let result = execute(&storage, None).await;
let result = execute(&storage, &test_cognitive(), None).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Missing arguments"));
}
@ -258,7 +391,7 @@ mod tests {
async fn test_invalid_action_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "invalid", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Invalid action"));
}
@ -267,7 +400,7 @@ mod tests {
async fn test_invalid_uuid_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "get", "id": "not-a-uuid" });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Invalid memory ID format"));
}
@ -277,7 +410,7 @@ mod tests {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "get", "id": id });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["action"], "get");
@ -293,7 +426,7 @@ mod tests {
async fn test_get_nonexistent_memory() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "get", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["found"], false);
@ -305,7 +438,7 @@ mod tests {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "delete", "id": id });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["action"], "delete");
@ -316,7 +449,7 @@ mod tests {
async fn test_delete_nonexistent_memory() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "delete", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], false);
@ -328,9 +461,9 @@ mod tests {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let del_args = serde_json::json!({ "action": "delete", "id": id });
execute(&storage, Some(del_args)).await.unwrap();
execute(&storage, &test_cognitive(), Some(del_args)).await.unwrap();
let get_args = serde_json::json!({ "action": "get", "id": id });
let result = execute(&storage, Some(get_args)).await;
let result = execute(&storage, &test_cognitive(), Some(get_args)).await;
let value = result.unwrap();
assert_eq!(value["found"], false);
}
@ -340,7 +473,7 @@ mod tests {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "state", "id": id });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["action"], "state");
@ -360,14 +493,13 @@ mod tests {
async fn test_state_nonexistent_memory_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "state", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, Some(args)).await;
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("not found"));
}
#[test]
fn test_accessibility_boundary_active() {
// Exactly at active threshold
let a = compute_accessibility(1.0, 0.7, 0.5);
assert!(a >= ACCESSIBILITY_ACTIVE);
assert!(matches!(state_from_accessibility(a), MemoryState::Active));
@ -379,4 +511,114 @@ mod tests {
assert_eq!(a, 0.0);
assert!(matches!(state_from_accessibility(a), MemoryState::Unavailable));
}
// ========================================================================
// PROMOTE/DEMOTE TESTS (ported from feedback.rs, v1.7.0 merge)
// ========================================================================
#[tokio::test]
async fn test_promote_missing_id_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "promote", "id": "not-a-uuid" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Invalid memory ID format"));
}
#[tokio::test]
async fn test_promote_nonexistent_node_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "promote", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Node not found"));
}
#[tokio::test]
async fn test_promote_succeeds() {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "promote", "id": id, "reason": "It was helpful" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], true);
assert_eq!(value["action"], "promoted");
assert_eq!(value["nodeId"], id);
assert_eq!(value["reason"], "It was helpful");
assert!(value["changes"]["retrievalStrength"].is_object());
}
#[tokio::test]
async fn test_promote_without_reason_succeeds() {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "promote", "id": id });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], true);
assert!(value["reason"].is_null());
}
#[tokio::test]
async fn test_promote_changes_contain_expected_fields() {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "promote", "id": id });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
let value = result.unwrap();
assert!(value["changes"]["retrievalStrength"]["before"].is_number());
assert!(value["changes"]["retrievalStrength"]["after"].is_number());
assert_eq!(value["changes"]["retrievalStrength"]["delta"], "+0.20");
assert!(value["changes"]["retentionStrength"]["before"].is_number());
assert_eq!(value["changes"]["retentionStrength"]["delta"], "+0.10");
assert_eq!(value["changes"]["stability"]["multiplier"], "1.5x");
}
#[tokio::test]
async fn test_demote_invalid_uuid_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "demote", "id": "bad-id" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Invalid memory ID format"));
}
#[tokio::test]
async fn test_demote_nonexistent_node_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "action": "demote", "id": "00000000-0000-0000-0000-000000000000" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Node not found"));
}
#[tokio::test]
async fn test_demote_succeeds() {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "demote", "id": id, "reason": "It was wrong" });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], true);
assert_eq!(value["action"], "demoted");
assert_eq!(value["nodeId"], id);
assert_eq!(value["reason"], "It was wrong");
assert!(value["note"].as_str().unwrap().contains("NOT deleted"));
}
#[tokio::test]
async fn test_demote_changes_contain_expected_fields() {
let (storage, _dir) = test_storage().await;
let id = ingest_memory(&storage).await;
let args = serde_json::json!({ "action": "demote", "id": id });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
let value = result.unwrap();
assert!(value["changes"]["retrievalStrength"]["before"].is_number());
assert_eq!(value["changes"]["retrievalStrength"]["delta"], "-0.30");
assert_eq!(value["changes"]["retentionStrength"]["delta"], "-0.15");
assert_eq!(value["changes"]["stability"]["multiplier"], "0.5x");
}
}

View file

@ -8,7 +8,6 @@
// Active unified tools
pub mod codebase_unified;
pub mod ingest;
pub mod intention_unified;
pub mod memory_unified;
pub mod search_unified;
@ -22,7 +21,6 @@ pub mod timeline;
pub mod maintenance;
// v1.3: Auto-save and dedup tools
pub mod checkpoint;
pub mod dedup;
pub mod importance;
@ -35,6 +33,8 @@ pub mod restore;
// Deprecated tools - kept for internal backwards compatibility
// These modules are intentionally unused in the public API
#[allow(dead_code)]
pub mod checkpoint;
#[allow(dead_code)]
pub mod codebase;
#[allow(dead_code)]
pub mod consolidate;
@ -43,6 +43,8 @@ pub mod context;
#[allow(dead_code)]
pub mod feedback;
#[allow(dead_code)]
pub mod ingest;
#[allow(dead_code)]
pub mod intentions;
#[allow(dead_code)]
pub mod knowledge;

View file

@ -26,13 +26,17 @@ use vestige_core::{
};
/// Input schema for smart_ingest tool
///
/// Supports two modes:
/// - **Single mode**: provide `content` (required) + optional fields
/// - **Batch mode**: provide `items` array (max 20), each with full cognitive pipeline
pub fn schema() -> Value {
serde_json::json!({
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content to remember. Will be compared against existing memories."
"description": "The content to remember. Will be compared against existing memories. (Single mode)"
},
"node_type": {
"type": "string",
@ -52,20 +56,61 @@ pub fn schema() -> Value {
"type": "boolean",
"description": "Force creation of a new memory even if similar content exists",
"default": false
},
"items": {
"type": "array",
"description": "Batch mode: array of items to save (max 20). Each runs through full cognitive pipeline with Prediction Error Gating. Use at session end or before context compaction.",
"maxItems": 20,
"items": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content to remember"
},
"tags": {
"type": "array",
"items": { "type": "string" },
"description": "Tags for categorization"
},
"node_type": {
"type": "string",
"description": "Type: fact, concept, event, person, place, note, pattern, decision",
"default": "fact"
},
"source": {
"type": "string",
"description": "Source reference"
}
},
"required": ["content"]
}
}
},
"required": ["content"]
}
})
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SmartIngestArgs {
content: String,
content: Option<String>,
#[serde(alias = "node_type")]
node_type: Option<String>,
tags: Option<Vec<String>>,
source: Option<String>,
force_create: Option<bool>,
items: Option<Vec<BatchItem>>,
}
/// A single item in batch mode
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct BatchItem {
content: String,
tags: Option<Vec<String>>,
#[serde(alias = "node_type")]
node_type: Option<String>,
source: Option<String>,
}
pub async fn execute(
@ -78,12 +123,20 @@ pub async fn execute(
None => return Err("Missing arguments".to_string()),
};
// Detect mode: batch (items present) vs single (content present)
if let Some(items) = args.items {
return execute_batch(storage, cognitive, items).await;
}
// Single mode: content is required
let content = args.content.ok_or("Missing 'content' field. Provide 'content' for single mode or 'items' for batch mode.")?;
// Validate content
if args.content.trim().is_empty() {
if content.trim().is_empty() {
return Err("Content cannot be empty".to_string());
}
if args.content.len() > 1_000_000 {
if content.len() > 1_000_000 {
return Err("Content too large (max 1MB)".to_string());
}
@ -96,7 +149,7 @@ pub async fn execute(
if let Ok(cog) = cognitive.try_lock() {
// 4A. Full 4-channel importance scoring
let context = ImportanceContext::current();
let importance = cog.importance_signals.compute_importance(&args.content, &context);
let importance = cog.importance_signals.compute_importance(&content, &context);
importance_composite = importance.composite;
// 4B. Intent detection → auto-tag
@ -113,11 +166,11 @@ pub async fn execute(
}
// 4D. Adaptive embedding — detect content type for logging
let _content_type = ContentType::detect(&args.content);
let _content_type = ContentType::detect(&content);
}
let input = IngestInput {
content: args.content.clone(),
content: content.clone(),
node_type: args.node_type.unwrap_or_else(|| "fact".to_string()),
source: args.source,
sentiment_score: 0.0,
@ -217,6 +270,181 @@ pub async fn execute(
}
}
/// Execute batch mode: process up to 20 items, each with full cognitive pipeline.
///
/// Unlike the old `session_checkpoint` tool, batch mode runs the full cognitive
/// pre-ingest (importance scoring, intent detection) and post-ingest (synaptic
/// tagging, novelty update, hippocampal indexing) pipelines per item.
async fn execute_batch(
storage: &Arc<Mutex<Storage>>,
cognitive: &Arc<Mutex<CognitiveEngine>>,
items: Vec<BatchItem>,
) -> Result<Value, String> {
if items.is_empty() {
return Err("Items array cannot be empty".to_string());
}
if items.len() > 20 {
return Err("Maximum 20 items per batch".to_string());
}
let mut results = Vec::new();
let mut created = 0u32;
let mut updated = 0u32;
let mut skipped = 0u32;
let mut errors = 0u32;
for (i, item) in items.into_iter().enumerate() {
// Skip empty content
if item.content.trim().is_empty() {
results.push(serde_json::json!({
"index": i,
"status": "skipped",
"reason": "Empty content"
}));
skipped += 1;
continue;
}
// Skip content > 1MB
if item.content.len() > 1_000_000 {
results.push(serde_json::json!({
"index": i,
"status": "skipped",
"reason": "Content too large (max 1MB)"
}));
skipped += 1;
continue;
}
// ================================================================
// COGNITIVE PRE-INGEST (per item)
// ================================================================
let mut importance_composite = 0.0_f64;
let mut tags = item.tags.unwrap_or_default();
if let Ok(cog) = cognitive.try_lock() {
let context = ImportanceContext::current();
let importance = cog.importance_signals.compute_importance(&item.content, &context);
importance_composite = importance.composite;
let intent_result = cog.intent_detector.detect_intent();
if intent_result.confidence > 0.5 {
let intent_tag = format!("intent:{:?}", intent_result.primary_intent);
let intent_tag = if intent_tag.len() > 50 {
format!("{}...", &intent_tag[..47])
} else {
intent_tag
};
tags.push(intent_tag);
}
let _content_type = ContentType::detect(&item.content);
}
let input = IngestInput {
content: item.content.clone(),
node_type: item.node_type.unwrap_or_else(|| "fact".to_string()),
source: item.source,
sentiment_score: 0.0,
sentiment_magnitude: importance_composite,
tags,
valid_from: None,
valid_until: None,
};
// ================================================================
// INGEST (storage lock per item)
// ================================================================
let mut storage_guard = storage.lock().await;
#[cfg(all(feature = "embeddings", feature = "vector-search"))]
{
match storage_guard.smart_ingest(input) {
Ok(result) => {
let node_id = result.node.id.clone();
let node_content = result.node.content.clone();
let node_type = result.node.node_type.clone();
drop(storage_guard);
match result.decision.as_str() {
"create" | "supersede" | "replace" => created += 1,
"update" | "reinforce" | "merge" | "add_context" => updated += 1,
_ => created += 1,
}
// Post-ingest cognitive side effects
run_post_ingest(cognitive, &node_id, &node_content, &node_type, importance_composite);
results.push(serde_json::json!({
"index": i,
"status": "saved",
"decision": result.decision,
"nodeId": node_id,
"similarity": result.similarity,
"importanceScore": importance_composite,
"reason": result.reason
}));
}
Err(e) => {
drop(storage_guard);
errors += 1;
results.push(serde_json::json!({
"index": i,
"status": "error",
"reason": e.to_string()
}));
}
}
}
#[cfg(not(all(feature = "embeddings", feature = "vector-search")))]
{
match storage_guard.ingest(input) {
Ok(node) => {
let node_id = node.id.clone();
let node_content = node.content.clone();
let node_type = node.node_type.clone();
drop(storage_guard);
created += 1;
run_post_ingest(cognitive, &node_id, &node_content, &node_type, importance_composite);
results.push(serde_json::json!({
"index": i,
"status": "saved",
"decision": "create",
"nodeId": node_id,
"importanceScore": importance_composite,
"reason": "Embeddings not available - used regular ingest"
}));
}
Err(e) => {
drop(storage_guard);
errors += 1;
results.push(serde_json::json!({
"index": i,
"status": "error",
"reason": e.to_string()
}));
}
}
}
}
Ok(serde_json::json!({
"success": errors == 0,
"mode": "batch",
"summary": {
"total": results.len(),
"created": created,
"updated": updated,
"skipped": skipped,
"errors": errors
},
"results": results
}))
}
/// Cognitive post-ingest side effects: synaptic tagging, novelty update, hippocampal indexing.
///
/// Uses try_lock() for non-blocking access. If cognitive is locked, side effects are skipped.
@ -323,7 +551,9 @@ mod tests {
assert_eq!(schema_value["type"], "object");
assert!(schema_value["properties"]["content"].is_object());
assert!(schema_value["properties"]["forceCreate"].is_object());
assert!(schema_value["required"].as_array().unwrap().contains(&serde_json::json!("content")));
assert!(schema_value["properties"]["items"].is_object());
// v1.7: no top-level required — content for single mode, items for batch mode
assert!(schema_value.get("required").is_none() || schema_value["required"].is_null());
}
#[tokio::test]
@ -402,6 +632,253 @@ mod tests {
let args = serde_json::json!({ "tags": ["test"] });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Invalid arguments"));
assert!(result.unwrap_err().contains("content"));
}
// ========================================================================
// TESTS PORTED FROM ingest.rs (v1.7.0 merge)
// ========================================================================
#[tokio::test]
async fn test_smart_ingest_with_all_optional_fields() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({
"content": "Complex memory with all metadata.",
"node_type": "decision",
"tags": ["architecture", "design"],
"source": "team meeting notes"
});
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], true);
assert!(value["nodeId"].is_string());
}
#[tokio::test]
async fn test_smart_ingest_default_node_type_is_fact() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "content": "Default type test content." });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let node_id = result.unwrap()["nodeId"].as_str().unwrap().to_string();
let storage_lock = storage.lock().await;
let node = storage_lock.get_node(&node_id).unwrap().unwrap();
assert_eq!(node.node_type, "fact");
}
#[test]
fn test_schema_has_optional_fields() {
let schema_value = schema();
assert!(schema_value["properties"]["node_type"].is_object());
assert!(schema_value["properties"]["tags"].is_object());
assert!(schema_value["properties"]["source"].is_object());
}
#[tokio::test]
async fn test_smart_ingest_with_source() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({
"content": "MCP protocol version 2024-11-05 is the current standard.",
"source": "https://modelcontextprotocol.io/spec"
});
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["success"], true);
}
// ========================================================================
// BATCH MODE TESTS (ported from checkpoint.rs, v1.7.0 merge)
// ========================================================================
#[tokio::test]
async fn test_batch_empty_items_fails() {
let (storage, _dir) = test_storage().await;
let result = execute(&storage, &test_cognitive(), Some(serde_json::json!({ "items": [] }))).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("empty"));
}
#[tokio::test]
async fn test_batch_ingest() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [
{ "content": "First batch item", "tags": ["test"] },
{ "content": "Second batch item", "tags": ["test"] }
]
})),
).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["mode"], "batch");
assert_eq!(value["summary"]["total"], 2);
}
#[tokio::test]
async fn test_batch_skips_empty_content() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [
{ "content": "Valid item" },
{ "content": "" },
{ "content": "Another valid item" }
]
})),
).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["summary"]["skipped"], 1);
}
#[tokio::test]
async fn test_batch_missing_args_fails() {
let (storage, _dir) = test_storage().await;
let result = execute(&storage, &test_cognitive(), None).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Missing arguments"));
}
#[tokio::test]
async fn test_batch_exceeds_20_items_fails() {
let (storage, _dir) = test_storage().await;
let items: Vec<serde_json::Value> = (0..21)
.map(|i| serde_json::json!({ "content": format!("Item {}", i) }))
.collect();
let result = execute(&storage, &test_cognitive(), Some(serde_json::json!({ "items": items }))).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("Maximum 20 items"));
}
#[tokio::test]
async fn test_batch_exactly_20_items_succeeds() {
let (storage, _dir) = test_storage().await;
let items: Vec<serde_json::Value> = (0..20)
.map(|i| serde_json::json!({ "content": format!("Item {}", i) }))
.collect();
let result = execute(&storage, &test_cognitive(), Some(serde_json::json!({ "items": items }))).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["summary"]["total"], 20);
}
#[tokio::test]
async fn test_batch_skips_whitespace_only_content() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [
{ "content": " \t\n " },
{ "content": "Valid content" }
]
})),
).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["summary"]["skipped"], 1);
assert_eq!(value["summary"]["created"], 1);
}
#[tokio::test]
async fn test_batch_single_item_succeeds() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [{ "content": "Single item" }]
})),
).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["summary"]["total"], 1);
assert_eq!(value["success"], true);
}
#[tokio::test]
async fn test_batch_items_with_all_fields() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [{
"content": "Full fields item",
"tags": ["test", "batch"],
"node_type": "decision",
"source": "test-suite"
}]
})),
).await;
assert!(result.is_ok());
let value = result.unwrap();
assert_eq!(value["summary"]["created"], 1);
}
#[tokio::test]
async fn test_batch_results_array_matches_items() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [
{ "content": "First" },
{ "content": "" },
{ "content": "Third" }
]
})),
).await;
let value = result.unwrap();
let results = value["results"].as_array().unwrap();
assert_eq!(results.len(), 3);
assert_eq!(results[0]["index"], 0);
assert_eq!(results[1]["index"], 1);
assert_eq!(results[1]["status"], "skipped");
assert_eq!(results[2]["index"], 2);
}
#[tokio::test]
async fn test_batch_success_true_when_only_skipped() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [
{ "content": "" },
{ "content": " " }
]
})),
).await;
let value = result.unwrap();
assert_eq!(value["success"], true); // skipped ≠ errors
assert_eq!(value["summary"]["errors"], 0);
assert_eq!(value["summary"]["skipped"], 2);
}
#[tokio::test]
async fn test_batch_has_importance_scores() {
let (storage, _dir) = test_storage().await;
let result = execute(
&storage, &test_cognitive(),
Some(serde_json::json!({
"items": [{ "content": "Important batch memory content" }]
})),
).await;
let value = result.unwrap();
let results = value["results"].as_array().unwrap();
assert!(results[0]["importanceScore"].is_number());
}
#[tokio::test]
async fn test_no_content_no_items_fails() {
let (storage, _dir) = test_storage().await;
let args = serde_json::json!({ "tags": ["orphan"] });
let result = execute(&storage, &test_cognitive(), Some(args)).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("content"));
}
}