chore: license AGPL-3.0, zero clippy warnings, CHANGELOG through v1.6.0

License:
- Replace MIT/Apache-2.0 with AGPL-3.0-only across all crates and npm packages
- Replace LICENSE file with official GNU AGPL-3.0 text
- Remove LICENSE-MIT and LICENSE-APACHE

Code quality:
- Fix all 44 clippy warnings (zero remaining)
- Collapsible if statements, redundant closures, manual Option::map
- Remove duplicate #[allow(dead_code)] attributes in deprecated tool modules
- Add Default impl for CognitiveEngine
- Replace manual sort_by with sort_by_key

Documentation:
- Update CHANGELOG with v1.2.0, v1.3.0, v1.5.0, v1.6.0 entries
- Update README with v1.6.0 highlights and accurate stats (52K lines, 1100+ tests)
- Add fastembed-rs/ to .gitignore
- Add fastembed-rs to workspace exclude

1115 tests passing, zero warnings, RUSTFLAGS="-Dwarnings" clean.
This commit is contained in:
Sam Valladares 2026-02-19 03:00:39 -06:00
parent 495a88331f
commit ce520bb246
40 changed files with 953 additions and 424 deletions

View file

@@ -226,19 +226,19 @@ pub async fn execute(
// ====================================================================
// STAGE 5: Context matching (Tulving 1973 encoding specificity)
// ====================================================================
if let Some(ref topics) = args.context_topics {
if !topics.is_empty() {
let retrieval_ctx = EncodingContext::new()
.with_topical(TopicalContext::with_topics(topics.clone()));
if let Ok(cog) = cognitive.try_lock() {
for result in &mut filtered_results {
// Build encoding context from memory's tags
let encoding_ctx = EncodingContext::new()
.with_topical(TopicalContext::with_topics(result.node.tags.clone()));
let context_score = cog.context_matcher.match_contexts(&encoding_ctx, &retrieval_ctx);
// Blend: context match boosts relevance up to +30%
result.combined_score *= 1.0 + (context_score as f32 * 0.3);
}
if let Some(ref topics) = args.context_topics
&& !topics.is_empty()
{
let retrieval_ctx = EncodingContext::new()
.with_topical(TopicalContext::with_topics(topics.clone()));
if let Ok(cog) = cognitive.try_lock() {
for result in &mut filtered_results {
// Build encoding context from memory's tags
let encoding_ctx = EncodingContext::new()
.with_topical(TopicalContext::with_topics(result.node.tags.clone()));
let context_score = cog.context_matcher.match_contexts(&encoding_ctx, &retrieval_ctx);
// Blend: context match boosts relevance up to +30%
result.combined_score *= 1.0 + (context_score as f32 * 0.3);
}
}
}
@@ -270,23 +270,23 @@ pub async fn execute(
// STAGE 5B: Retrieval competition (Anderson et al. 1994)
// ====================================================================
let mut suppressed_count = 0_usize;
if filtered_results.len() > 1 {
if let Ok(mut cog) = cognitive.try_lock() {
let candidates: Vec<CompetitionCandidate> = filtered_results
.iter()
.map(|r| CompetitionCandidate {
memory_id: r.node.id.clone(),
relevance_score: r.combined_score as f64,
similarity_to_query: r.semantic_score.unwrap_or(0.0) as f64,
})
.collect();
if let Some(result) = cog.competition_mgr.run_competition(&candidates, 0.7) {
// Apply suppression: losers get penalized
for suppressed_id in &result.suppressed_ids {
if let Some(r) = filtered_results.iter_mut().find(|r| &r.node.id == suppressed_id) {
r.combined_score *= 0.85; // 15% suppression penalty
suppressed_count += 1;
}
if filtered_results.len() > 1
&& let Ok(mut cog) = cognitive.try_lock()
{
let candidates: Vec<CompetitionCandidate> = filtered_results
.iter()
.map(|r| CompetitionCandidate {
memory_id: r.node.id.clone(),
relevance_score: r.combined_score as f64,
similarity_to_query: r.semantic_score.unwrap_or(0.0) as f64,
})
.collect();
if let Some(result) = cog.competition_mgr.run_competition(&candidates, 0.7) {
// Apply suppression: losers get penalized
for suppressed_id in &result.suppressed_ids {
if let Some(r) = filtered_results.iter_mut().find(|r| &r.node.id == suppressed_id) {
r.combined_score *= 0.85; // 15% suppression penalty
suppressed_count += 1;
}
}
}