Implement Open Source edition limits and feature restrictions

- Add 5 mutation types (paraphrase, noise, tone_shift, prompt_injection, custom)
- Cap mutations at 50 per test run
- Force sequential execution only
- Disable GitHub Actions integration (Cloud feature)
- Add upgrade prompts throughout CLI
- Update README with feature comparison
- Add limits.py module for centralized limit management
- Add cloud and limits CLI commands
- Update all documentation with Cloud upgrade messaging
This commit is contained in:
Entropix 2025-12-29 00:11:02 +08:00
parent 2016be238d
commit 7b75fc9530
47 changed files with 3560 additions and 1012 deletions

View file

@ -14,4 +14,3 @@ pyo3.workspace = true
rayon.workspace = true
serde.workspace = true
serde_json.workspace = true

21
rust/pyproject.toml Normal file
View file

@ -0,0 +1,21 @@
[build-system]
requires = ["maturin>=1.4,<2.0"]
build-backend = "maturin"
[project]
name = "entropix_rust"
version = "0.1.0"
description = "High-performance Rust extensions for Entropix"
requires-python = ">=3.9"
classifiers = [
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Rust",
"License :: OSI Approved :: Apache Software License",
]
[tool.maturin]
features = ["pyo3/extension-module"]
module-name = "entropix_rust"

View file

@ -34,10 +34,10 @@ fn calculate_robustness_score(
if total == 0 {
return 0.0;
}
let weighted_sum = semantic_weight * semantic_passed as f64
let weighted_sum = semantic_weight * semantic_passed as f64
+ deterministic_weight * deterministic_passed as f64;
weighted_sum / total as f64
}
@ -52,18 +52,18 @@ fn calculate_weighted_score(
if results.is_empty() {
return 0.0;
}
let total_weight: f64 = results.iter().map(|(_, w)| w).sum();
let passed_weight: f64 = results
.iter()
.filter(|(passed, _)| *passed)
.map(|(_, w)| w)
.sum();
if total_weight == 0.0 {
return 0.0;
}
passed_weight / total_weight
}
@ -96,20 +96,20 @@ fn parallel_process_mutations(
fn levenshtein_distance(s1: &str, s2: &str) -> usize {
let len1 = s1.chars().count();
let len2 = s2.chars().count();
if len1 == 0 {
return len2;
}
if len2 == 0 {
return len1;
}
let s1_chars: Vec<char> = s1.chars().collect();
let s2_chars: Vec<char> = s2.chars().collect();
let mut prev_row: Vec<usize> = (0..=len2).collect();
let mut curr_row: Vec<usize> = vec![0; len2 + 1];
for i in 1..=len1 {
curr_row[0] = i;
for j in 1..=len2 {
@ -121,7 +121,7 @@ fn levenshtein_distance(s1: &str, s2: &str) -> usize {
}
std::mem::swap(&mut prev_row, &mut curr_row);
}
prev_row[len2]
}
@ -130,11 +130,11 @@ fn levenshtein_distance(s1: &str, s2: &str) -> usize {
/// Normalized similarity between two strings, in the range [0.0, 1.0].
///
/// 1.0 means identical (two empty strings count as identical); 0.0 means
/// maximally different. Computed as `1 - edit_distance / longer_length`,
/// where length is measured in Unicode scalar values (`chars`), matching
/// the char-based counting used by `levenshtein_distance`.
fn string_similarity(s1: &str, s2: &str) -> f64 {
    let longest = s1.chars().count().max(s2.chars().count());
    // Both strings empty: trivially identical, and guards the 0/0 division.
    if longest == 0 {
        return 1.0;
    }
    let edits = levenshtein_distance(s1, s2) as f64;
    1.0 - edits / longest as f64
}
@ -183,4 +183,3 @@ mod tests {
assert!(sim > 0.7 && sim < 0.9);
}
}

View file

@ -16,7 +16,7 @@ where
.num_threads(max_concurrency)
.build()
.unwrap_or_else(|_| rayon::ThreadPoolBuilder::new().build().unwrap());
pool.install(|| {
items.into_par_iter().map(f).collect()
})
@ -39,7 +39,7 @@ where
.chunks(batch_size)
.map(|chunk| chunk.to_vec())
.collect();
batches
.into_par_iter()
.flat_map(|batch| f(&batch))
@ -57,4 +57,3 @@ mod tests {
assert_eq!(results, vec![2, 4, 6, 8, 10]);
}
}

View file

@ -51,7 +51,7 @@ pub fn calculate_statistics(results: &[MutationResult]) -> TestStatistics {
let total = results.len();
let passed = results.iter().filter(|r| r.passed).count();
let failed = total - passed;
// Calculate robustness score
let total_weight: f64 = results.iter().map(|r| r.weight).sum();
let passed_weight: f64 = results
@ -59,27 +59,27 @@ pub fn calculate_statistics(results: &[MutationResult]) -> TestStatistics {
.filter(|r| r.passed)
.map(|r| r.weight)
.sum();
let robustness_score = if total_weight > 0.0 {
passed_weight / total_weight
} else {
0.0
};
// Calculate latency statistics
let mut latencies: Vec<f64> = results.iter().map(|r| r.latency_ms).collect();
latencies.sort_by(|a, b| a.partial_cmp(b).unwrap());
let avg_latency = if !latencies.is_empty() {
latencies.iter().sum::<f64>() / latencies.len() as f64
} else {
0.0
};
let p50 = percentile(&latencies, 50);
let p95 = percentile(&latencies, 95);
let p99 = percentile(&latencies, 99);
// Statistics by mutation type
let mut type_stats = std::collections::HashMap::new();
for result in results {
@ -91,7 +91,7 @@ pub fn calculate_statistics(results: &[MutationResult]) -> TestStatistics {
entry.1 += 1;
}
}
let by_type: Vec<TypeStatistics> = type_stats
.into_iter()
.map(|(mutation_type, (total, passed))| TypeStatistics {
@ -101,7 +101,7 @@ pub fn calculate_statistics(results: &[MutationResult]) -> TestStatistics {
pass_rate: passed as f64 / total as f64,
})
.collect();
TestStatistics {
total_mutations: total,
passed_mutations: passed,
@ -120,7 +120,7 @@ fn percentile(sorted_values: &[f64], p: usize) -> f64 {
if sorted_values.is_empty() {
return 0.0;
}
let index = (p as f64 / 100.0 * (sorted_values.len() - 1) as f64).round() as usize;
sorted_values[index.min(sorted_values.len() - 1)]
}
@ -161,7 +161,7 @@ mod tests {
checks: vec![],
},
];
let stats = calculate_statistics(&results);
assert_eq!(stats.total_mutations, 3);
assert_eq!(stats.passed_mutations, 2);
@ -169,4 +169,3 @@ mod tests {
assert!(stats.robustness_score > 0.5);
}
}