Release prep: 54 engines, self-hosted signatures, i18n, dashboard updates

This commit is contained in:
DmitrL-dev 2026-03-23 16:45:40 +10:00
parent 694e32be26
commit 41cbfd6e0a
178 changed files with 36008 additions and 399 deletions

View file

@ -0,0 +1,179 @@
package soc
import (
"math"
"sync"
"time"
)
// AnomalyDetector implements §5 — statistical baseline anomaly detection.
// Uses exponentially weighted moving average (EWMA) with Z-score thresholds.
// Safe for concurrent use; all state is guarded by mu.
type AnomalyDetector struct {
	mu         sync.RWMutex
	baselines  map[string]*Baseline // per-metric baselines, keyed by metric name
	alerts     []AnomalyAlert       // bounded history; oldest dropped once maxAlerts is reached
	zThreshold float64              // Z-score threshold for anomaly (default: 3.0)
	maxAlerts  int                  // capacity of the alerts buffer
}
// Baseline tracks statistical properties of a metric.
// During warmup (first 10 observations) Mean/Variance follow Welford's
// online algorithm; afterwards they are EWMA-smoothed with factor Alpha.
type Baseline struct {
	Name       string    `json:"name"`
	Mean       float64   `json:"mean"`
	Variance   float64   `json:"variance"`
	StdDev     float64   `json:"std_dev"` // cached sqrt(Variance)
	Count      int64     `json:"count"`   // total observations recorded
	LastValue  float64   `json:"last_value"`
	LastUpdate time.Time `json:"last_update"`
	Alpha      float64   `json:"alpha"` // EWMA smoothing factor
}
// AnomalyAlert is raised when a metric deviates beyond the threshold.
type AnomalyAlert struct {
	ID        string    `json:"id"`
	Metric    string    `json:"metric"`
	Value     float64   `json:"value"`    // the observed (anomalous) value
	Expected  float64   `json:"expected"` // baseline mean at detection time
	StdDev    float64   `json:"std_dev"`
	ZScore    float64   `json:"z_score"`  // rounded to 2 decimal places
	Severity  string    `json:"severity"` // LOW/MEDIUM/HIGH/CRITICAL, see classifySeverity
	Timestamp time.Time `json:"timestamp"`
}
// NewAnomalyDetector creates the detector with default Z-score threshold of 3.0.
func NewAnomalyDetector() *AnomalyDetector {
	d := &AnomalyDetector{
		baselines:  map[string]*Baseline{},
		zThreshold: 3.0,
		maxAlerts:  500,
	}
	return d
}
// SetThreshold configures the Z-score anomaly threshold.
func (d *AnomalyDetector) SetThreshold(z float64) {
	d.mu.Lock()
	d.zThreshold = z
	d.mu.Unlock()
}
// Observe records a new data point for a metric and checks for anomalies.
// Returns an AnomalyAlert if the value exceeds the threshold, nil otherwise.
//
// Per-metric lifecycle:
//  1. First observation seeds the baseline.
//  2. Observations 2-9 warm up mean/variance (Welford's online algorithm).
//  3. From observation 10 on, the Z-score is evaluated against the current
//     baseline, then the baseline is EWMA-updated.
func (d *AnomalyDetector) Observe(metric string, value float64) *AnomalyAlert {
	d.mu.Lock()
	defer d.mu.Unlock()
	b, exists := d.baselines[metric]
	if !exists {
		// First observation: initialize baseline.
		d.baselines[metric] = &Baseline{
			Name:       metric,
			Mean:       value,
			Count:      1,
			LastValue:  value,
			LastUpdate: time.Now(),
			Alpha:      0.1, // EWMA smoothing factor
		}
		return nil
	}
	b.Count++
	b.LastValue = value
	b.LastUpdate = time.Now()
	// Need minimum observations for meaningful statistics.
	if b.Count < 10 {
		// Update running variance (Welford's online algorithm).
		// delta MUST be computed BEFORE updating the mean.
		delta := value - b.Mean
		b.Mean = b.Mean + delta/float64(b.Count)
		delta2 := value - b.Mean
		b.Variance = b.Variance + (delta*delta2-b.Variance)/float64(b.Count)
		b.StdDev = math.Sqrt(b.Variance)
		return nil
	}
	if b.StdDev == 0 {
		b.StdDev = 0.001 // prevent division by zero on perfectly flat baselines
	}
	// Snapshot the PRE-update baseline. The Z-score is computed against these
	// values, so any alert must report the same figures; previously the alert
	// carried the post-EWMA mean/stddev, already polluted by the anomalous
	// value and inconsistent with the reported ZScore.
	expected := b.Mean
	expectedStdDev := b.StdDev
	zScore := math.Abs(value-expected) / expectedStdDev
	// Update baseline using EWMA.
	b.Mean = b.Alpha*value + (1-b.Alpha)*b.Mean
	delta := value - b.Mean
	b.Variance = b.Alpha*(delta*delta) + (1-b.Alpha)*b.Variance
	b.StdDev = math.Sqrt(b.Variance)
	// Check threshold.
	if zScore < d.zThreshold {
		return nil
	}
	alert := &AnomalyAlert{
		ID:        genID("anomaly"),
		Metric:    metric,
		Value:     value,
		Expected:  expected,
		StdDev:    expectedStdDev,
		ZScore:    math.Round(zScore*100) / 100,
		Severity:  d.classifySeverity(zScore),
		Timestamp: time.Now(),
	}
	if len(d.alerts) >= d.maxAlerts {
		// Buffer full: drop the oldest alert (ring-buffer behavior).
		copy(d.alerts, d.alerts[1:])
		d.alerts[len(d.alerts)-1] = *alert
	} else {
		d.alerts = append(d.alerts, *alert)
	}
	return alert
}
// classifySeverity maps Z-score to severity level.
func (d *AnomalyDetector) classifySeverity(z float64) string {
	if z >= 5.0 {
		return "CRITICAL"
	}
	if z >= 4.0 {
		return "HIGH"
	}
	if z >= 3.0 {
		return "MEDIUM"
	}
	return "LOW"
}
// Alerts returns a copy of up to limit most recent anomaly alerts.
// limit <= 0 (or larger than the history) returns everything.
func (d *AnomalyDetector) Alerts(limit int) []AnomalyAlert {
	d.mu.RLock()
	defer d.mu.RUnlock()
	n := len(d.alerts)
	if limit <= 0 || limit > n {
		limit = n
	}
	out := make([]AnomalyAlert, limit)
	copy(out, d.alerts[n-limit:])
	return out
}
// Baselines returns a snapshot copy of all tracked metric baselines.
func (d *AnomalyDetector) Baselines() map[string]Baseline {
	d.mu.RLock()
	defer d.mu.RUnlock()
	snapshot := make(map[string]Baseline, len(d.baselines))
	for name, baseline := range d.baselines {
		snapshot[name] = *baseline
	}
	return snapshot
}
// Stats returns detector statistics.
func (d *AnomalyDetector) Stats() map[string]any {
	d.mu.RLock()
	defer d.mu.RUnlock()
	stats := make(map[string]any, 3)
	stats["metrics_tracked"] = len(d.baselines)
	stats["total_alerts"] = len(d.alerts)
	stats["z_threshold"] = d.zThreshold
	return stats
}

View file

@ -0,0 +1,101 @@
package soc
import (
"testing"
)
// TestAnomalyDetector_NoAlertDuringWarmup verifies the 10-observation warmup
// phase never raises an alert, even for identical repeated values.
func TestAnomalyDetector_NoAlertDuringWarmup(t *testing.T) {
	detector := NewAnomalyDetector()
	for obs := 0; obs < 10; obs++ {
		if got := detector.Observe("cpu", 50.0); got != nil {
			t.Fatalf("should not alert during warmup, got alert at observation %d", obs)
		}
	}
}
// TestAnomalyDetector_NormalValues verifies a value close to the baseline
// does not trigger an anomaly after warmup.
func TestAnomalyDetector_NormalValues(t *testing.T) {
	detector := NewAnomalyDetector()
	// Build a baseline cycling through 100, 101, 102.
	for obs := 0; obs < 20; obs++ {
		detector.Observe("rps", 100.0+float64(obs%3))
	}
	if got := detector.Observe("rps", 103.0); got != nil {
		t.Fatal("normal value should not trigger anomaly")
	}
}
// TestAnomalyDetector_ExtremeValue verifies an extreme spike against a flat
// baseline raises a CRITICAL alert with a Z-score above the default threshold.
func TestAnomalyDetector_ExtremeValue(t *testing.T) {
	detector := NewAnomalyDetector()
	for obs := 0; obs < 30; obs++ {
		detector.Observe("latency_ms", 10.0)
	}
	alert := detector.Observe("latency_ms", 1000.0)
	switch {
	case alert == nil:
		t.Fatal("extreme value should trigger anomaly")
	case alert.Severity != "CRITICAL":
		t.Fatalf("extreme deviation should be CRITICAL, got %s", alert.Severity)
	case alert.ZScore < 3.0:
		t.Fatalf("Z-score should be >= 3.0, got %f", alert.ZScore)
	}
}
// TestAnomalyDetector_CustomThreshold verifies a lowered Z-score threshold
// makes the detector fire on a moderate deviation.
func TestAnomalyDetector_CustomThreshold(t *testing.T) {
	detector := NewAnomalyDetector()
	detector.SetThreshold(2.0) // more sensitive than the 3.0 default
	for obs := 0; obs < 30; obs++ {
		detector.Observe("mem", 50.0)
	}
	if got := detector.Observe("mem", 80.0); got == nil {
		t.Fatal("moderate deviation should trigger with Z=2.0 threshold")
	}
}
// TestAnomalyDetector_Baselines verifies each observed metric gets its own baseline.
func TestAnomalyDetector_Baselines(t *testing.T) {
	detector := NewAnomalyDetector()
	detector.Observe("metric_a", 10.0)
	detector.Observe("metric_b", 20.0)
	if got := detector.Baselines(); len(got) != 2 {
		t.Fatalf("expected 2 baselines, got %d", len(got))
	}
}
// TestAnomalyDetector_Alerts verifies a triggered anomaly is retrievable via Alerts.
func TestAnomalyDetector_Alerts(t *testing.T) {
	detector := NewAnomalyDetector()
	for obs := 0; obs < 30; obs++ {
		detector.Observe("test", 10.0)
	}
	detector.Observe("test", 10000.0) // massive spike → alert
	if got := detector.Alerts(10); len(got) != 1 {
		t.Fatalf("expected 1 alert, got %d", len(got))
	}
}
// TestAnomalyDetector_Stats verifies the stats map reflects tracked metrics
// and the default threshold.
func TestAnomalyDetector_Stats(t *testing.T) {
	detector := NewAnomalyDetector()
	detector.Observe("x", 1.0)
	stats := detector.Stats()
	if tracked := stats["metrics_tracked"].(int); tracked != 1 {
		t.Fatal("should track 1 metric")
	}
	if threshold := stats["z_threshold"].(float64); threshold != 3.0 {
		t.Fatal("default threshold should be 3.0")
	}
}

View file

@ -0,0 +1,272 @@
package soc
import (
"math"
"sync"
"time"
)
// AlertCluster groups related SOC events using temporal + categorical similarity.
// Phase 1: temporal+session_id fallback (cold start).
// Phase 2: embedding-based DBSCAN when enough events accumulated.
//
// Cold start strategy (§7.6):
//
//	fallback: temporal_clustering
//	timeout: 5m — force embedding mode after 5 minutes even if <50 events
//	min_events_for_embedding: 50
type AlertCluster struct {
	ID        string    `json:"id"`
	Events    []string  `json:"events"`   // Event IDs
	Category  string    `json:"category"` // Dominant category
	Severity  string    `json:"severity"` // Max severity
	Source    string    `json:"source"`   // Dominant source
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"` // bumped on every event added; drives the temporal window
}
// ClusterEngine groups related alerts using configurable strategies.
// Safe for concurrent use; all state is guarded by mu.
type ClusterEngine struct {
	mu       sync.RWMutex
	clusters map[string]*AlertCluster // live clusters, keyed by cluster ID
	config   ClusterConfig
	// Cold start tracking (§7.6): engine begins in temporal mode and is
	// promoted to embedding mode by event volume or elapsed time.
	startTime  time.Time
	eventCount int
	mode       ClusterMode
}
// ClusterConfig holds Alert Clustering parameters.
// See DefaultClusterConfig for the recommended values (§7.6).
type ClusterConfig struct {
	// Cold start (§7.6)
	MinEventsForEmbedding int           `yaml:"min_events_for_embedding" json:"min_events_for_embedding"`
	ColdStartTimeout      time.Duration `yaml:"cold_start_timeout" json:"cold_start_timeout"`
	// Temporal clustering parameters
	TemporalWindow time.Duration `yaml:"temporal_window" json:"temporal_window"` // Group events within this window
	MaxClusterSize int           `yaml:"max_cluster_size" json:"max_cluster_size"`
	// Embedding clustering parameters (Phase 2)
	SimilarityThreshold float64 `yaml:"similarity_threshold" json:"similarity_threshold"` // 0.0-1.0
	EmbeddingModel      string  `yaml:"embedding_model" json:"embedding_model"`           // e.g., "all-MiniLM-L6-v2"
}
// DefaultClusterConfig returns the default clustering configuration (§7.6).
func DefaultClusterConfig() ClusterConfig {
	cfg := ClusterConfig{
		MinEventsForEmbedding: 50,
		ColdStartTimeout:      5 * time.Minute,
		TemporalWindow:        2 * time.Minute,
		MaxClusterSize:        50,
		SimilarityThreshold:   0.75,
		EmbeddingModel:        "all-MiniLM-L6-v2",
	}
	return cfg
}
// ClusterMode tracks the engine operating mode.
type ClusterMode int

const (
	// ClusterModeColdStart uses the temporal+session_id fallback until enough
	// events arrive (or the cold-start timeout elapses).
	ClusterModeColdStart ClusterMode = iota // Temporal+session_id fallback
	// ClusterModeEmbedding enables similarity-based clustering.
	ClusterModeEmbedding // Full embedding-based clustering
)
// String returns a human-readable mode name; any unknown value is reported
// as "cold_start", the conservative default.
func (m ClusterMode) String() string {
	if m == ClusterModeEmbedding {
		return "embedding"
	}
	return "cold_start"
}
// NewClusterEngine creates a cluster engine with the given config.
// The engine always starts in cold-start (temporal) mode.
func NewClusterEngine(config ClusterConfig) *ClusterEngine {
	engine := &ClusterEngine{
		clusters:  map[string]*AlertCluster{},
		config:    config,
		startTime: time.Now(),
		mode:      ClusterModeColdStart,
	}
	return engine
}
// AddEvent assigns an event to a cluster. Returns the cluster ID.
func (ce *ClusterEngine) AddEvent(event SOCEvent) string {
	ce.mu.Lock()
	defer ce.mu.Unlock()

	ce.eventCount++

	// Promote out of cold start once enough events have arrived OR the
	// cold-start timeout has elapsed, whichever comes first.
	if ce.mode == ClusterModeColdStart {
		enoughEvents := ce.eventCount >= ce.config.MinEventsForEmbedding
		timedOut := time.Since(ce.startTime) >= ce.config.ColdStartTimeout
		if enoughEvents || timedOut {
			ce.mode = ClusterModeEmbedding
		}
	}

	// Phase 2: embedding/semantic clustering (DBSCAN-inspired).
	if ce.mode == ClusterModeEmbedding {
		if id := ce.findSemanticCluster(event); id != "" {
			return id
		}
	}

	// Fallback: temporal + category clustering (Phase 1).
	return ce.findOrCreateTemporalCluster(event)
}
// findSemanticCluster uses cosine similarity of event descriptions to find matching clusters.
// This is a simplified DBSCAN-inspired approach that works without an external ML model.
// Returns the ID of the cluster the event was appended to, or "" when no cluster
// clears the similarity threshold. Caller must hold ce.mu.
func (ce *ClusterEngine) findSemanticCluster(event SOCEvent) string {
	if event.Description == "" {
		return ""
	}
	eventVec := textToVector(event.Description)
	var (
		bestCluster string
		bestScore   float64
	)
	for id, cluster := range ce.clusters {
		if len(cluster.Events) >= ce.config.MaxClusterSize {
			continue // full clusters never accept more events
		}
		// Use cluster category + source as proxy embedding when no ML model.
		sim := cosineSimilarity(eventVec, textToVector(cluster.Category+" "+cluster.Source))
		if sim > ce.config.SimilarityThreshold && sim > bestScore {
			bestScore = sim
			bestCluster = id
		}
	}
	if bestCluster == "" {
		return ""
	}
	c := ce.clusters[bestCluster]
	c.Events = append(c.Events, event.ID)
	c.UpdatedAt = time.Now()
	if event.Severity.Rank() > EventSeverity(c.Severity).Rank() {
		c.Severity = string(event.Severity)
	}
	return bestCluster
}
// textToVector creates a simple character-frequency vector for cosine similarity.
// Only ASCII letters and underscore are counted; everything else is ignored.
// Serves as fallback when no external embedding model is available.
func textToVector(text string) map[rune]float64 {
	vec := map[rune]float64{}
	for _, r := range text {
		lower := r >= 'a' && r <= 'z'
		upper := r >= 'A' && r <= 'Z'
		if lower || upper || r == '_' {
			vec[r]++
		}
	}
	return vec
}
// cosineSimilarity computes cosine similarity between two sparse vectors.
// Returns 0 when either vector has zero magnitude (including nil/empty maps).
func cosineSimilarity(a, b map[rune]float64) float64 {
	var dot, magA, magB float64
	for k, av := range a {
		magA += av * av
		dot += av * b[k] // absent keys read as 0 and contribute nothing
	}
	for _, bv := range b {
		magB += bv * bv
	}
	if magA == 0 || magB == 0 {
		return 0
	}
	return dot / (math.Sqrt(magA) * math.Sqrt(magB))
}
// findOrCreateTemporalCluster groups by (category + source) within temporal window.
// Returns the ID of the matched or newly created cluster. Caller must hold ce.mu.
func (ce *ClusterEngine) findOrCreateTemporalCluster(event SOCEvent) string {
	now := time.Now()
	key := string(event.Source) + ":" + event.Category
	// Search existing clusters within temporal window.
	for id, cluster := range ce.clusters {
		if cluster.Category == event.Category &&
			cluster.Source == string(event.Source) &&
			now.Sub(cluster.UpdatedAt) <= ce.config.TemporalWindow &&
			len(cluster.Events) < ce.config.MaxClusterSize {
			// Add to existing cluster.
			cluster.Events = append(cluster.Events, event.ID)
			cluster.UpdatedAt = now
			if event.Severity.Rank() > EventSeverity(cluster.Severity).Rank() {
				cluster.Severity = string(event.Severity)
			}
			return id
		}
	}
	// Create new cluster. The ID uses nanosecond precision: with the previous
	// second-resolution suffix ("150405"), a second same-key cluster created
	// within the same second (possible once the first fills to MaxClusterSize)
	// would silently overwrite the existing map entry and lose its events.
	clusterID := "clst-" + key + "-" + now.Format("150405.000000000")
	ce.clusters[clusterID] = &AlertCluster{
		ID:        clusterID,
		Events:    []string{event.ID},
		Category:  event.Category,
		Severity:  string(event.Severity),
		Source:    string(event.Source),
		CreatedAt: now,
		UpdatedAt: now,
	}
	return clusterID
}
// Stats returns clustering statistics.
func (ce *ClusterEngine) Stats() map[string]any {
	ce.mu.RLock()
	defer ce.mu.RUnlock()

	var totalEvents, maxSize int
	for _, cluster := range ce.clusters {
		size := len(cluster.Events)
		totalEvents += size
		if size > maxSize {
			maxSize = size
		}
	}

	avgSize := 0.0
	if n := len(ce.clusters); n > 0 {
		// Average cluster size, rounded to 2 decimal places.
		avgSize = math.Round(float64(totalEvents)/float64(n)*100) / 100
	}

	uiHint := "Clustering warming up..."
	if ce.mode != ClusterModeColdStart {
		uiHint = "Smart clustering active"
	}

	return map[string]any{
		"mode":                 ce.mode.String(),
		"ui_hint":              uiHint,
		"total_clusters":       len(ce.clusters),
		"total_events":         totalEvents,
		"avg_cluster_size":     avgSize,
		"max_cluster_size":     maxSize,
		"events_processed":     ce.eventCount,
		"embedding_model":      ce.config.EmbeddingModel,
		"cold_start_threshold": ce.config.MinEventsForEmbedding,
	}
}
// Clusters returns all current clusters. Note: the returned slice holds
// pointers into engine state (not deep copies) in unspecified order.
func (ce *ClusterEngine) Clusters() []*AlertCluster {
	ce.mu.RLock()
	defer ce.mu.RUnlock()
	out := make([]*AlertCluster, 0, len(ce.clusters))
	for _, cluster := range ce.clusters {
		out = append(out, cluster)
	}
	return out
}

View file

@ -6,18 +6,23 @@ import (
)
// SOCCorrelationRule defines a time-windowed correlation rule for SOC events.
// Unlike oracle.CorrelationRule (pattern-based), SOC rules operate on event
// categories within a sliding time window.
// Supports two modes:
// - Co-occurrence: RequiredCategories must all appear within TimeWindow (unordered)
// - Temporal sequence: SequenceCategories must appear in ORDER within TimeWindow
type SOCCorrelationRule struct {
ID string `json:"id"`
Name string `json:"name"`
RequiredCategories []string `json:"required_categories"` // Event categories that must co-occur
MinEvents int `json:"min_events"` // Minimum distinct events to trigger
TimeWindow time.Duration `json:"time_window"` // Sliding window for temporal correlation
Severity EventSeverity `json:"severity"` // Resulting incident severity
RequiredCategories []string `json:"required_categories"` // Co-occurrence (unordered)
SequenceCategories []string `json:"sequence_categories"` // Temporal sequence (ordered A→B→C)
SeverityTrend string `json:"severity_trend,omitempty"` // "ascending" — detect escalation pattern
TrendCategory string `json:"trend_category,omitempty"` // Category to track for severity trend
MinEvents int `json:"min_events"`
TimeWindow time.Duration `json:"time_window"`
Severity EventSeverity `json:"severity"`
KillChainPhase string `json:"kill_chain_phase"`
MITREMapping []string `json:"mitre_mapping"`
Description string `json:"description"`
CrossSensor bool `json:"cross_sensor"`
}
// DefaultSOCCorrelationRules returns built-in SOC correlation rules (§7 from spec).
@ -100,6 +105,98 @@ func DefaultSOCCorrelationRules() []SOCCorrelationRule {
MITREMapping: []string{"T1546", "T1053"},
Description: "Jailbreak followed by persistence mechanism indicates attacker establishing long-term foothold.",
},
{
ID: "SOC-CR-008",
Name: "Slow Data Exfiltration",
RequiredCategories: []string{"pii_leak", "exfiltration"},
MinEvents: 5,
TimeWindow: 1 * time.Hour,
Severity: SeverityHigh,
KillChainPhase: "Exfiltration",
MITREMapping: []string{"T1041", "T1048"},
Description: "Multiple small PII leaks over extended period from same session. Low-and-slow exfiltration evades threshold-based detection.",
},
// --- Temporal sequence rules (ordered A→B→C) ---
{
ID: "SOC-CR-009",
Name: "Recon→Exploit→Exfil Chain",
SequenceCategories: []string{"reconnaissance", "prompt_injection", "exfiltration"},
MinEvents: 3,
TimeWindow: 30 * time.Minute,
Severity: SeverityCritical,
KillChainPhase: "Full Kill Chain",
MITREMapping: []string{"T1595", "T1059", "T1041"},
Description: "Ordered sequence: reconnaissance followed by prompt injection followed by data exfiltration. Full kill chain attack in progress.",
},
{
ID: "SOC-CR-010",
Name: "Auth Spray→Bypass Sequence",
SequenceCategories: []string{"auth_bypass", "tool_abuse"},
MinEvents: 2,
TimeWindow: 10 * time.Minute,
Severity: SeverityHigh,
KillChainPhase: "Exploitation",
MITREMapping: []string{"T1110", "T1078"},
Description: "Authentication bypass attempt followed by tool abuse within 10 minutes. Credential compromise leading to privilege escalation.",
},
{
ID: "SOC-CR-011",
Name: "Cross-Sensor Session Attack",
MinEvents: 3,
TimeWindow: 15 * time.Minute,
Severity: SeverityCritical,
KillChainPhase: "Lateral Movement",
MITREMapping: []string{"T1021", "T1550"},
CrossSensor: true,
Description: "Same session_id seen across 3+ distinct sensors within 15 minutes. Indicates a compromised session exploited from multiple attack vectors.",
},
// ── Lattice Integration Rules ──────────────────────────────────
{
ID: "SOC-CR-012",
Name: "TSA Chain Violation",
SequenceCategories: []string{"auth_bypass", "tool_abuse", "exfiltration"},
MinEvents: 3,
TimeWindow: 15 * time.Minute,
Severity: SeverityCritical,
KillChainPhase: "Actions on Objectives",
MITREMapping: []string{"T1078", "T1059", "T1048"},
Description: "Trust-Safety-Alignment chain violation: auth bypass followed by tool abuse and data exfiltration within 15 minutes. Full kill chain detected.",
},
{
ID: "SOC-CR-013",
Name: "GPS Early Warning",
RequiredCategories: []string{"anomaly", "exfiltration"},
MinEvents: 2,
TimeWindow: 10 * time.Minute,
Severity: SeverityHigh,
KillChainPhase: "Reconnaissance",
MITREMapping: []string{"T1595", "T1041"},
Description: "Guardrail-Perimeter-Surveillance early warning: anomaly detection followed by exfiltration attempt. Potential reconnaissance-to-extraction pipeline.",
},
{
ID: "SOC-CR-014",
Name: "MIRE Containment Activated",
SequenceCategories: []string{"prompt_injection", "jailbreak"},
MinEvents: 2,
TimeWindow: 5 * time.Minute,
Severity: SeverityCritical,
KillChainPhase: "Weaponization",
MITREMapping: []string{"T1059.007", "T1203"},
Description: "Monitor-Isolate-Respond-Evaluate containment: prompt injection escalated to jailbreak within 5 minutes. Immune system response required.",
},
// ── Severity Trend Rules ──────────────────────────────────────
{
ID: "SOC-CR-015",
Name: "Crescendo Escalation",
SeverityTrend: "ascending",
TrendCategory: "jailbreak",
MinEvents: 3,
TimeWindow: 15 * time.Minute,
Severity: SeverityCritical,
KillChainPhase: "Exploitation",
MITREMapping: []string{"T1059", "T1548"},
Description: "Crescendo attack: 3+ jailbreak attempts with ascending severity within 15 minutes. Gradual guardrail erosion detected.",
},
}
}
@ -152,6 +249,21 @@ func evaluateRule(rule SOCCorrelationRule, events []SOCEvent, now time.Time) *Co
return nil
}
// Severity trend: detect ascending severity in same-category events.
if rule.SeverityTrend == "ascending" && rule.TrendCategory != "" {
return evaluateSeverityTrendRule(rule, inWindow)
}
// Temporal sequence: check ordered occurrence (A→B→C within window).
if len(rule.SequenceCategories) > 0 {
return evaluateSequenceRule(rule, inWindow)
}
// Cross-sensor session attack: same session_id across 3+ distinct sources.
if rule.CrossSensor {
return evaluateCrossSensorRule(rule, inWindow)
}
// Special case: SOC-CR-002 (Coordinated Attack) — check distinct category count.
if len(rule.RequiredCategories) == 0 && rule.MinEvents > 0 {
return evaluateCoordinatedAttack(rule, inWindow)
@ -214,3 +326,137 @@ func evaluateCoordinatedAttack(rule SOCCorrelationRule, events []SOCEvent) *Corr
}
return nil
}
// evaluateCrossSensorRule detects the same session_id seen across N+ distinct sources/sensors.
// Triggers SOC-CR-011: indicates lateral movement or compromised session.
// rule.MinEvents is interpreted as the minimum number of DISTINCT sources.
func evaluateCrossSensorRule(rule SOCCorrelationRule, events []SOCEvent) *CorrelationMatch {
	// Group events by session_id and track the distinct sources per session.
	type sessionInfo struct {
		sources map[EventSource]bool
		events  []SOCEvent
	}
	bySession := map[string]*sessionInfo{}
	for _, e := range events {
		if e.SessionID == "" {
			continue // events without a session cannot be correlated across sensors
		}
		info := bySession[e.SessionID]
		if info == nil {
			info = &sessionInfo{sources: map[EventSource]bool{}}
			bySession[e.SessionID] = info
		}
		info.sources[e.Source] = true
		info.events = append(info.events, e)
	}
	for _, info := range bySession {
		if len(info.sources) < rule.MinEvents {
			continue
		}
		return &CorrelationMatch{
			Rule:      rule,
			Events:    info.events,
			MatchedAt: time.Now(),
		}
	}
	return nil
}
// evaluateSequenceRule checks for ordered temporal sequences (A→B→C).
// Events must appear in the specified order within the time window, anchored
// at the first matched event.
//
// Every occurrence of the first category is tried as a candidate anchor. The
// previous single-pass scan with an inline window reset could miss valid
// sequences whose anchor arrived while an earlier (ultimately failed)
// candidate was still open — e.g. A@0m, A@20m, B@25m, C@28m with a 10m
// window matched nothing, although A@20m→B@25m→C@28m fits.
func evaluateSequenceRule(rule SOCCorrelationRule, events []SOCEvent) *CorrelationMatch {
	if len(rule.SequenceCategories) == 0 {
		return nil
	}
	// Sort events by timestamp (oldest first).
	sorted := make([]SOCEvent, len(events))
	copy(sorted, events)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].Timestamp.Before(sorted[j].Timestamp)
	})
	for start, anchor := range sorted {
		if anchor.Category != rule.SequenceCategories[0] {
			continue
		}
		// Greedily match the remaining steps; the window is anchored at the
		// first step's timestamp.
		matched := []SOCEvent{anchor}
		seqIdx := 1
		for _, e := range sorted[start+1:] {
			if seqIdx >= len(rule.SequenceCategories) {
				break
			}
			if e.Timestamp.Sub(anchor.Timestamp) > rule.TimeWindow {
				break // window exhausted for this anchor
			}
			if e.Category == rule.SequenceCategories[seqIdx] {
				matched = append(matched, e)
				seqIdx++
			}
		}
		if seqIdx >= len(rule.SequenceCategories) {
			return &CorrelationMatch{
				Rule:      rule,
				Events:    matched,
				MatchedAt: time.Now(),
			}
		}
	}
	return nil
}
// evaluateSeverityTrendRule detects ascending severity pattern in same-category events.
// Example: jailbreak(LOW) → jailbreak(MEDIUM) → jailbreak(HIGH) within 15 min = CRESCENDO.
// It finds the longest CONTIGUOUS strictly-ascending run (in timestamp order)
// and matches when that run reaches rule.MinEvents.
func evaluateSeverityTrendRule(rule SOCCorrelationRule, events []SOCEvent) *CorrelationMatch {
	// Keep only the target category.
	var filtered []SOCEvent
	for _, e := range events {
		if e.Category == rule.TrendCategory {
			filtered = append(filtered, e)
		}
	}
	if len(filtered) < rule.MinEvents {
		return nil
	}
	// Order chronologically.
	sort.Slice(filtered, func(i, j int) bool {
		return filtered[i].Timestamp.Before(filtered[j].Timestamp)
	})
	// Track the longest strictly-ascending severity run.
	var best, run []SOCEvent
	for _, e := range filtered {
		ascending := len(run) == 0 || e.Severity.Rank() > run[len(run)-1].Severity.Rank()
		if !ascending {
			if len(run) > len(best) {
				best = run
			}
			run = nil
		}
		run = append(run, e)
	}
	if len(run) > len(best) {
		best = run
	}
	if len(best) < rule.MinEvents {
		return nil
	}
	return &CorrelationMatch{
		Rule:      rule,
		Events:    best,
		MatchedAt: time.Now(),
	}
}

View file

@ -139,7 +139,7 @@ func TestCorrelateEmptyInput(t *testing.T) {
func TestDefaultRuleCount(t *testing.T) {
rules := DefaultSOCCorrelationRules()
if len(rules) != 7 {
t.Errorf("expected 7 default rules, got %d", len(rules))
if len(rules) != 15 {
t.Errorf("expected 15 default rules, got %d", len(rules))
}
}

View file

@ -0,0 +1,59 @@
package soc
import "errors"
// Domain-level sentinel errors for the SOC subsystem.
// These replace string matching in HTTP handlers with proper errors.Is() checks.
var (
// ErrNotFound is returned when a requested entity (event, incident, sensor) does not exist.
ErrNotFound = errors.New("soc: not found")
// ErrAuthFailed is returned when sensor key validation fails (§17.3 T-01).
ErrAuthFailed = errors.New("soc: authentication failed")
// ErrRateLimited is returned when a sensor exceeds MaxEventsPerSecondPerSensor (§17.3).
ErrRateLimited = errors.New("soc: rate limit exceeded")
// ErrSecretDetected is returned when the Secret Scanner (Step 0) detects credentials
// in the event payload. This is an INVARIANT — cannot be disabled (§5.4).
ErrSecretDetected = errors.New("soc: secret scanner rejected")
// ErrInvalidInput is returned when event fields fail validation.
ErrInvalidInput = errors.New("soc: invalid input")
// ErrDraining is returned when the service is in drain mode (§15.7).
// HTTP handlers should return 503 Service Unavailable.
ErrDraining = errors.New("soc: service draining for update")
)
// ValidationError provides detailed field-level validation errors.
type ValidationError struct {
	Field   string `json:"field"`   // name of the offending field, e.g. "source"
	Message string `json:"message"` // human-readable reason
}
// ValidationErrors collects multiple field validation errors.
// It implements error and unwraps to ErrInvalidInput, so callers can use
// errors.Is(err, ErrInvalidInput).
type ValidationErrors struct {
	Errors []ValidationError `json:"errors"`
}
// Error implements the error interface. It reports EVERY recorded field
// error (semicolon-separated), not just the first, so a single log line
// shows the complete validation failure.
func (ve *ValidationErrors) Error() string {
	if len(ve.Errors) == 0 {
		return ErrInvalidInput.Error()
	}
	msg := ErrInvalidInput.Error() + ": " + ve.Errors[0].Message
	for _, e := range ve.Errors[1:] {
		msg += "; " + e.Message
	}
	return msg
}
// Unwrap makes errors.Is(err, ErrInvalidInput) succeed for any ValidationErrors.
func (ve *ValidationErrors) Unwrap() error {
	return ErrInvalidInput
}
// Add appends a field validation error.
func (ve *ValidationErrors) Add(field, message string) {
	ve.Errors = append(ve.Errors, ValidationError{
		Field:   field,
		Message: message,
	})
}
// HasErrors reports whether at least one validation error was recorded.
func (ve *ValidationErrors) HasErrors() bool {
	return len(ve.Errors) != 0
}

View file

@ -4,6 +4,7 @@
package soc
import (
"crypto/sha256"
"fmt"
"time"
)
@ -56,6 +57,7 @@ const (
SourceImmune EventSource = "immune"
SourceMicroSwarm EventSource = "micro-swarm"
SourceGoMCP EventSource = "gomcp"
SourceShadowAI EventSource = "shadow-ai"
SourceExternal EventSource = "external"
)
@ -64,6 +66,7 @@ const (
// Sensor → Secret Scanner (Step 0) → DIP → Decision Logger → Queue → Correlation.
type SOCEvent struct {
ID string `json:"id"`
TenantID string `json:"tenant_id,omitempty"`
Source EventSource `json:"source"`
SensorID string `json:"sensor_id"`
SensorKey string `json:"-"` // §17.3 T-01: pre-shared key (never serialized)
@ -74,6 +77,7 @@ type SOCEvent struct {
Description string `json:"description"`
Payload string `json:"payload,omitempty"` // Raw input for Secret Scanner Step 0
SessionID string `json:"session_id,omitempty"`
ContentHash string `json:"content_hash,omitempty"` // SHA-256 dedup key (§5.2)
DecisionHash string `json:"decision_hash,omitempty"` // SHA-256 chain link
Verdict Verdict `json:"verdict"`
ZeroGMode bool `json:"zero_g_mode,omitempty"` // §13.4: Strike Force operation tag
@ -81,10 +85,101 @@ type SOCEvent struct {
Metadata map[string]string `json:"metadata,omitempty"` // Extensible key-value pairs
}
// ComputeContentHash generates a SHA-256 hash from source+category+description+payload
// for content-based deduplication (§5.2 step 2). The hex digest is stored on
// the event and returned.
func (e *SOCEvent) ComputeContentHash() string {
	material := fmt.Sprintf("%s|%s|%s|%s", e.Source, e.Category, e.Description, e.Payload)
	sum := sha256.Sum256([]byte(material))
	e.ContentHash = fmt.Sprintf("%x", sum)
	return e.ContentHash
}
// KnownCategories is the set of recognized event categories.
// Events with unknown categories are still accepted but logged as warnings.
var KnownCategories = map[string]bool{
"jailbreak": true,
"prompt_injection": true,
"tool_abuse": true,
"exfiltration": true,
"pii_leak": true,
"auth_bypass": true,
"encoding": true,
"persistence": true,
"sensor_anomaly": true,
"dos": true,
"model_theft": true,
"supply_chain": true,
"data_poisoning": true,
"evasion": true,
"shadow_ai_usage": true,
"integration_health": true,
"other": true,
// GenAI EDR categories (SDD-001)
"genai_child_process": true,
"genai_sensitive_file_access": true,
"genai_unusual_domain": true,
"genai_credential_access": true,
"genai_persistence": true,
"genai_config_modification": true,
}
// ValidSeverity returns true if the severity is a known value.
func ValidSeverity(s EventSeverity) bool {
	switch s {
	case SeverityInfo, SeverityLow, SeverityMedium, SeverityHigh, SeverityCritical:
		return true
	default:
		return false
	}
}
// ValidSource returns true if the source is a known value.
func ValidSource(s EventSource) bool {
	switch s {
	case SourceSentinelCore, SourceShield, SourceImmune, SourceMicroSwarm, SourceGoMCP, SourceShadowAI, SourceExternal:
		return true
	default:
		return false
	}
}
// Validate checks all required fields and enum values.
// Returns nil if valid, or a *ValidationErrors with field-level details.
// Category values are NOT restricted to KnownCategories — unknown categories
// are accepted by design.
func (e SOCEvent) Validate() error {
	ve := &ValidationErrors{}
	if e.Source == "" {
		ve.Add("source", "source is required")
	} else if !ValidSource(e.Source) {
		// Keep this list in sync with ValidSource: shadow-ai was previously
		// accepted by ValidSource but missing from this message.
		ve.Add("source", fmt.Sprintf("unknown source: %q (valid: sentinel-core, shield, immune, micro-swarm, gomcp, shadow-ai, external)", e.Source))
	}
	if e.Severity == "" {
		ve.Add("severity", "severity is required")
	} else if !ValidSeverity(e.Severity) {
		ve.Add("severity", fmt.Sprintf("unknown severity: %q (valid: INFO, LOW, MEDIUM, HIGH, CRITICAL)", e.Severity))
	}
	if e.Category == "" {
		ve.Add("category", "category is required")
	}
	if e.Description == "" {
		ve.Add("description", "description is required")
	}
	if e.Confidence < 0 || e.Confidence > 1 {
		ve.Add("confidence", "confidence must be between 0.0 and 1.0")
	}
	if ve.HasErrors() {
		return ve
	}
	return nil
}
// NewSOCEvent creates a new SOC event with auto-generated ID.
func NewSOCEvent(source EventSource, severity EventSeverity, category, description string) SOCEvent {
return SOCEvent{
ID: fmt.Sprintf("evt-%d-%s", time.Now().UnixMicro(), source),
ID: genID("evt"),
Source: source,
Severity: severity,
Category: category,
@ -122,3 +217,4 @@ func (e SOCEvent) WithVerdict(v Verdict) SOCEvent {
func (e SOCEvent) IsCritical() bool {
return e.Severity == SeverityHigh || e.Severity == SeverityCritical
}

View file

@ -0,0 +1,69 @@
package soc
import (
"log/slog"
"sync"
)
// EventBus implements a pub-sub event bus for real-time event streaming (SSE/WebSocket).
// Subscribers receive events as they are ingested via IngestEvent pipeline.
// Safe for concurrent use; mu guards the subscribers map.
type EventBus struct {
	mu          sync.RWMutex
	subscribers map[string]chan SOCEvent // subscriber id → buffered delivery channel
	bufSize     int                      // per-subscriber channel buffer size
}
// NewEventBus creates a new event bus with the given channel buffer size.
// Non-positive sizes fall back to a buffer of 100.
func NewEventBus(bufSize int) *EventBus {
	if bufSize <= 0 {
		bufSize = 100
	}
	bus := &EventBus{
		subscribers: map[string]chan SOCEvent{},
		bufSize:     bufSize,
	}
	return bus
}
// Subscribe creates a new subscriber channel registered under id and returns it.
// If id is already subscribed, the previous channel is closed before being
// replaced; previously it was silently overwritten, stranding the old
// receiver forever blocked on a channel nobody would ever close or send to.
func (eb *EventBus) Subscribe(id string) <-chan SOCEvent {
	eb.mu.Lock()
	defer eb.mu.Unlock()
	if old, ok := eb.subscribers[id]; ok {
		close(old)
	}
	ch := make(chan SOCEvent, eb.bufSize)
	eb.subscribers[id] = ch
	return ch
}
// Unsubscribe removes a subscriber and closes its channel.
// Unknown ids are ignored.
func (eb *EventBus) Unsubscribe(id string) {
	eb.mu.Lock()
	defer eb.mu.Unlock()
	ch, ok := eb.subscribers[id]
	if !ok {
		return
	}
	close(ch)
	delete(eb.subscribers, id)
}
// Publish sends an event to all subscribers. Non-blocking — drops if subscriber is full.
// Per-event fan-out logging is emitted at Debug level: at Info it produced one
// log line per event plus one per subscriber delivery on the hot ingest path,
// flooding production logs. Drops remain at Warn since they indicate a slow
// consumer losing data.
func (eb *EventBus) Publish(event SOCEvent) {
	eb.mu.RLock()
	defer eb.mu.RUnlock()
	slog.Debug("eventbus: publish", "event_id", event.ID, "severity", event.Severity, "subscribers", len(eb.subscribers))
	for id, ch := range eb.subscribers {
		select {
		case ch <- event:
			slog.Debug("eventbus: delivered", "subscriber", id, "event_id", event.ID)
		default:
			slog.Warn("eventbus: dropped (slow subscriber)", "subscriber", id, "event_id", event.ID)
		}
	}
}
// SubscriberCount returns the number of active subscribers.
func (eb *EventBus) SubscriberCount() int {
	eb.mu.RLock()
	n := len(eb.subscribers)
	eb.mu.RUnlock()
	return n
}

View file

@ -0,0 +1,449 @@
package soc
import (
"bytes"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"sync"
"time"
)
// ActionExecutor defines the interface for playbook action handlers.
// Each executor implements a specific action type (webhook, block_ip, log, etc.)
//
// NOTE(review): ExecutorRegistry.Execute releases its lock before invoking
// Execute, so implementations may be called concurrently — confirm each
// executor is safe for that.
type ActionExecutor interface {
	// Type returns the action type this executor handles (e.g., "webhook", "block_ip", "log").
	Type() string
	// Execute runs the action with the given parameters.
	// Returns a result summary or error.
	Execute(params ActionParams) (string, error)
}
// ActionParams contains the context passed to an action executor.
type ActionParams struct {
	IncidentID  string        `json:"incident_id"`
	Severity    EventSeverity `json:"severity"`
	Category    string        `json:"category"`
	Description string        `json:"description"`
	EventCount  int           `json:"event_count"`
	RuleName    string        `json:"rule_name"`
	// Extra carries executor-specific parameters, e.g. "ip" for block_ip,
	// "channel"/"webhook_url" for notify, "target"/"duration" for quarantine.
	Extra map[string]any `json:"extra,omitempty"`
}
// ExecutorRegistry manages registered action executors, keyed by their Type().
// Safe for concurrent use.
type ExecutorRegistry struct {
	mu        sync.RWMutex // guards executors
	executors map[string]ActionExecutor
}
// NewExecutorRegistry creates a registry pre-populated with the default LogExecutor.
func NewExecutorRegistry() *ExecutorRegistry {
	r := &ExecutorRegistry{executors: map[string]ActionExecutor{}}
	r.Register(&LogExecutor{})
	return r
}
// Register adds (or replaces) an executor in the registry, keyed by its Type().
func (r *ExecutorRegistry) Register(exec ActionExecutor) {
	r.mu.Lock()
	r.executors[exec.Type()] = exec
	r.mu.Unlock()
}
// Execute runs the named action. Returns an error if no executor is
// registered for actionType. The lock is released before invoking the
// executor so slow actions do not block the registry.
func (r *ExecutorRegistry) Execute(actionType string, params ActionParams) (string, error) {
	r.mu.RLock()
	exec, ok := r.executors[actionType]
	r.mu.RUnlock()
	if ok {
		return exec.Execute(params)
	}
	return "", fmt.Errorf("executor not found: %s", actionType)
}
// List returns all registered executor types.
// NOTE: order follows Go's randomized map iteration and varies per call.
func (r *ExecutorRegistry) List() []string {
	r.mu.RLock()
	defer r.mu.RUnlock()
	out := make([]string, 0, len(r.executors))
	for name := range r.executors {
		out = append(out, name)
	}
	return out
}
// --- Built-in Executors ---

// LogExecutor logs the action (default, always available).
type LogExecutor struct{}

// Type identifies this executor as the "log" action handler.
func (e *LogExecutor) Type() string { return "log" }

// Execute records the action via slog and always succeeds.
func (e *LogExecutor) Execute(params ActionParams) (string, error) {
	slog.Info("playbook action executed",
		"type", "log",
		"incident_id", params.IncidentID,
		"severity", params.Severity,
		"category", params.Category,
		"rule", params.RuleName,
	)
	return "logged", nil
}
// WebhookExecutor sends HTTP POST to a webhook URL (Slack, PagerDuty, etc.)
type WebhookExecutor struct {
	URL     string            // destination endpoint
	Headers map[string]string // extra request headers (e.g. auth tokens)
	client  *http.Client      // shared client with request timeout
}
// NewWebhookExecutor creates a webhook executor for the given URL.
// The supplied headers are applied to every outgoing request.
func NewWebhookExecutor(url string, headers map[string]string) *WebhookExecutor {
	c := &http.Client{Timeout: 10 * time.Second}
	return &WebhookExecutor{URL: url, Headers: headers, client: c}
}
// Type identifies this executor as the "webhook" action handler.
func (e *WebhookExecutor) Type() string { return "webhook" }

// Execute POSTs a flat JSON summary of the incident to the configured URL.
// Returns a short result string on success; an error on marshal failure,
// transport failure, or an HTTP status >= 400.
func (e *WebhookExecutor) Execute(params ActionParams) (string, error) {
	// Flat payload — consumers (Slack, PagerDuty, custom hooks) pick the
	// fields they need.
	payload, err := json.Marshal(map[string]any{
		"incident_id": params.IncidentID,
		"severity":    params.Severity,
		"category":    params.Category,
		"description": params.Description,
		"event_count": params.EventCount,
		"rule_name":   params.RuleName,
		"timestamp":   time.Now().Format(time.RFC3339),
		"source":      "sentinel-soc",
	})
	if err != nil {
		return "", fmt.Errorf("webhook: marshal: %w", err)
	}
	req, err := http.NewRequest(http.MethodPost, e.URL, bytes.NewReader(payload))
	if err != nil {
		return "", fmt.Errorf("webhook: create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	for k, v := range e.Headers {
		req.Header.Set(k, v)
	}
	resp, err := e.client.Do(req)
	if err != nil {
		slog.Error("webhook delivery failed", "url", e.URL, "error", err)
		return "", fmt.Errorf("webhook: send: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		slog.Warn("webhook returned error", "url", e.URL, "status", resp.StatusCode)
		return "", fmt.Errorf("webhook: HTTP %d", resp.StatusCode)
	}
	slog.Info("webhook delivered", "url", e.URL, "status", resp.StatusCode,
		"incident_id", params.IncidentID)
	return fmt.Sprintf("webhook: HTTP %d", resp.StatusCode), nil
}
// BlockIPExecutor stubs a firewall block action.
// In production, this would call a firewall API (iptables, AWS SG, etc.)
type BlockIPExecutor struct{}

// Type identifies this executor as the "block_ip" action handler.
func (e *BlockIPExecutor) Type() string { return "block_ip" }

// Execute validates that Extra["ip"] is present and logs the stubbed block.
// Returns an error when no IP is supplied so playbooks fail loudly instead of
// silently "blocking" nothing.
func (e *BlockIPExecutor) Execute(params ActionParams) (string, error) {
	ip, _ := params.Extra["ip"].(string)
	if ip == "" {
		return "", fmt.Errorf("block_ip: missing ip in extra params")
	}
	// TODO: Implement actual firewall API call
	slog.Warn("block_ip action (stub)",
		"ip", ip,
		"incident_id", params.IncidentID,
	)
	return fmt.Sprintf("block_ip: %s (stub — implement firewall API)", ip), nil
}
// NotifyExecutor sends a formatted alert notification via HTTP POST.
// Supports Slack, Telegram, PagerDuty, or any webhook-compatible endpoint.
type NotifyExecutor struct {
	DefaultURL string            // fallback webhook when Extra["webhook_url"] is absent
	Headers    map[string]string // extra request headers; may be nil
	client     *http.Client      // shared client with request timeout
}
// NewNotifyExecutor creates a notification executor with a default webhook URL.
// Headers are left nil; assign them directly when auth headers are needed.
func NewNotifyExecutor(url string) *NotifyExecutor {
	e := &NotifyExecutor{DefaultURL: url}
	e.client = &http.Client{Timeout: 10 * time.Second}
	return e
}
// Type identifies this executor as the "notify" action handler.
func (e *NotifyExecutor) Type() string { return "notify" }

// Execute builds a Slack-compatible alert payload and POSTs it to the webhook.
// Extra["channel"] overrides the default "soc-alerts" channel and
// Extra["webhook_url"] overrides DefaultURL. With no URL configured the alert
// is logged and the action still succeeds (graceful degradation).
func (e *NotifyExecutor) Execute(params ActionParams) (string, error) {
	channel, _ := params.Extra["channel"].(string)
	if channel == "" {
		channel = "soc-alerts"
	}
	url := e.DefaultURL
	if customURL, ok := params.Extra["webhook_url"].(string); ok && customURL != "" {
		url = customURL
	}
	// Build structured alert payload (Slack-compatible format)
	sevEmoji := map[EventSeverity]string{
		SeverityCritical: "🔴", SeverityHigh: "🟠",
		SeverityMedium: "🟡", SeverityLow: "🔵", SeverityInfo: "⚪",
	}
	emoji := sevEmoji[params.Severity]
	if emoji == "" {
		emoji = "⚠️" // unknown severities still render visibly
	}
	payload := map[string]any{
		"text": fmt.Sprintf("%s *[%s] %s*\nIncident: `%s` | Events: %d\n%s",
			emoji, params.Severity, params.Category,
			params.IncidentID, params.EventCount, params.Description),
		"channel":  channel,
		"username": "SYNTREX SOC",
		// Slack blocks for rich formatting
		"blocks": []map[string]any{
			{
				"type": "section",
				"text": map[string]string{
					"type": "mrkdwn",
					"text": fmt.Sprintf("%s *%s Alert — %s*", emoji, params.Severity, params.Category),
				},
			},
			{
				"type": "section",
				"fields": []map[string]string{
					{"type": "mrkdwn", "text": fmt.Sprintf("*Incident:*\n`%s`", params.IncidentID)},
					{"type": "mrkdwn", "text": fmt.Sprintf("*Events:*\n%d", params.EventCount)},
					{"type": "mrkdwn", "text": fmt.Sprintf("*Rule:*\n%s", params.RuleName)},
					{"type": "mrkdwn", "text": fmt.Sprintf("*Severity:*\n%s", params.Severity)},
				},
			},
		},
	}
	if url == "" {
		// No webhook configured — log and succeed (graceful degradation)
		slog.Info("notify: no webhook URL configured, logging alert",
			"channel", channel, "incident_id", params.IncidentID, "severity", params.Severity)
		return fmt.Sprintf("notify: logged to channel=%s (no webhook URL)", channel), nil
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return "", fmt.Errorf("notify: marshal: %w", err)
	}
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		return "", fmt.Errorf("notify: create request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	for k, v := range e.Headers {
		req.Header.Set(k, v)
	}
	resp, err := e.client.Do(req)
	if err != nil {
		slog.Error("notify: delivery failed", "url", url, "error", err)
		return "", fmt.Errorf("notify: send: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return "", fmt.Errorf("notify: HTTP %d from %s", resp.StatusCode, url)
	}
	slog.Info("notify: alert delivered",
		"channel", channel, "url", url, "status", resp.StatusCode,
		"incident_id", params.IncidentID)
	return fmt.Sprintf("notify: delivered to %s (HTTP %d)", channel, resp.StatusCode), nil
}
// QuarantineExecutor marks a session or IP as quarantined.
// Maintains an in-memory blocklist and logs quarantine actions.
type QuarantineExecutor struct {
	mu        sync.RWMutex         // guards blocklist
	blocklist map[string]time.Time // IP/session → quarantine expiry
}
// NewQuarantineExecutor creates a quarantine executor with an empty blocklist.
func NewQuarantineExecutor() *QuarantineExecutor {
	return &QuarantineExecutor{blocklist: map[string]time.Time{}}
}
// Type identifies this executor as the "quarantine" action handler.
func (e *QuarantineExecutor) Type() string { return "quarantine" }

// Execute quarantines a target for a configurable duration
// (Extra["duration"], default 1h). Target resolution order:
// Extra["target"] → Extra["ip"] → the incident ID itself.
//
// Fix: expired blocklist entries are now purged on every call. The original
// only ever added entries, so the in-memory map grew without bound on a
// long-running process.
func (e *QuarantineExecutor) Execute(params ActionParams) (string, error) {
	scope, _ := params.Extra["scope"].(string)
	if scope == "" {
		scope = "session"
	}
	target, _ := params.Extra["target"].(string)
	if target == "" {
		target, _ = params.Extra["ip"].(string)
	}
	if target == "" {
		target = params.IncidentID // Quarantine by incident
	}
	duration := 1 * time.Hour
	if durStr, ok := params.Extra["duration"].(string); ok {
		if d, err := time.ParseDuration(durStr); err == nil {
			duration = d
		}
	}
	now := time.Now()
	e.mu.Lock()
	// Opportunistic cleanup: drop entries whose quarantine has lapsed.
	for t, expiry := range e.blocklist {
		if now.After(expiry) {
			delete(e.blocklist, t)
		}
	}
	e.blocklist[target] = now.Add(duration)
	e.mu.Unlock()
	slog.Warn("quarantine: target isolated",
		"scope", scope,
		"target", target,
		"duration", duration,
		"incident_id", params.IncidentID,
		"severity", params.Severity,
	)
	return fmt.Sprintf("quarantine: %s=%s isolated for %s", scope, target, duration), nil
}
// IsQuarantined reports whether target is currently quarantined.
// Unknown or lapsed targets report false; lapsed entries are not deleted
// here (read lock only).
func (e *QuarantineExecutor) IsQuarantined(target string) bool {
	e.mu.RLock()
	defer e.mu.RUnlock()
	expiry, ok := e.blocklist[target]
	return ok && !time.Now().After(expiry)
}
// QuarantinedTargets returns a snapshot of all currently active quarantines,
// mapping target → expiry time.
func (e *QuarantineExecutor) QuarantinedTargets() map[string]time.Time {
	e.mu.RLock()
	defer e.mu.RUnlock()
	cutoff := time.Now()
	active := map[string]time.Time{}
	for tgt, exp := range e.blocklist {
		if exp.After(cutoff) {
			active[tgt] = exp
		}
	}
	return active
}
// EscalateExecutor auto-assigns incidents and fires escalation webhooks.
type EscalateExecutor struct {
	EscalationURL string       // Webhook URL for escalation alerts (PagerDuty, etc.)
	client        *http.Client // shared client with request timeout
}
// NewEscalateExecutor creates an escalation executor posting to url.
// An empty url disables the webhook; escalations are then log-only.
func NewEscalateExecutor(url string) *EscalateExecutor {
	e := &EscalateExecutor{EscalationURL: url}
	e.client = &http.Client{Timeout: 10 * time.Second}
	return e
}
// Type identifies this executor as the "escalate" action handler.
func (e *EscalateExecutor) Type() string { return "escalate" }

// Execute escalates the incident to a team (Extra["team"], default "soc-team")
// and, when configured, fires the escalation webhook. Webhook failures are
// logged but never fail the action — escalation delivery is best-effort.
//
// Fix: the original logged "webhook delivered" for ANY transport-level
// success, including HTTP 4xx/5xx responses; those are now surfaced as
// warnings so failed escalations are visible.
func (e *EscalateExecutor) Execute(params ActionParams) (string, error) {
	team, _ := params.Extra["team"].(string)
	if team == "" {
		team = "soc-team"
	}
	slog.Warn("escalate: incident escalated",
		"team", team,
		"incident_id", params.IncidentID,
		"severity", params.Severity,
		"category", params.Category,
	)
	// Fire escalation webhook if configured
	if e.EscalationURL != "" {
		// Marshal of a map with plain string values cannot fail; error ignored.
		payload, _ := json.Marshal(map[string]any{
			"event_type":  "escalation",
			"incident_id": params.IncidentID,
			"severity":    params.Severity,
			"category":    params.Category,
			"team":        team,
			"description": params.Description,
			"timestamp":   time.Now().Format(time.RFC3339),
			"source":      "syntrex-soc",
		})
		req, err := http.NewRequest(http.MethodPost, e.EscalationURL, bytes.NewReader(payload))
		if err == nil {
			req.Header.Set("Content-Type", "application/json")
			if resp, err := e.client.Do(req); err == nil {
				resp.Body.Close()
				if resp.StatusCode >= 400 {
					slog.Warn("escalate: webhook returned error", "url", e.EscalationURL, "status", resp.StatusCode)
				} else {
					slog.Info("escalate: webhook delivered", "url", e.EscalationURL, "status", resp.StatusCode)
				}
			} else {
				slog.Error("escalate: webhook failed", "url", e.EscalationURL, "error", err)
			}
		}
	}
	return fmt.Sprintf("escalate: assigned to team=%s", team), nil
}
// --- ExecutorActionHandler bridges PlaybookEngine → ExecutorRegistry ---

// ExecutorActionHandler implements ActionHandler by delegating to ExecutorRegistry.
// This is the bridge that makes playbook actions actually execute real handlers.
type ExecutorActionHandler struct {
	Registry *ExecutorRegistry // must be non-nil; Handle dereferences it directly
}
// Handle converts a playbook action into ActionParams and dispatches it to
// the registry, logging the outcome either way. The executor's error is
// returned unchanged so the playbook engine can react to failures.
func (h *ExecutorActionHandler) Handle(action PlaybookAction, incidentID string) error {
	// Copy playbook action params into the executor's Extra bag.
	extra := make(map[string]any, len(action.Params))
	for k, v := range action.Params {
		extra[k] = v
	}
	params := ActionParams{IncidentID: incidentID, Extra: extra}
	result, err := h.Registry.Execute(action.Type, params)
	if err != nil {
		slog.Error("playbook action failed",
			"type", action.Type,
			"incident_id", incidentID,
			"error", err,
		)
		return err
	}
	slog.Info("playbook action executed",
		"type", action.Type,
		"incident_id", incidentID,
		"result", result,
	)
	return nil
}

View file

@ -0,0 +1,140 @@
package soc
// GenAI Process Monitoring & Detection
//
// Defines GenAI-specific process names, credential files, LLM DNS endpoints,
// and auto-response actions for GenAI EDR (SDD-001).
// GenAIProcessNames is the canonical list of GenAI IDE agent process names.
// Used by IMMUNE eBPF hooks and GoMCP SOC correlation rules.
// Matching (see IsGenAIProcess) is exact and case-sensitive.
var GenAIProcessNames = []string{
	"claude",
	"cursor",
	"Cursor Helper",
	"Cursor Helper (Plugin)",
	"copilot",
	"copilot-agent",
	"windsurf",
	"gemini",
	"aider",
	"continue",
	"cline",
	"codex",
	"codex-cli",
}
// CredentialFiles is the list of sensitive files monitored for GenAI access.
// Access by a GenAI process or its descendants triggers CRITICAL alert.
// Entries are matched as path suffixes (see IsCredentialFile); multi-segment
// entries like ".ssh/id_rsa" therefore match regardless of the home directory.
var CredentialFiles = []string{
	"credentials.db",
	"Cookies",
	"Login Data",
	"logins.json",
	"key3.db",
	"key4.db",
	"cert9.db",
	".ssh/id_rsa",
	".ssh/id_ed25519",
	".aws/credentials",
	".env",
	".netrc",
}
// LLMDNSEndpoints is the list of known LLM API endpoints for DNS monitoring.
// Shield DNS monitor emits events when these domains are resolved.
// Matching (see IsLLMEndpoint) is an exact domain comparison — subdomains of
// these entries do not match.
var LLMDNSEndpoints = []string{
	"api.anthropic.com",
	"api.openai.com",
	"chatgpt.com",
	"claude.ai",
	"generativelanguage.googleapis.com",
	"gemini.googleapis.com",
	"api.deepseek.com",
	"api.together.xyz",
	"api.groq.com",
	"api.mistral.ai",
	"api.cohere.com",
}
// GenAI event categories for the SOC event bus. These category strings are
// referenced by the GenAI correlation rules (SOC-CR-016..021).
const (
	CategoryGenAIChildProcess       = "genai_child_process"
	CategoryGenAISensitiveFile      = "genai_sensitive_file_access"
	CategoryGenAIUnusualDomain      = "genai_unusual_domain"
	CategoryGenAICredentialAccess   = "genai_credential_access"
	CategoryGenAIPersistence        = "genai_persistence"
	CategoryGenAIConfigModification = "genai_config_modification"
)
// AutoAction defines an automated response for GenAI EDR rules.
type AutoAction struct {
	Type   string `json:"type"`   // "kill_process", "notify", "quarantine"
	Target string `json:"target"` // Process ID, file path, etc.
	Reason string `json:"reason"` // Human-readable justification
}
// IsGenAIProcess returns true if the process name matches a known GenAI agent.
// Matching is exact and case-sensitive; partial names do not match.
func IsGenAIProcess(processName string) bool {
	for _, known := range GenAIProcessNames {
		if known == processName {
			return true
		}
	}
	return false
}
// IsCredentialFile returns true if the file path matches a known credential file.
//
// The match must be a path suffix aligned on a path-component boundary:
// "/home/u/.aws/credentials" matches ".aws/credentials", and "/app/.env"
// matches ".env", but "/tmp/FakeCookies" no longer matches "Cookies".
// The original bare suffix comparison flagged any file whose name merely
// ended with a monitored name, producing false-positive CRITICAL alerts.
func IsCredentialFile(filePath string) bool {
	for _, cred := range CredentialFiles {
		start := len(filePath) - len(cred)
		if start < 0 || filePath[start:] != cred {
			continue
		}
		// Accept only an exact path or a suffix preceded by a separator.
		if start == 0 || filePath[start-1] == '/' {
			return true
		}
	}
	return false
}
// IsLLMEndpoint returns true if the domain matches a known LLM API endpoint.
// Comparison is exact — subdomains of listed endpoints do not match.
func IsLLMEndpoint(domain string) bool {
	for _, known := range LLMDNSEndpoints {
		if known == domain {
			return true
		}
	}
	return false
}
// ProcessAncestry represents the process tree for Entity ID Intersection.
type ProcessAncestry struct {
	PID        int      `json:"pid"`
	Name       string   `json:"name"`
	Executable string   `json:"executable"`
	Args       []string `json:"args,omitempty"`
	ParentPID  int      `json:"parent_pid"`
	ParentName string   `json:"parent_name"`
	Ancestry   []string `json:"ancestry"` // Full ancestry chain (oldest first)
	EntityID   string   `json:"entity_id"`
}
// HasGenAIAncestor returns true if any process in the ancestry chain —
// including the direct parent — is a known GenAI agent.
func (p *ProcessAncestry) HasGenAIAncestor() bool {
	if IsGenAIProcess(p.ParentName) {
		return true
	}
	for _, name := range p.Ancestry {
		if IsGenAIProcess(name) {
			return true
		}
	}
	return false
}
// GenAIAncestorName returns the name of the GenAI ancestor, or "" if none.
// The direct parent is preferred over entries further up the chain.
func (p *ProcessAncestry) GenAIAncestorName() string {
	candidates := append([]string{p.ParentName}, p.Ancestry...)
	for _, name := range candidates {
		if IsGenAIProcess(name) {
			return name
		}
	}
	return ""
}

View file

@ -0,0 +1,118 @@
package soc
import "time"
// GenAI EDR Detection Rules (SDD-001)
//
// 6 correlation rules for detecting GenAI agent threats,
// ported from Elastic's production detection ruleset.
// Rules SOC-CR-016 through SOC-CR-021.
// GenAICorrelationRules returns the 6 GenAI-specific detection rules
// (SOC-CR-016 through SOC-CR-021), ported from Elastic's production ruleset.
// These are appended to DefaultSOCCorrelationRules() in the correlation engine.
// A fresh slice is built per call, so callers may mutate the result safely.
func GenAICorrelationRules() []SOCCorrelationRule {
	return []SOCCorrelationRule{
		// R1: GenAI Child Process Execution (BBR — info-level building block)
		{
			ID:                 "SOC-CR-016",
			Name:               "GenAI Child Process Execution",
			RequiredCategories: []string{CategoryGenAIChildProcess},
			MinEvents:          1,
			TimeWindow:         1 * time.Minute,
			Severity:           SeverityInfo,
			KillChainPhase:     "Execution",
			MITREMapping:       []string{"T1059"},
			Description:        "GenAI agent spawned a child process. Building block rule — provides visibility into GenAI process activity. Not actionable alone.",
		},
		// R2: GenAI Suspicious Descendant (sequence: child → suspicious tool)
		{
			ID:                 "SOC-CR-017",
			Name:               "GenAI Suspicious Descendant",
			SequenceCategories: []string{CategoryGenAIChildProcess, "tool_abuse"},
			MinEvents:          2,
			TimeWindow:         5 * time.Minute,
			Severity:           SeverityMedium,
			KillChainPhase:     "Execution",
			MITREMapping:       []string{"T1059", "T1059.004"},
			Description:        "GenAI agent spawned a child process that performed suspicious activity (shell execution, network tool usage). Potential GenAI-facilitated attack.",
		},
		// R3: GenAI Unusual Domain Connection (new_terms equivalent)
		{
			ID:                 "SOC-CR-018",
			Name:               "GenAI Unusual Domain Connection",
			RequiredCategories: []string{CategoryGenAIUnusualDomain},
			MinEvents:          1,
			TimeWindow:         5 * time.Minute,
			Severity:           SeverityMedium,
			KillChainPhase:     "Command and Control",
			MITREMapping:       []string{"T1071", "T1102"},
			Description:        "GenAI process connected to a previously-unseen domain. May indicate command-and-control channel established through GenAI agent.",
		},
		// R4: GenAI Credential Access (CRITICAL — auto kill_process, see GenAIAutoActions)
		{
			ID:                 "SOC-CR-019",
			Name:               "GenAI Credential Access",
			SequenceCategories: []string{CategoryGenAIChildProcess, CategoryGenAICredentialAccess},
			MinEvents:          2,
			TimeWindow:         2 * time.Minute,
			Severity:           SeverityCritical,
			KillChainPhase:     "Credential Access",
			MITREMapping:       []string{"T1555", "T1539", "T1552"},
			Description:        "CRITICAL: GenAI agent or its descendant accessed credential file (credentials.db, cookies, logins.json, SSH keys). Auto-response: kill_process. This matches Elastic's production detection for real credential theft by Claude/Cursor processes.",
		},
		// R5: GenAI Persistence Mechanism
		{
			ID:                 "SOC-CR-020",
			Name:               "GenAI Persistence Mechanism",
			SequenceCategories: []string{CategoryGenAIChildProcess, CategoryGenAIPersistence},
			MinEvents:          2,
			TimeWindow:         10 * time.Minute,
			Severity:           SeverityHigh,
			KillChainPhase:     "Persistence",
			MITREMapping:       []string{"T1543", "T1547", "T1053"},
			Description:        "GenAI agent created a persistence mechanism (startup entry, LaunchAgent, cron job, systemd service). Establishing long-term foothold through AI agent.",
		},
		// R6: GenAI Config Modification by Non-GenAI Process
		{
			ID:                 "SOC-CR-021",
			Name:               "GenAI Config Modification",
			RequiredCategories: []string{CategoryGenAIConfigModification},
			MinEvents:          1,
			TimeWindow:         5 * time.Minute,
			Severity:           SeverityMedium,
			KillChainPhase:     "Defense Evasion",
			MITREMapping:       []string{"T1562", "T1112"},
			Description:        "Non-GenAI process modified GenAI agent configuration (hooks, MCP servers, tool permissions). Potential defense evasion or supply-chain attack via config poisoning.",
		},
	}
}
// GenAIAutoActions returns the auto-response actions for GenAI rules.
// Currently only SOC-CR-019 (credential access) has auto-response.
func GenAIAutoActions() map[string]*AutoAction {
	credKill := &AutoAction{
		Type:   "kill_process",
		Target: "genai_descendant",
		Reason: "GenAI descendant accessing credential files — immediate termination required per SDD-001 M5",
	}
	return map[string]*AutoAction{"SOC-CR-019": credKill}
}
// AllSOCCorrelationRules returns all correlation rules including GenAI rules.
// This combines the 15 default rules with the 6 GenAI rules = 21 total.
func AllSOCCorrelationRules() []SOCCorrelationRule {
	return append(DefaultSOCCorrelationRules(), GenAICorrelationRules()...)
}
// EvaluateGenAIAutoResponse checks if a correlation match triggers an
// auto-response. Returns the AutoAction for the matched rule, or nil.
func EvaluateGenAIAutoResponse(match CorrelationMatch) *AutoAction {
	// A missing key yields the map's zero value (nil) — equivalent to the
	// explicit comma-ok lookup.
	return GenAIAutoActions()[match.Rule.ID]
}

View file

@ -0,0 +1,312 @@
package soc
import (
"testing"
"time"
)
// === GenAI Monitor Tests ===

// TestIsGenAIProcess verifies exact-match detection of known GenAI agent
// process names, including rejection of partial matches and empty input.
func TestIsGenAIProcess(t *testing.T) {
	tests := []struct {
		name     string
		process  string
		expected bool
	}{
		{"claude detected", "claude", true},
		{"cursor detected", "cursor", true},
		{"Cursor Helper detected", "Cursor Helper", true},
		{"copilot detected", "copilot", true},
		{"windsurf detected", "windsurf", true},
		{"gemini detected", "gemini", true},
		{"aider detected", "aider", true},
		{"codex detected", "codex", true},
		{"normal process ignored", "python3", false},
		{"vim ignored", "vim", false},
		{"empty string ignored", "", false},
		{"partial match rejected", "claud", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := IsGenAIProcess(tt.process)
			if got != tt.expected {
				t.Errorf("IsGenAIProcess(%q) = %v, want %v", tt.process, got, tt.expected)
			}
		})
	}
}
// TestIsCredentialFile verifies suffix-based matching of monitored credential
// file paths across browser stores, SSH keys, AWS credentials, and dotfiles.
func TestIsCredentialFile(t *testing.T) {
	tests := []struct {
		name     string
		path     string
		expected bool
	}{
		{"credentials.db", "/home/user/.config/google-chrome/Default/credentials.db", true},
		{"Cookies", "/home/user/.config/chromium/Default/Cookies", true},
		{"Login Data", "/home/user/.config/google-chrome/Default/Login Data", true},
		{"logins.json", "/home/user/.mozilla/firefox/profile/logins.json", true},
		{"ssh key", "/home/user/.ssh/id_rsa", true},
		{"aws credentials", "/home/user/.aws/credentials", true},
		{"env file", "/app/.env", true},
		{"normal file ignored", "/home/user/document.txt", false},
		{"code file ignored", "/home/user/project/main.go", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := IsCredentialFile(tt.path)
			if got != tt.expected {
				t.Errorf("IsCredentialFile(%q) = %v, want %v", tt.path, got, tt.expected)
			}
		})
	}
}
// TestIsLLMEndpoint verifies exact-domain matching of known LLM API
// endpoints and rejection of unrelated domains.
func TestIsLLMEndpoint(t *testing.T) {
	tests := []struct {
		name     string
		domain   string
		expected bool
	}{
		{"anthropic", "api.anthropic.com", true},
		{"openai", "api.openai.com", true},
		{"gemini", "gemini.googleapis.com", true},
		{"deepseek", "api.deepseek.com", true},
		{"normal domain", "google.com", false},
		{"github", "api.github.com", false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := IsLLMEndpoint(tt.domain)
			if got != tt.expected {
				t.Errorf("IsLLMEndpoint(%q) = %v, want %v", tt.domain, got, tt.expected)
			}
		})
	}
}
// TestProcessAncestryHasGenAIAncestor covers detection via the direct parent,
// via deeper ancestry-chain entries, and the all-clear case.
func TestProcessAncestryHasGenAIAncestor(t *testing.T) {
	tests := []struct {
		name     string
		ancestry ProcessAncestry
		expected bool
	}{
		{
			"claude parent",
			ProcessAncestry{ParentName: "claude", Ancestry: []string{"zsh", "login"}},
			true,
		},
		{
			"claude in ancestry chain",
			ProcessAncestry{ParentName: "python3", Ancestry: []string{"claude", "zsh", "login"}},
			true,
		},
		{
			"no genai ancestor",
			ProcessAncestry{ParentName: "bash", Ancestry: []string{"sshd", "login"}},
			false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := tt.ancestry.HasGenAIAncestor()
			if got != tt.expected {
				t.Errorf("HasGenAIAncestor() = %v, want %v", got, tt.expected)
			}
		})
	}
}

// TestGenAIAncestorName verifies the ancestor's name is surfaced from the
// ancestry chain when the direct parent is not a GenAI agent.
func TestGenAIAncestorName(t *testing.T) {
	p := ProcessAncestry{ParentName: "python3", Ancestry: []string{"cursor", "zsh"}}
	if name := p.GenAIAncestorName(); name != "cursor" {
		t.Errorf("GenAIAncestorName() = %q, want %q", name, "cursor")
	}
}
// === GenAI Rules Tests ===

// TestGenAICorrelationRulesCount pins the expected number of GenAI rules.
func TestGenAICorrelationRulesCount(t *testing.T) {
	rules := GenAICorrelationRules()
	if len(rules) != 6 {
		t.Errorf("GenAICorrelationRules() returned %d rules, want 6", len(rules))
	}
}

// TestAllSOCCorrelationRulesCount pins the combined default + GenAI total.
func TestAllSOCCorrelationRulesCount(t *testing.T) {
	rules := AllSOCCorrelationRules()
	// 15 default + 6 GenAI = 21
	if len(rules) != 21 {
		t.Errorf("AllSOCCorrelationRules() returned %d rules, want 21", len(rules))
	}
}
// TestGenAIChildProcessRule verifies that a single child-process event within
// the window triggers R1 (SOC-CR-016, building block).
func TestGenAIChildProcessRule(t *testing.T) {
	now := time.Now()
	events := []SOCEvent{
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIChildProcess,
			Severity:  SeverityInfo,
			Timestamp: now.Add(-30 * time.Second),
			Metadata: map[string]string{
				"parent_process": "claude",
				"child_process":  "python3",
			},
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules[:1]) // R1 only
	if len(matches) != 1 {
		t.Fatalf("expected 1 match for GenAI child process, got %d", len(matches))
	}
	if matches[0].Rule.ID != "SOC-CR-016" {
		t.Errorf("expected SOC-CR-016, got %s", matches[0].Rule.ID)
	}
}

// TestGenAISuspiciousDescendantRule verifies the R2 sequence: a GenAI child
// process followed by tool abuse inside the 5-minute window.
func TestGenAISuspiciousDescendantRule(t *testing.T) {
	now := time.Now()
	events := []SOCEvent{
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIChildProcess,
			Severity:  SeverityInfo,
			Timestamp: now.Add(-3 * time.Minute),
		},
		{
			Source:    SourceImmune,
			Category:  "tool_abuse",
			Severity:  SeverityMedium,
			Timestamp: now.Add(-1 * time.Minute),
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules[1:2]) // R2 only
	if len(matches) != 1 {
		t.Fatalf("expected 1 match for GenAI suspicious descendant, got %d", len(matches))
	}
	if matches[0].Rule.ID != "SOC-CR-017" {
		t.Errorf("expected SOC-CR-017, got %s", matches[0].Rule.ID)
	}
}
// TestGenAICredentialAccessRule verifies the R4 sequence (child process →
// credential access) fires at CRITICAL severity.
func TestGenAICredentialAccessRule(t *testing.T) {
	now := time.Now()
	events := []SOCEvent{
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIChildProcess,
			Severity:  SeverityInfo,
			Timestamp: now.Add(-1 * time.Minute),
		},
		{
			Source:    SourceImmune,
			Category:  CategoryGenAICredentialAccess,
			Severity:  SeverityCritical,
			Timestamp: now.Add(-30 * time.Second),
			Metadata: map[string]string{
				"file_path": "/home/user/.config/google-chrome/Default/Login Data",
			},
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules[3:4]) // R4 only
	if len(matches) != 1 {
		t.Fatalf("expected 1 match for GenAI credential access, got %d", len(matches))
	}
	if matches[0].Rule.Severity != SeverityCritical {
		t.Errorf("expected CRITICAL severity, got %s", matches[0].Rule.Severity)
	}
}

// TestGenAICredentialAccessAutoKill verifies SOC-CR-019 maps to the
// kill_process auto-response.
func TestGenAICredentialAccessAutoKill(t *testing.T) {
	match := CorrelationMatch{
		Rule: SOCCorrelationRule{ID: "SOC-CR-019"},
	}
	action := EvaluateGenAIAutoResponse(match)
	if action == nil {
		t.Fatal("expected auto-response for SOC-CR-019, got nil")
	}
	if action.Type != "kill_process" {
		t.Errorf("expected kill_process, got %s", action.Type)
	}
}
// TestGenAIPersistenceRule verifies the R5 sequence (child process →
// persistence) fires within the 10-minute window.
func TestGenAIPersistenceRule(t *testing.T) {
	now := time.Now()
	events := []SOCEvent{
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIChildProcess,
			Severity:  SeverityInfo,
			Timestamp: now.Add(-8 * time.Minute),
		},
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIPersistence,
			Severity:  SeverityHigh,
			Timestamp: now.Add(-2 * time.Minute),
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules[4:5]) // R5 only
	if len(matches) != 1 {
		t.Fatalf("expected 1 match for GenAI persistence, got %d", len(matches))
	}
	if matches[0].Rule.ID != "SOC-CR-020" {
		t.Errorf("expected SOC-CR-020, got %s", matches[0].Rule.ID)
	}
}

// TestGenAIConfigModificationRule verifies a single config-modification event
// triggers R6.
func TestGenAIConfigModificationRule(t *testing.T) {
	now := time.Now()
	events := []SOCEvent{
		{
			Source:    SourceImmune,
			Category:  CategoryGenAIConfigModification,
			Severity:  SeverityMedium,
			Timestamp: now.Add(-2 * time.Minute),
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules[5:6]) // R6 only
	if len(matches) != 1 {
		t.Fatalf("expected 1 match for GenAI config modification, got %d", len(matches))
	}
}
// TestGenAINonGenAIProcessIgnored verifies that non-GenAI events do not
// trigger any of the six GenAI rules. The range check relies on the IDs
// being zero-padded, so lexicographic order matches numeric order.
func TestGenAINonGenAIProcessIgnored(t *testing.T) {
	now := time.Now()
	// Normal process events should not trigger GenAI rules
	events := []SOCEvent{
		{
			Source:    SourceSentinelCore,
			Category:  "prompt_injection",
			Severity:  SeverityHigh,
			Timestamp: now.Add(-1 * time.Minute),
		},
	}
	rules := GenAICorrelationRules()
	matches := CorrelateSOCEvents(events, rules)
	// None of the 6 GenAI rules should fire on a regular prompt_injection event
	for _, m := range matches {
		if m.Rule.ID >= "SOC-CR-016" && m.Rule.ID <= "SOC-CR-021" {
			t.Errorf("GenAI rule %s should not fire on non-GenAI event", m.Rule.ID)
		}
	}
}

// TestGenAINoAutoResponseForNonCredentialRules verifies that only SOC-CR-019
// carries an auto-response.
func TestGenAINoAutoResponseForNonCredentialRules(t *testing.T) {
	// Rules other than SOC-CR-019 should NOT have auto-response
	nonAutoRuleIDs := []string{"SOC-CR-016", "SOC-CR-017", "SOC-CR-018", "SOC-CR-020", "SOC-CR-021"}
	for _, ruleID := range nonAutoRuleIDs {
		match := CorrelationMatch{
			Rule: SOCCorrelationRule{ID: ruleID},
		}
		action := EvaluateGenAIAutoResponse(match)
		if action != nil {
			t.Errorf("rule %s should NOT have auto-response, got %+v", ruleID, action)
		}
	}
}

15
internal/domain/soc/id.go Normal file
View file

@ -0,0 +1,15 @@
package soc
import (
"crypto/rand"
"fmt"
)
// genID generates a collision-safe unique ID with the given prefix.
// Uses crypto/rand for 8 random hex bytes instead of time.UnixNano
// to prevent collisions under high concurrency.
func genID(prefix string) string {
	var buf [8]byte
	_, _ = rand.Read(buf[:]) // crypto/rand.Read errors are effectively impossible on supported platforms
	return fmt.Sprintf("%s-%x", prefix, buf[:])
}

View file

@ -2,6 +2,7 @@ package soc
import (
"fmt"
"sync/atomic"
"time"
)
@ -15,10 +16,28 @@ const (
StatusFalsePositive IncidentStatus = "FALSE_POSITIVE"
)
// IncidentNote represents an analyst investigation note.
type IncidentNote struct {
	ID        string    `json:"id"` // "note-<seq>", sequential per process
	Author    string    `json:"author"`
	Content   string    `json:"content"`
	CreatedAt time.Time `json:"created_at"`
}
// TimelineEntry represents a single event in the incident timeline.
type TimelineEntry struct {
	Timestamp   time.Time      `json:"timestamp"`
	Type        string         `json:"type"`  // event, playbook, status_change, note, assign
	Actor       string         `json:"actor"` // system, analyst name, playbook ID
	Description string         `json:"description"`
	Metadata    map[string]any `json:"metadata,omitempty"`
}
// Incident represents a correlated security incident aggregated from multiple SOCEvents.
// Each incident maintains a cryptographic anchor to the Decision Logger hash chain.
type Incident struct {
ID string `json:"id"` // INC-YYYY-NNNN
TenantID string `json:"tenant_id,omitempty"`
Status IncidentStatus `json:"status"`
Severity EventSeverity `json:"severity"` // Max severity of constituent events
Title string `json:"title"`
@ -35,23 +54,37 @@ type Incident struct {
UpdatedAt time.Time `json:"updated_at"`
ResolvedAt *time.Time `json:"resolved_at,omitempty"`
AssignedTo string `json:"assigned_to,omitempty"`
Notes []IncidentNote `json:"notes,omitempty"`
Timeline []TimelineEntry `json:"timeline,omitempty"`
}
// incidentCounter is a simple in-memory counter for generating incident IDs.
var incidentCounter int
// incidentCounter is an atomic counter for concurrent-safe incident ID generation.
var incidentCounter atomic.Int64
// noteCounter for unique note IDs.
var noteCounter atomic.Int64
// NewIncident creates a new incident from a correlation match.
// Thread-safe: uses atomic increment for unique ID generation.
func NewIncident(title string, severity EventSeverity, correlationRule string) Incident {
incidentCounter++
return Incident{
ID: fmt.Sprintf("INC-%d-%04d", time.Now().Year(), incidentCounter),
seq := incidentCounter.Add(1)
now := time.Now()
inc := Incident{
ID: fmt.Sprintf("INC-%d-%04d", now.Year(), seq),
Status: StatusOpen,
Severity: severity,
Title: title,
CorrelationRule: correlationRule,
CreatedAt: time.Now(),
UpdatedAt: time.Now(),
CreatedAt: now,
UpdatedAt: now,
}
inc.Timeline = append(inc.Timeline, TimelineEntry{
Timestamp: now,
Type: "created",
Actor: "system",
Description: fmt.Sprintf("Incident created by rule: %s", correlationRule),
})
return inc
}
// AddEvent adds an event ID to the incident and updates severity if needed.
@ -62,6 +95,12 @@ func (inc *Incident) AddEvent(eventID string, severity EventSeverity) {
inc.Severity = severity
}
inc.UpdatedAt = time.Now()
inc.Timeline = append(inc.Timeline, TimelineEntry{
Timestamp: inc.UpdatedAt,
Type: "event",
Actor: "system",
Description: fmt.Sprintf("Event %s correlated (severity: %s)", eventID, severity),
})
}
// SetAnchor sets the Decision Logger chain anchor for forensics (§5.6).
@ -72,11 +111,72 @@ func (inc *Incident) SetAnchor(hash string, chainLength int) {
}
// Resolve marks the incident as resolved.
func (inc *Incident) Resolve(status IncidentStatus) {
func (inc *Incident) Resolve(status IncidentStatus, actor string) {
now := time.Now()
oldStatus := inc.Status
inc.Status = status
inc.ResolvedAt = &now
inc.UpdatedAt = now
inc.Timeline = append(inc.Timeline, TimelineEntry{
Timestamp: now,
Type: "status_change",
Actor: actor,
Description: fmt.Sprintf("Status changed: %s → %s", oldStatus, status),
})
}
// Assign assigns an analyst to the incident and records the change as an
// "assign" entry in the incident timeline. When the incident was already
// assigned, the entry notes the reassignment from the previous analyst.
func (inc *Incident) Assign(analyst string) {
	previous := inc.AssignedTo
	inc.AssignedTo = analyst
	inc.UpdatedAt = time.Now()

	var description string
	if previous == "" {
		description = fmt.Sprintf("Assigned to %s", analyst)
	} else {
		description = fmt.Sprintf("Reassigned: %s → %s", previous, analyst)
	}

	inc.Timeline = append(inc.Timeline, TimelineEntry{
		Timestamp:   inc.UpdatedAt,
		Type:        "assign",
		Actor:       analyst,
		Description: description,
	})
}
// ChangeStatus updates incident status without resolving.
// Terminal statuses (resolved / false positive) additionally stamp ResolvedAt.
// Every transition is appended to the timeline as a "status_change" entry.
// NOTE(review): ResolvedAt uses a second time.Now() call, so it can differ
// from UpdatedAt by nanoseconds — confirm whether they should be equal.
func (inc *Incident) ChangeStatus(status IncidentStatus, actor string) {
	old := inc.Status
	inc.Status = status
	inc.UpdatedAt = time.Now()
	if status == StatusResolved || status == StatusFalsePositive {
		now := time.Now()
		inc.ResolvedAt = &now
	}
	inc.Timeline = append(inc.Timeline, TimelineEntry{
		Timestamp:   inc.UpdatedAt,
		Type:        "status_change",
		Actor:       actor,
		Description: fmt.Sprintf("Status: %s → %s", old, status),
	})
}

// AddNote adds an investigation note from an analyst.
// The note ID comes from the package-level atomic noteCounter; the note is
// also mirrored into the timeline as a "note" entry and bumps UpdatedAt.
// Returns the stored note (with its generated ID and timestamp).
func (inc *Incident) AddNote(author, content string) IncidentNote {
	seq := noteCounter.Add(1)
	note := IncidentNote{
		ID:        fmt.Sprintf("note-%d", seq),
		Author:    author,
		Content:   content,
		CreatedAt: time.Now(),
	}
	inc.Notes = append(inc.Notes, note)
	inc.UpdatedAt = note.CreatedAt
	inc.Timeline = append(inc.Timeline, TimelineEntry{
		Timestamp:   note.CreatedAt,
		Type:        "note",
		Actor:       author,
		Description: content,
	})
	return note
}
// IsOpen returns true if the incident is not resolved.
@ -98,3 +198,4 @@ func (inc *Incident) MTTR() time.Duration {
}
return inc.ResolvedAt.Sub(inc.CreatedAt)
}

View file

@ -0,0 +1,179 @@
package soc
import (
"sort"
"time"
)
// KillChainPhases defines the standard Cyber Kill Chain phases (Lockheed Martin + MITRE ATT&CK).
// Order matters: ReconstructKillChain iterates this slice to emit steps in
// canonical phase order before re-sorting chronologically.
var KillChainPhases = []string{
	"Reconnaissance",
	"Weaponization",
	"Delivery",
	"Exploitation",
	"Installation",
	"Command & Control",
	"Actions on Objectives",
	// AI-specific additions:
	"Defense Evasion",
	"Persistence",
	"Exfiltration",
	"Impact",
}

// KillChainStep represents one step in a reconstructed attack chain:
// all events of an incident that fall into a single kill-chain phase,
// aggregated with their time span and maximum severity.
type KillChainStep struct {
	Phase      string    `json:"phase"`
	EventIDs   []string  `json:"event_ids"`
	Severity   string    `json:"severity"` // max severity among the phase's events
	Categories []string  `json:"categories"`
	FirstSeen  time.Time `json:"first_seen"`
	LastSeen   time.Time `json:"last_seen"`
	RuleID     string    `json:"rule_id,omitempty"`
}

// KillChain represents a reconstructed attack chain from correlated incidents.
type KillChain struct {
	ID         string          `json:"id"`          // "KC-" + incident ID
	IncidentID string          `json:"incident_id"`
	Steps      []KillChainStep `json:"steps"`       // sorted by FirstSeen
	Coverage   float64         `json:"coverage"` // 0.0-1.0: fraction of Kill Chain phases observed
	MaxPhase   string          `json:"max_phase"`
	StartTime  time.Time       `json:"start_time"`
	EndTime    time.Time       `json:"end_time"`
	Duration   string          `json:"duration"` // human-readable Duration.String()
}
// ReconstructKillChain builds an attack chain from an incident and its events.
// Events are bucketed into kill-chain phases via categorizePhase, each phase
// becomes one KillChainStep (with time span, max severity, and distinct
// categories), and steps are sorted chronologically by first occurrence.
// Returns nil when there are no events or no event maps to any phase.
func ReconstructKillChain(incident Incident, events []SOCEvent, rules []SOCCorrelationRule) *KillChain {
	if len(events) == 0 {
		return nil
	}
	// Map rule ID → kill chain phase
	rulePhases := make(map[string]string)
	for _, r := range rules {
		rulePhases[r.ID] = r.KillChainPhase
	}
	// Group events by kill chain phase
	phaseEvents := make(map[string][]SOCEvent)
	for _, e := range events {
		phase := categorizePhase(e.Category, rulePhases, incident.CorrelationRule)
		if phase != "" {
			phaseEvents[phase] = append(phaseEvents[phase], e)
		}
	}
	// Build steps — iterate KillChainPhases (not the map) for deterministic
	// phase order before the chronological sort below.
	var steps []KillChainStep
	for _, phase := range KillChainPhases {
		evts, ok := phaseEvents[phase]
		if !ok {
			continue
		}
		cats := uniqueCategories(evts)
		ids := make([]string, len(evts))
		var firstSeen, lastSeen time.Time
		maxSev := SeverityInfo
		for i, e := range evts {
			ids[i] = e.ID
			if firstSeen.IsZero() || e.Timestamp.Before(firstSeen) {
				firstSeen = e.Timestamp
			}
			if e.Timestamp.After(lastSeen) {
				lastSeen = e.Timestamp
			}
			if e.Severity.Rank() > maxSev.Rank() {
				maxSev = e.Severity
			}
		}
		steps = append(steps, KillChainStep{
			Phase:      phase,
			EventIDs:   ids,
			Severity:   string(maxSev),
			Categories: cats,
			FirstSeen:  firstSeen,
			LastSeen:   lastSeen,
			// All steps carry the incident's triggering rule, not a per-phase rule.
			RuleID: incident.CorrelationRule,
		})
	}
	if len(steps) == 0 {
		return nil
	}
	// Sort by first seen
	sort.Slice(steps, func(i, j int) bool {
		return steps[i].FirstSeen.Before(steps[j].FirstSeen)
	})
	coverage := float64(len(steps)) / float64(len(KillChainPhases))
	startTime := steps[0].FirstSeen
	endTime := steps[len(steps)-1].LastSeen
	duration := endTime.Sub(startTime)
	return &KillChain{
		ID:         "KC-" + incident.ID,
		IncidentID: incident.ID,
		Steps:      steps,
		Coverage:   coverage,
		// NOTE(review): MaxPhase is the phase of the chronologically LAST step
		// (post-sort), not the deepest phase in KillChainPhases order — confirm
		// which semantic is intended.
		MaxPhase:  steps[len(steps)-1].Phase,
		StartTime: startTime,
		EndTime:   endTime,
		Duration:  duration.String(),
	}
}
// categorizePhase maps an event category to a Cyber Kill Chain phase.
//
// Precedence: a specific category mapping always wins; for categories with
// no mapping, the phase declared by the triggering correlation rule (if any)
// is used as a fallback; otherwise "Actions on Objectives" is returned.
//
// Fix: the original looked up rulePhases[ruleID] but discarded the result
// (empty if-body), so custom rule phases were never applied.
func categorizePhase(category string, rulePhases map[string]string, ruleID string) string {
	// Category → phase mapping (specific knowledge wins over rule metadata).
	switch category {
	case "reconnaissance", "scanning", "enumeration":
		return "Reconnaissance"
	case "weaponization", "payload_crafting":
		return "Weaponization"
	case "delivery", "phishing", "social_engineering":
		return "Delivery"
	case "jailbreak", "prompt_injection", "injection", "exploitation":
		return "Exploitation"
	case "persistence", "backdoor":
		return "Persistence"
	case "command_control", "c2", "beacon":
		return "Command & Control"
	case "tool_abuse", "unauthorized_tool_use":
		return "Actions on Objectives"
	case "defense_evasion", "evasion", "obfuscation", "encoding":
		return "Defense Evasion"
	case "exfiltration", "data_leak", "data_theft":
		return "Exfiltration"
	case "auth_bypass", "brute_force", "credential_theft":
		return "Exploitation"
	case "sensor_anomaly", "sensor_manipulation":
		return "Defense Evasion"
	case "data_poisoning", "model_manipulation":
		return "Impact"
	}
	// Unmapped category: fall back to the triggering rule's declared phase.
	if phase, ok := rulePhases[ruleID]; ok && phase != "" {
		return phase
	}
	return "Actions on Objectives"
}
// uniqueCategories returns the distinct event categories, preserving the
// order in which each category first appears in the input.
func uniqueCategories(events []SOCEvent) []string {
	var categories []string
	observed := make(map[string]bool, len(events))
	for _, evt := range events {
		if observed[evt.Category] {
			continue
		}
		observed[evt.Category] = true
		categories = append(categories, evt.Category)
	}
	return categories
}

View file

@ -0,0 +1,206 @@
package soc
import (
"encoding/json"
"fmt"
"sync"
"time"
)
// P2PSyncService implements §14 — SOC-to-SOC event synchronization over P2P mesh.
// Enables multi-site SOC deployments to share events, incidents, and IOCs.
// All fields are guarded by mu; outbox/inbox are bounded at maxBuf entries
// (oldest dropped first). Disabled by default until Enable is called.
type P2PSyncService struct {
	mu      sync.RWMutex
	peers   map[string]*SOCPeer // keyed by peer ID
	outbox  []SyncMessage       // pending outgoing messages (FIFO, capped)
	inbox   []SyncMessage       // accepted incoming messages (FIFO, capped)
	maxBuf  int
	enabled bool
}

// SOCPeer represents a connected SOC peer node.
type SOCPeer struct {
	ID         string    `json:"id"`
	Name       string    `json:"name"`
	Endpoint   string    `json:"endpoint"`
	Status     string    `json:"status"` // connected, disconnected, syncing
	LastSync   time.Time `json:"last_sync"`
	EventsSent int       `json:"events_sent"`
	EventsRecv int       `json:"events_recv"`
	TrustLevel string    `json:"trust_level"` // full, partial, readonly
}

// SyncMessage is a SOC data unit exchanged between peers.
// Payload is kept as raw JSON so decoding can be deferred per message type.
type SyncMessage struct {
	ID        string          `json:"id"`
	Type      SyncMessageType `json:"type"`
	PeerID    string          `json:"peer_id"`
	Payload   json.RawMessage `json:"payload"`
	Timestamp time.Time       `json:"timestamp"`
}

// SyncMessageType categorizes P2P messages.
type SyncMessageType string

// Supported message types exchanged between SOC peers.
const (
	SyncEvent     SyncMessageType = "EVENT"
	SyncIncident  SyncMessageType = "INCIDENT"
	SyncIOC       SyncMessageType = "IOC"
	SyncRule      SyncMessageType = "RULE"
	SyncHeartbeat SyncMessageType = "HEARTBEAT"
)
// NewP2PSyncService creates the inter-SOC sync engine.
// The service starts disabled; call Enable before enqueueing or receiving.
func NewP2PSyncService() *P2PSyncService {
	return &P2PSyncService{
		peers:  make(map[string]*SOCPeer),
		maxBuf: 1000, // cap for both outbox and inbox
	}
}

// Enable activates P2P sync.
func (p *P2PSyncService) Enable() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.enabled = true
}

// Disable deactivates P2P sync.
func (p *P2PSyncService) Disable() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.enabled = false
}

// IsEnabled returns whether P2P sync is active.
func (p *P2PSyncService) IsEnabled() bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.enabled
}

// AddPeer registers a SOC peer for synchronization.
// The peer starts in "disconnected" status; re-adding an existing ID
// replaces the peer and resets its counters.
func (p *P2PSyncService) AddPeer(id, name, endpoint, trustLevel string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.peers[id] = &SOCPeer{
		ID:         id,
		Name:       name,
		Endpoint:   endpoint,
		Status:     "disconnected",
		TrustLevel: trustLevel,
	}
}

// RemovePeer deregisters a SOC peer. No-op for unknown IDs.
func (p *P2PSyncService) RemovePeer(id string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.peers, id)
}

// ListPeers returns a snapshot (copies) of all known SOC peers.
// Map iteration order is random, so the result is unordered.
func (p *P2PSyncService) ListPeers() []SOCPeer {
	p.mu.RLock()
	defer p.mu.RUnlock()
	result := make([]SOCPeer, 0, len(p.peers))
	for _, peer := range p.peers {
		result = append(result, *peer)
	}
	return result
}
// EnqueueOutbound adds a message to the outbound sync queue.
// When sync is disabled this is a deliberate silent no-op (returns nil).
// The queue is bounded at maxBuf; the oldest message is dropped on overflow.
// Returns an error only when the payload cannot be marshalled to JSON.
func (p *P2PSyncService) EnqueueOutbound(msgType SyncMessageType, payload any) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if !p.enabled {
		return nil
	}
	data, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("p2p: marshal failed: %w", err)
	}
	msg := SyncMessage{
		// NOTE(review): UnixNano-based IDs can collide if two messages are
		// enqueued within the same nanosecond tick — confirm acceptable.
		ID:        fmt.Sprintf("sync-%d", time.Now().UnixNano()),
		Type:      msgType,
		Payload:   data,
		Timestamp: time.Now(),
	}
	if len(p.outbox) >= p.maxBuf {
		p.outbox = p.outbox[1:] // drop oldest
	}
	p.outbox = append(p.outbox, msg)
	return nil
}
// ReceiveInbound processes an incoming sync message from a peer.
// Rejects messages when sync is disabled, when the peer is unknown, or when
// a readonly-trust peer sends anything other than a heartbeat. Accepted
// messages are stamped with the peer ID, update the peer's sync bookkeeping,
// and are appended to the bounded inbox (oldest dropped on overflow).
func (p *P2PSyncService) ReceiveInbound(peerID string, msg SyncMessage) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if !p.enabled {
		return fmt.Errorf("p2p sync disabled")
	}
	peer, ok := p.peers[peerID]
	if !ok {
		return fmt.Errorf("unknown peer: %s", peerID)
	}
	if peer.TrustLevel == "readonly" && msg.Type != SyncHeartbeat {
		return fmt.Errorf("peer %s is readonly, cannot receive %s", peerID, msg.Type)
	}
	msg.PeerID = peerID
	// NOTE(review): EventsRecv counts every accepted message, including
	// heartbeats — confirm heartbeats should contribute to this counter.
	peer.EventsRecv++
	peer.LastSync = time.Now()
	peer.Status = "connected"
	if len(p.inbox) >= p.maxBuf {
		p.inbox = p.inbox[1:]
	}
	p.inbox = append(p.inbox, msg)
	return nil
}
// DrainOutbox returns and clears pending outbound messages.
// The caller receives an independent copy; the internal buffer keeps its
// capacity (truncated to zero length) for cheap reuse on the next enqueue.
func (p *P2PSyncService) DrainOutbox() []SyncMessage {
	p.mu.Lock()
	defer p.mu.Unlock()

	drained := make([]SyncMessage, len(p.outbox))
	copy(drained, p.outbox)
	p.outbox = p.outbox[:0]
	return drained
}
// Stats returns P2P sync statistics: peer counts, queue depths, and
// aggregate sent/received totals across all peers. Read-only snapshot.
func (p *P2PSyncService) Stats() map[string]any {
	p.mu.RLock()
	defer p.mu.RUnlock()
	totalSent := 0
	totalRecv := 0
	connected := 0
	for _, peer := range p.peers {
		totalSent += peer.EventsSent
		totalRecv += peer.EventsRecv
		if peer.Status == "connected" {
			connected++
		}
	}
	return map[string]any{
		"enabled":         p.enabled,
		"total_peers":     len(p.peers),
		"connected_peers": connected,
		"outbox_depth":    len(p.outbox),
		"inbox_depth":     len(p.inbox),
		"total_sent":      totalSent,
		"total_received":  totalRecv,
	}
}

View file

@ -0,0 +1,124 @@
package soc
import (
"testing"
)
// TestP2PSync_Disabled verifies that enqueueing while sync is disabled is a
// silent no-op: nil error and nothing buffered in the outbox.
func TestP2PSync_Disabled(t *testing.T) {
	p := NewP2PSyncService()
	err := p.EnqueueOutbound(SyncEvent, map[string]string{"id": "evt-1"})
	if err != nil {
		t.Fatalf("disabled enqueue should return nil, got %v", err)
	}
	msgs := p.DrainOutbox()
	if len(msgs) != 0 {
		t.Fatal("disabled should produce no outbox messages")
	}
}

// TestP2PSync_AddAndListPeers covers peer registration, listing, and removal.
func TestP2PSync_AddAndListPeers(t *testing.T) {
	p := NewP2PSyncService()
	p.AddPeer("soc-2", "Site-B", "http://soc-b:9100", "full")
	p.AddPeer("soc-3", "Site-C", "http://soc-c:9100", "readonly")
	peers := p.ListPeers()
	if len(peers) != 2 {
		t.Fatalf("expected 2 peers, got %d", len(peers))
	}
	p.RemovePeer("soc-3")
	peers = p.ListPeers()
	if len(peers) != 1 {
		t.Fatalf("expected 1 peer after remove, got %d", len(peers))
	}
}

// TestP2PSync_EnqueueAndDrain checks that enabled enqueue buffers messages
// and that draining returns them all and empties the outbox.
func TestP2PSync_EnqueueAndDrain(t *testing.T) {
	p := NewP2PSyncService()
	p.Enable()
	// Errors deliberately ignored: payloads are trivially marshallable.
	p.EnqueueOutbound(SyncEvent, map[string]string{"event_id": "evt-1"})
	p.EnqueueOutbound(SyncIncident, map[string]string{"incident_id": "inc-1"})
	p.EnqueueOutbound(SyncIOC, map[string]string{"ioc": "1.2.3.4"})
	msgs := p.DrainOutbox()
	if len(msgs) != 3 {
		t.Fatalf("expected 3 outbox messages, got %d", len(msgs))
	}
	// After drain, outbox should be empty
	msgs2 := p.DrainOutbox()
	if len(msgs2) != 0 {
		t.Fatalf("outbox should be empty after drain, got %d", len(msgs2))
	}
}
// TestP2PSync_ReceiveInbound verifies that an accepted inbound message
// updates the peer's receive counter and flips its status to "connected".
func TestP2PSync_ReceiveInbound(t *testing.T) {
	p := NewP2PSyncService()
	p.Enable()
	p.AddPeer("soc-2", "Site-B", "http://soc-b:9100", "full")
	msg := SyncMessage{
		ID:   "sync-1",
		Type: SyncEvent,
	}
	err := p.ReceiveInbound("soc-2", msg)
	if err != nil {
		t.Fatalf("receive should succeed: %v", err)
	}
	peers := p.ListPeers()
	for _, peer := range peers {
		if peer.ID == "soc-2" {
			if peer.EventsRecv != 1 {
				t.Fatalf("expected 1 received, got %d", peer.EventsRecv)
			}
			if peer.Status != "connected" {
				t.Fatalf("expected connected, got %s", peer.Status)
			}
		}
	}
}

// TestP2PSync_ReadonlyPeer checks trust enforcement: readonly peers may send
// heartbeats but nothing else.
func TestP2PSync_ReadonlyPeer(t *testing.T) {
	p := NewP2PSyncService()
	p.Enable()
	p.AddPeer("soc-ro", "ReadOnly-SOC", "http://ro:9100", "readonly")
	// Heartbeat should be allowed
	err := p.ReceiveInbound("soc-ro", SyncMessage{Type: SyncHeartbeat})
	if err != nil {
		t.Fatalf("heartbeat should be allowed from readonly: %v", err)
	}
	// Event should be denied
	err = p.ReceiveInbound("soc-ro", SyncMessage{Type: SyncEvent})
	if err == nil {
		t.Fatal("event from readonly peer should be denied")
	}
}

// TestP2PSync_UnknownPeer checks that unregistered peers are rejected.
func TestP2PSync_UnknownPeer(t *testing.T) {
	p := NewP2PSyncService()
	p.Enable()
	err := p.ReceiveInbound("unknown", SyncMessage{Type: SyncEvent})
	if err == nil {
		t.Fatal("should reject unknown peer")
	}
}

// TestP2PSync_Stats spot-checks the enabled flag and peer count in Stats.
func TestP2PSync_Stats(t *testing.T) {
	p := NewP2PSyncService()
	p.Enable()
	p.AddPeer("soc-2", "B", "http://b:9100", "full")
	stats := p.Stats()
	if stats["enabled"] != true {
		t.Fatal("should be enabled")
	}
	if stats["total_peers"].(int) != 1 {
		t.Fatal("should have 1 peer")
	}
}

View file

@ -1,115 +1,277 @@
package soc
// PlaybookAction defines automated responses triggered by playbook rules.
type PlaybookAction string
const (
ActionAutoBlock PlaybookAction = "auto_block" // Block source via shield
ActionAutoReview PlaybookAction = "auto_review" // Flag for human review
ActionNotify PlaybookAction = "notify" // Send notification
ActionIsolate PlaybookAction = "isolate" // Isolate affected session
ActionEscalate PlaybookAction = "escalate" // Escalate to senior analyst
import (
"fmt"
"log/slog"
"sync"
"time"
)
// PlaybookCondition defines when a playbook fires.
type PlaybookCondition struct {
MinSeverity EventSeverity `json:"min_severity" yaml:"min_severity"` // Minimum severity to trigger
Categories []string `json:"categories" yaml:"categories"` // Matching categories
Sources []EventSource `json:"sources,omitempty" yaml:"sources"` // Restrict to specific sources
MinEvents int `json:"min_events" yaml:"min_events"` // Minimum events before trigger
// PlaybookEngine implements §10 — automated incident response.
// Executes predefined response actions when incidents match playbook triggers.
// All fields are guarded by mu; execLog is a bounded ring (maxLog entries).
type PlaybookEngine struct {
	mu        sync.RWMutex
	playbooks map[string]*Playbook // keyed by Playbook.ID
	execLog   []PlaybookExecution  // recent runs, oldest dropped at maxLog
	maxLog    int
	handler   ActionHandler // pluggable executor; default is LogHandler
}

// ActionHandler executes playbook actions. Implement for real integrations.
type ActionHandler interface {
	Handle(action PlaybookAction, incidentID string) error
}

// LogHandler is the default action handler — logs what would be executed.
type LogHandler struct{}

// Handle logs the action via slog and always reports success.
func (h LogHandler) Handle(action PlaybookAction, incidentID string) error {
	slog.Info("playbook action", "action", action.Type, "incident", incidentID, "params", action.Params)
	return nil
}
// Playbook defines an automated response procedure.
type Playbook struct {
ID string `json:"id" yaml:"id"`
Name string `json:"name" yaml:"name"`
Description string `json:"description" yaml:"description"`
Enabled bool `json:"enabled" yaml:"enabled"`
Condition PlaybookCondition `json:"condition" yaml:"condition"`
Actions []PlaybookAction `json:"actions" yaml:"actions"`
Priority int `json:"priority" yaml:"priority"` // Higher = runs first
ID string `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
Trigger PlaybookTrigger `json:"trigger"`
Actions []PlaybookAction `json:"actions"`
Enabled bool `json:"enabled"`
Priority int `json:"priority"`
CreatedAt time.Time `json:"created_at"`
}
// Matches checks if a SOC event matches this playbook's conditions.
func (p *Playbook) Matches(event SOCEvent) bool {
if !p.Enabled {
return false
}
// PlaybookTrigger defines when a playbook activates. Empty fields act as
// wildcards; Severity is a minimum threshold (see severityRank).
// NOTE(review): Keywords is declared but never evaluated by matches() —
// confirm whether keyword matching is still planned or the field is dead.
type PlaybookTrigger struct {
	Severity       string   `json:"severity,omitempty"`
	Categories     []string `json:"categories,omitempty"`
	Keywords       []string `json:"keywords,omitempty"`
	KillChainPhase string   `json:"kill_chain_phase,omitempty"`
}
// Check severity threshold.
if event.Severity.Rank() < p.Condition.MinSeverity.Rank() {
return false
}
// PlaybookAction is a single response step executed via the ActionHandler.
type PlaybookAction struct {
	Type   string            `json:"type"`   // e.g. log, block_ip, notify, quarantine, escalate, webhook
	Params map[string]string `json:"params"` // free-form parameters passed to the handler
	Order  int               `json:"order"`
}
// Check category if specified.
if len(p.Condition.Categories) > 0 {
matched := false
for _, cat := range p.Condition.Categories {
if cat == event.Category {
matched = true
// PlaybookExecution records a playbook run against a single incident.
type PlaybookExecution struct {
	ID         string    `json:"id"`
	PlaybookID string    `json:"playbook_id"`
	IncidentID string    `json:"incident_id"`
	Status     string    `json:"status"` // "success" or "partial_failure"
	ActionsRun int       `json:"actions_run"`
	Duration   string    `json:"duration"` // human-readable Duration.String()
	Error      string    `json:"error,omitempty"`
	Timestamp  time.Time `json:"timestamp"`
}
// NewPlaybookEngine creates the automated response engine with built-in playbooks.
// The default ActionHandler is LogHandler (log-only, never fails).
func NewPlaybookEngine() *PlaybookEngine {
	pe := &PlaybookEngine{
		playbooks: make(map[string]*Playbook),
		maxLog:    200,
		handler:   LogHandler{},
	}
	pe.loadDefaults()
	return pe
}

// SetHandler replaces the action handler (for real integrations: webhook, SOAR, etc.).
func (pe *PlaybookEngine) SetHandler(h ActionHandler) {
	pe.mu.Lock()
	defer pe.mu.Unlock()
	pe.handler = h
}
// loadDefaults registers the four built-in playbooks (§10).
// Storing &defaults[i] (address of the slice element, not a loop variable)
// is deliberate so every map entry points at a distinct Playbook.
// NOTE(review): the C2 trigger uses phase "command_control" while the kill
// chain reconstruction emits "Command & Control" — confirm callers pass the
// snake_case form, otherwise this playbook can never fire from a KillChain.
func (pe *PlaybookEngine) loadDefaults() {
	defaults := []Playbook{
		{
			ID: "pb-block-jailbreak", Name: "Auto-Block Jailbreak Source",
			Description: "Blocks source IP on confirmed jailbreak attempts",
			Trigger:     PlaybookTrigger{Severity: "CRITICAL", Categories: []string{"jailbreak"}},
			Actions: []PlaybookAction{
				{Type: "log", Params: map[string]string{"message": "Jailbreak detected"}, Order: 1},
				{Type: "block_ip", Params: map[string]string{"duration": "3600"}, Order: 2},
				{Type: "notify", Params: map[string]string{"channel": "soc-alerts"}, Order: 3},
			},
			Enabled: true, Priority: 1,
		},
		{
			ID: "pb-quarantine-exfil", Name: "Quarantine Data Exfiltration",
			Description: "Isolates sessions on data exfiltration detection",
			Trigger:     PlaybookTrigger{Severity: "HIGH", Categories: []string{"exfiltration"}},
			Actions: []PlaybookAction{
				{Type: "quarantine", Params: map[string]string{"scope": "session"}, Order: 1},
				{Type: "escalate", Params: map[string]string{"team": "ir-team"}, Order: 2},
			},
			Enabled: true, Priority: 2,
		},
		{
			ID: "pb-notify-injection", Name: "Alert on Prompt Injection",
			Description: "Sends notification on prompt injection detection",
			Trigger:     PlaybookTrigger{Severity: "MEDIUM", Categories: []string{"injection"}},
			Actions: []PlaybookAction{
				{Type: "log", Params: map[string]string{"message": "Prompt injection detected"}, Order: 1},
				{Type: "notify", Params: map[string]string{"channel": "soc-alerts"}, Order: 2},
			},
			Enabled: true, Priority: 3,
		},
		{
			ID: "pb-c2-killchain", Name: "Kill Chain C2 Response",
			Description: "Immediate response to C2 communication detection",
			Trigger:     PlaybookTrigger{KillChainPhase: "command_control"},
			Actions: []PlaybookAction{
				{Type: "block_ip", Params: map[string]string{"duration": "86400"}, Order: 1},
				{Type: "quarantine", Params: map[string]string{"scope": "host"}, Order: 2},
				{Type: "webhook", Params: map[string]string{"event": "kill_chain_alert"}, Order: 3},
				{Type: "escalate", Params: map[string]string{"team": "threat-hunters"}, Order: 4},
			},
			Enabled: true, Priority: 1,
		},
	}
	for i := range defaults {
		defaults[i].CreatedAt = time.Now()
		pe.playbooks[defaults[i].ID] = &defaults[i]
	}
}
// AddPlaybook registers a custom playbook. An empty ID gets a UnixNano-based
// one; CreatedAt is always stamped here. Re-using an existing ID replaces
// that playbook.
func (pe *PlaybookEngine) AddPlaybook(pb Playbook) {
	pe.mu.Lock()
	defer pe.mu.Unlock()
	if pb.ID == "" {
		pb.ID = fmt.Sprintf("pb-%d", time.Now().UnixNano())
	}
	pb.CreatedAt = time.Now()
	pe.playbooks[pb.ID] = &pb
}

// RemovePlaybook deactivates a playbook.
// Note: the playbook is disabled, not deleted — it still appears in
// ListPlaybooks and PlaybookStats totals.
func (pe *PlaybookEngine) RemovePlaybook(id string) {
	pe.mu.Lock()
	defer pe.mu.Unlock()
	if pb, ok := pe.playbooks[id]; ok {
		pb.Enabled = false
	}
}
// Execute runs matching playbooks for an incident.
func (pe *PlaybookEngine) Execute(incidentID, severity, category, killChainPhase string) []PlaybookExecution {
pe.mu.Lock()
defer pe.mu.Unlock()
var results []PlaybookExecution
for _, pb := range pe.playbooks {
if !pb.Enabled || !pe.matches(pb, severity, category, killChainPhase) {
continue
}
start := time.Now()
exec := PlaybookExecution{
ID: genID("exec"),
PlaybookID: pb.ID,
IncidentID: incidentID,
Status: "success",
ActionsRun: len(pb.Actions),
Timestamp: start,
}
for _, action := range pb.Actions {
if err := pe.handler.Handle(action, incidentID); err != nil {
exec.Status = "partial_failure"
exec.Error = err.Error()
break
}
}
if !matched {
return false
exec.Duration = time.Since(start).String()
if len(pe.execLog) >= pe.maxLog {
copy(pe.execLog, pe.execLog[1:])
pe.execLog[len(pe.execLog)-1] = exec
} else {
pe.execLog = append(pe.execLog, exec)
}
results = append(results, exec)
}
return results
}
// Check source restriction if specified.
if len(p.Condition.Sources) > 0 {
matched := false
for _, src := range p.Condition.Sources {
if src == event.Source {
matched = true
func (pe *PlaybookEngine) matches(pb *Playbook, severity, category, killChainPhase string) bool {
t := pb.Trigger
if t.Severity != "" && severityRank(severity) < severityRank(t.Severity) {
return false
}
if len(t.Categories) > 0 {
found := false
for _, c := range t.Categories {
if c == category {
found = true
break
}
}
if !matched {
if !found {
return false
}
}
if t.KillChainPhase != "" && t.KillChainPhase != killChainPhase {
return false
}
return true
}
// DefaultPlaybooks returns the built-in playbook set (§10 from spec).
func DefaultPlaybooks() []Playbook {
return []Playbook{
{
ID: "pb-auto-block-jailbreak",
Name: "Auto-Block Jailbreak",
Description: "Automatically block confirmed jailbreak attempts",
Enabled: true,
Condition: PlaybookCondition{
MinSeverity: SeverityHigh,
Categories: []string{"jailbreak", "prompt_injection"},
},
Actions: []PlaybookAction{ActionAutoBlock, ActionNotify},
Priority: 100,
},
{
ID: "pb-escalate-exfiltration",
Name: "Escalate Exfiltration",
Description: "Escalate data exfiltration attempts to senior analyst",
Enabled: true,
Condition: PlaybookCondition{
MinSeverity: SeverityCritical,
Categories: []string{"exfiltration", "data_leak"},
},
Actions: []PlaybookAction{ActionIsolate, ActionEscalate, ActionNotify},
Priority: 200,
},
{
ID: "pb-review-tool-abuse",
Name: "Review Tool Abuse",
Description: "Flag tool abuse attempts for human review",
Enabled: true,
Condition: PlaybookCondition{
MinSeverity: SeverityMedium,
Categories: []string{"tool_abuse", "unauthorized_tool_use"},
},
Actions: []PlaybookAction{ActionAutoReview},
Priority: 50,
},
// severityRank converts a severity label to a comparable numeric rank.
// Unknown labels rank lowest (0), so they never satisfy a trigger threshold.
func severityRank(s string) int {
	rank := 0
	switch s {
	case "LOW":
		rank = 1
	case "MEDIUM":
		rank = 2
	case "HIGH":
		rank = 3
	case "CRITICAL":
		rank = 4
	}
	return rank
}
// ListPlaybooks returns copies of all playbooks (enabled and disabled),
// in arbitrary (map-iteration) order.
func (pe *PlaybookEngine) ListPlaybooks() []Playbook {
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	result := make([]Playbook, 0, len(pe.playbooks))
	for _, pb := range pe.playbooks {
		result = append(result, *pb)
	}
	return result
}

// ExecutionLog returns the most recent playbook executions, oldest first.
// limit <= 0 (or larger than the log) returns the whole log.
func (pe *PlaybookEngine) ExecutionLog(limit int) []PlaybookExecution {
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	if limit <= 0 || limit > len(pe.execLog) {
		limit = len(pe.execLog)
	}
	start := len(pe.execLog) - limit
	result := make([]PlaybookExecution, limit)
	copy(result, pe.execLog[start:])
	return result
}

// PlaybookStats returns engine statistics: total and enabled playbook counts
// plus the current execution-log depth (capped at maxLog, so not a lifetime
// total once the log wraps).
func (pe *PlaybookEngine) PlaybookStats() map[string]any {
	pe.mu.RLock()
	defer pe.mu.RUnlock()
	enabled := 0
	for _, pb := range pe.playbooks {
		if pb.Enabled {
			enabled++
		}
	}
	return map[string]any{
		"total_playbooks":  len(pe.playbooks),
		"enabled":          enabled,
		"total_executions": len(pe.execLog),
	}
}

View file

@ -0,0 +1,129 @@
package soc
import (
"testing"
)
// TestPlaybookEngine_DefaultPlaybooks checks that the engine ships with the
// four built-in playbooks.
func TestPlaybookEngine_DefaultPlaybooks(t *testing.T) {
	pe := NewPlaybookEngine()
	pbs := pe.ListPlaybooks()
	if len(pbs) != 4 {
		t.Fatalf("expected 4 default playbooks, got %d", len(pbs))
	}
}

// TestPlaybookEngine_ExecuteJailbreak verifies the CRITICAL jailbreak
// playbook fires, succeeds, and runs all three of its actions.
func TestPlaybookEngine_ExecuteJailbreak(t *testing.T) {
	pe := NewPlaybookEngine()
	execs := pe.Execute("inc-001", "CRITICAL", "jailbreak", "")
	if len(execs) == 0 {
		t.Fatal("should match jailbreak playbook")
	}
	found := false
	for _, e := range execs {
		if e.PlaybookID == "pb-block-jailbreak" {
			found = true
			if e.Status != "success" {
				t.Fatal("execution should be success")
			}
			if e.ActionsRun != 3 {
				t.Fatalf("jailbreak playbook has 3 actions, got %d", e.ActionsRun)
			}
		}
	}
	if !found {
		t.Fatal("pb-block-jailbreak should have matched")
	}
}

// TestPlaybookEngine_NoMatchLowSeverity checks the minimum-severity
// threshold: a LOW event must not trigger a CRITICAL-threshold playbook.
func TestPlaybookEngine_NoMatchLowSeverity(t *testing.T) {
	pe := NewPlaybookEngine()
	// LOW severity jailbreak should not match CRITICAL-threshold playbook
	execs := pe.Execute("inc-002", "LOW", "jailbreak", "")
	for _, e := range execs {
		if e.PlaybookID == "pb-block-jailbreak" {
			t.Fatal("LOW severity should not match CRITICAL trigger")
		}
	}
}

// TestPlaybookEngine_KillChainMatch verifies kill-chain-phase triggering for
// the C2 playbook and that all four of its actions run.
func TestPlaybookEngine_KillChainMatch(t *testing.T) {
	pe := NewPlaybookEngine()
	execs := pe.Execute("inc-003", "CRITICAL", "c2", "command_control")
	found := false
	for _, e := range execs {
		if e.PlaybookID == "pb-c2-killchain" {
			found = true
			if e.ActionsRun != 4 {
				t.Fatalf("C2 playbook has 4 actions, got %d", e.ActionsRun)
			}
		}
	}
	if !found {
		t.Fatal("kill chain playbook should match command_control phase")
	}
}
// TestPlaybookEngine_DisabledPlaybook checks that RemovePlaybook (which
// disables, not deletes) prevents a playbook from executing.
func TestPlaybookEngine_DisabledPlaybook(t *testing.T) {
	pe := NewPlaybookEngine()
	pe.RemovePlaybook("pb-block-jailbreak")
	execs := pe.Execute("inc-004", "CRITICAL", "jailbreak", "")
	for _, e := range execs {
		if e.PlaybookID == "pb-block-jailbreak" {
			t.Fatal("disabled playbook should not execute")
		}
	}
}

// TestPlaybookEngine_AddCustom registers a user playbook and verifies it is
// listed alongside the defaults and fires on its category.
func TestPlaybookEngine_AddCustom(t *testing.T) {
	pe := NewPlaybookEngine()
	pe.AddPlaybook(Playbook{
		ID:   "pb-custom",
		Name: "Custom",
		Trigger: PlaybookTrigger{
			Categories: []string{"custom-cat"},
		},
		Actions: []PlaybookAction{
			{Type: "log", Params: map[string]string{"msg": "custom"}, Order: 1},
		},
		Enabled: true,
	})
	pbs := pe.ListPlaybooks()
	if len(pbs) != 5 {
		t.Fatalf("expected 5 playbooks, got %d", len(pbs))
	}
	execs := pe.Execute("inc-005", "HIGH", "custom-cat", "")
	found := false
	for _, e := range execs {
		if e.PlaybookID == "pb-custom" {
			found = true
		}
	}
	if !found {
		t.Fatal("custom playbook should match")
	}
}

// TestPlaybookEngine_ExecutionLog verifies that runs are recorded in the log.
func TestPlaybookEngine_ExecutionLog(t *testing.T) {
	pe := NewPlaybookEngine()
	pe.Execute("inc-001", "CRITICAL", "jailbreak", "")
	pe.Execute("inc-002", "HIGH", "exfiltration", "")
	log := pe.ExecutionLog(10)
	if len(log) < 2 {
		t.Fatalf("expected at least 2 executions, got %d", len(log))
	}
}

// TestPlaybookEngine_Stats spot-checks playbook counts in PlaybookStats.
func TestPlaybookEngine_Stats(t *testing.T) {
	pe := NewPlaybookEngine()
	stats := pe.PlaybookStats()
	if stats["total_playbooks"].(int) != 4 {
		t.Fatal("should have 4 playbooks")
	}
	if stats["enabled"].(int) != 4 {
		t.Fatal("all 4 should be enabled")
	}
}

View file

@ -0,0 +1,36 @@
package soc
import "time"
// SOCRepository defines the persistence contract for the SOC subsystem.
// Implementations: sqlite.SOCRepo (default), postgres.SOCRepo (production).
//
// All methods that list or count data accept a tenantID parameter for multi-tenant
// isolation. Pass "" (empty) for backward compatibility (returns all tenants).
type SOCRepository interface {
	// ── Events ──────────────────────────────────────────────

	InsertEvent(e SOCEvent) error
	GetEvent(id string) (*SOCEvent, error)
	ListEvents(tenantID string, limit int) ([]SOCEvent, error)
	ListEventsByCategory(tenantID string, category string, limit int) ([]SOCEvent, error)
	EventExistsByHash(contentHash string) (bool, error) // §5.2 dedup
	CountEvents(tenantID string) (int, error)
	CountEventsSince(tenantID string, since time.Time) (int, error)

	// ── Incidents ───────────────────────────────────────────

	InsertIncident(inc Incident) error
	GetIncident(id string) (*Incident, error)
	ListIncidents(tenantID string, status string, limit int) ([]Incident, error)
	UpdateIncidentStatus(id string, status IncidentStatus) error
	UpdateIncident(inc *Incident) error
	CountOpenIncidents(tenantID string) (int, error)

	// ── Sensors ─────────────────────────────────────────────

	UpsertSensor(s Sensor) error
	ListSensors(tenantID string) ([]Sensor, error)
	CountSensorsByStatus(tenantID string) (map[SensorStatus]int, error)

	// ── Retention ───────────────────────────────────────────
	// Both purge calls return the number of rows removed.

	PurgeExpiredEvents(retentionDays int) (int64, error)
	PurgeExpiredIncidents(retentionDays int) (int64, error)
}

View file

@ -0,0 +1,138 @@
package soc
import (
"sync"
"time"
)
// DataRetentionPolicy implements §19 — configurable data lifecycle management.
// Enforces retention windows and auto-archives/purges old events.
// policies is keyed by data type (e.g. "events", "incidents") and guarded by mu.
type DataRetentionPolicy struct {
	mu       sync.RWMutex
	policies map[string]RetentionRule
}

// RetentionRule defines how long data of a given type is kept.
type RetentionRule struct {
	DataType    string    `json:"data_type"`   // events, incidents, audit, anomaly_alerts
	RetainDays  int       `json:"retain_days"` // Max age in days
	Action      string    `json:"action"`      // archive, delete, compress
	Enabled     bool      `json:"enabled"`
	LastRun     time.Time `json:"last_run"`     // last Enforce invocation for this rule
	ItemsPurged int       `json:"items_purged"` // cumulative count across Enforce runs
}
// NewDataRetentionPolicy creates default retention rules:
// events 90d (archive), incidents 365d (archive), audit 730d (compress),
// anomaly alerts 30d (delete), playbook log 180d (archive).
func NewDataRetentionPolicy() *DataRetentionPolicy {
	return &DataRetentionPolicy{
		policies: map[string]RetentionRule{
			"events": {
				DataType:   "events",
				RetainDays: 90,
				Action:     "archive",
				Enabled:    true,
			},
			"incidents": {
				DataType:   "incidents",
				RetainDays: 365,
				Action:     "archive",
				Enabled:    true,
			},
			"audit": {
				DataType:   "audit",
				RetainDays: 730, // 2 years for compliance
				Action:     "compress",
				Enabled:    true,
			},
			"anomaly_alerts": {
				DataType:   "anomaly_alerts",
				RetainDays: 30,
				Action:     "delete",
				Enabled:    true,
			},
			"playbook_log": {
				DataType:   "playbook_log",
				RetainDays: 180,
				Action:     "archive",
				Enabled:    true,
			},
		},
	}
}
// SetPolicy creates or updates a retention rule for a data type.
// The rule is always (re)enabled. Run statistics (LastRun, ItemsPurged) of an
// existing rule are preserved — the previous implementation replaced the rule
// wholesale, silently zeroing the purge history on every reconfiguration.
func (d *DataRetentionPolicy) SetPolicy(dataType string, retainDays int, action string) {
	d.mu.Lock()
	defer d.mu.Unlock()

	rule := RetentionRule{
		DataType:   dataType,
		RetainDays: retainDays,
		Action:     action,
		Enabled:    true,
	}
	if prev, ok := d.policies[dataType]; ok {
		rule.LastRun = prev.LastRun
		rule.ItemsPurged = prev.ItemsPurged
	}
	d.policies[dataType] = rule
}
// GetPolicy returns the retention rule for a data type and whether it exists.
func (d *DataRetentionPolicy) GetPolicy(dataType string) (RetentionRule, bool) {
	d.mu.RLock()
	defer d.mu.RUnlock()
	r, ok := d.policies[dataType]
	return r, ok
}

// ListPolicies returns copies of all retention policies, in arbitrary
// (map-iteration) order.
func (d *DataRetentionPolicy) ListPolicies() []RetentionRule {
	d.mu.RLock()
	defer d.mu.RUnlock()
	result := make([]RetentionRule, 0, len(d.policies))
	for _, r := range d.policies {
		result = append(result, r)
	}
	return result
}

// IsExpired checks if a timestamp has exceeded the retention window.
// Unknown data types and disabled rules are treated as never-expiring.
func (d *DataRetentionPolicy) IsExpired(dataType string, timestamp time.Time) bool {
	d.mu.RLock()
	defer d.mu.RUnlock()
	r, ok := d.policies[dataType]
	if !ok || !r.Enabled {
		return false
	}
	cutoff := time.Now().AddDate(0, 0, -r.RetainDays)
	return timestamp.Before(cutoff)
}
// Enforce runs retention checks and returns items to purge.
// In production, this would interact with the database.
// It counts how many of the supplied timestamps fall outside the retention
// window and updates the rule's LastRun / cumulative ItemsPurged stats.
// Returns 0 for unknown data types or disabled rules (stats untouched).
func (d *DataRetentionPolicy) Enforce(dataType string, timestamps []time.Time) (expired int) {
	d.mu.Lock()
	defer d.mu.Unlock()
	r, ok := d.policies[dataType]
	if !ok || !r.Enabled {
		return 0
	}
	cutoff := time.Now().AddDate(0, 0, -r.RetainDays)
	for _, t := range timestamps {
		if t.Before(cutoff) {
			expired++
		}
	}
	// RetentionRule values are copied out of the map, so write back the
	// mutated stats explicitly.
	r.LastRun = time.Now()
	r.ItemsPurged += expired
	d.policies[dataType] = r
	return expired
}
// RetentionStats returns retention policy statistics.
//
// The policies map is copied before being returned: exposing the internal
// map directly would let callers read it concurrently with SetPolicy/Enforce
// writes (a data race) and mutate internal state.
func (d *DataRetentionPolicy) RetentionStats() map[string]any {
	d.mu.RLock()
	defer d.mu.RUnlock()
	snapshot := make(map[string]RetentionRule, len(d.policies))
	for k, v := range d.policies {
		snapshot[k] = v
	}
	return map[string]any{
		"total_policies": len(d.policies),
		"policies":       snapshot,
	}
}

View file

@ -0,0 +1,84 @@
package soc
import (
"fmt"
"os"
"time"
"gopkg.in/yaml.v3"
)
// RuleConfig is the YAML format for custom correlation rules (§7.5).
//
// Example rules.yaml:
//
// rules:
//   - id: CUSTOM-001
//     name: API Key Spray
//     required_categories: [auth_bypass, brute_force]
//     min_events: 5
//     time_window: 2m
//     severity: HIGH
//     kill_chain_phase: Reconnaissance
//     mitre_mapping: [T1110]
//     cross_sensor: true
type RuleConfig struct {
	Rules []YAMLRule `yaml:"rules"`
}
// YAMLRule is a single custom correlation rule loaded from YAML.
// Fields mirror SOCCorrelationRule; LoadRulesFromYAML performs the
// conversion (parsing TimeWindow and defaulting MinEvents).
type YAMLRule struct {
	ID                 string   `yaml:"id"`
	Name               string   `yaml:"name"`
	RequiredCategories []string `yaml:"required_categories"`
	MinEvents          int      `yaml:"min_events"`
	TimeWindow         string   `yaml:"time_window"` // e.g., "5m", "10m", "1h" — must parse with time.ParseDuration
	Severity           string   `yaml:"severity"`
	KillChainPhase     string   `yaml:"kill_chain_phase"`
	MITREMapping       []string `yaml:"mitre_mapping"`
	Description        string   `yaml:"description"`
	CrossSensor        bool     `yaml:"cross_sensor"` // Allow cross-sensor correlation
}
// LoadRulesFromYAML loads custom correlation rules from a YAML file.
// Returns nil and no error if the file doesn't exist (optional config).
//
// Each rule's time_window must parse as a positive Go duration (e.g. "5m").
// A missing or non-positive min_events defaults to 2.
func LoadRulesFromYAML(path string) ([]SOCCorrelationRule, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil // Optional — no custom rules
		}
		return nil, fmt.Errorf("read rules file: %w", err)
	}
	var cfg RuleConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("parse rules YAML: %w", err)
	}
	rules := make([]SOCCorrelationRule, 0, len(cfg.Rules))
	for _, yr := range cfg.Rules {
		dur, err := time.ParseDuration(yr.TimeWindow)
		if err != nil {
			return nil, fmt.Errorf("rule %s: invalid time_window %q: %w", yr.ID, yr.TimeWindow, err)
		}
		// A zero or negative window would make the rule impossible to match
		// meaningfully; reject it up front instead of silently accepting it.
		if dur <= 0 {
			return nil, fmt.Errorf("rule %s: time_window must be positive, got %q", yr.ID, yr.TimeWindow)
		}
		// <= 0 (not == 0) so a negative value in the file also falls back
		// to the default instead of producing a nonsensical threshold.
		if yr.MinEvents <= 0 {
			yr.MinEvents = 2 // Default
		}
		rules = append(rules, SOCCorrelationRule{
			ID:                 yr.ID,
			Name:               yr.Name,
			RequiredCategories: yr.RequiredCategories,
			MinEvents:          yr.MinEvents,
			TimeWindow:         dur,
			Severity:           EventSeverity(yr.Severity),
			KillChainPhase:     yr.KillChainPhase,
			MITREMapping:       yr.MITREMapping,
			Description:        yr.Description,
			CrossSensor:        yr.CrossSensor,
		})
	}
	return rules, nil
}

View file

@ -49,6 +49,7 @@ const (
// Sensor represents a registered sensor in the SOC (§11.3).
type Sensor struct {
SensorID string `json:"sensor_id"`
TenantID string `json:"tenant_id,omitempty"`
SensorType SensorType `json:"sensor_type"`
Status SensorStatus `json:"status"`
FirstSeen time.Time `json:"first_seen"`

View file

@ -116,7 +116,7 @@ func TestIncidentAddEvent(t *testing.T) {
func TestIncidentResolve(t *testing.T) {
inc := NewIncident("Test", SeverityHigh, "test_rule")
inc.Resolve(StatusResolved)
inc.Resolve(StatusResolved, "system")
if inc.IsOpen() {
t.Error("resolved incident should not be open")
@ -146,7 +146,7 @@ func TestIncidentMTTR(t *testing.T) {
t.Error("unresolved MTTR should be 0")
}
time.Sleep(10 * time.Millisecond)
inc.Resolve(StatusResolved)
inc.Resolve(StatusResolved, "system")
if inc.MTTR() <= 0 {
t.Error("resolved MTTR should be positive")
}
@ -229,78 +229,41 @@ func TestSensorHeartbeatRecovery(t *testing.T) {
}
}
// === Playbook Tests ===
// === Playbook Engine Tests (§10) ===
func TestPlaybookMatches(t *testing.T) {
pb := Playbook{
ID: "pb-test",
Enabled: true,
Condition: PlaybookCondition{
MinSeverity: SeverityHigh,
Categories: []string{"jailbreak", "prompt_injection"},
},
Actions: []PlaybookAction{ActionAutoBlock},
func TestPlaybookEngine_Defaults(t *testing.T) {
pe := NewPlaybookEngine()
pbs := pe.ListPlaybooks()
if len(pbs) != 4 {
t.Errorf("expected 4 default playbooks, got %d", len(pbs))
}
// Should match
evt := NewSOCEvent(SourceSentinelCore, SeverityCritical, "jailbreak", "test")
if !pb.Matches(evt) {
t.Error("expected match for jailbreak + CRITICAL")
}
// Should not match — low severity
evt2 := NewSOCEvent(SourceSentinelCore, SeverityLow, "jailbreak", "test")
if pb.Matches(evt2) {
t.Error("should not match LOW severity")
}
// Should not match — wrong category
evt3 := NewSOCEvent(SourceSentinelCore, SeverityCritical, "network_block", "test")
if pb.Matches(evt3) {
t.Error("should not match wrong category")
}
// Disabled playbook
pb.Enabled = false
if pb.Matches(evt) {
t.Error("disabled playbook should not match")
}
}
func TestPlaybookSourceFilter(t *testing.T) {
pb := Playbook{
ID: "pb-shield-only",
Enabled: true,
Condition: PlaybookCondition{
MinSeverity: SeverityMedium,
Categories: []string{"network_block"},
Sources: []EventSource{SourceShield},
},
Actions: []PlaybookAction{ActionNotify},
}
// Shield source should match
evt := NewSOCEvent(SourceShield, SeverityHigh, "network_block", "test")
if !pb.Matches(evt) {
t.Error("expected match for shield source")
}
// Non-shield source should not match
evt2 := NewSOCEvent(SourceSentinelCore, SeverityHigh, "network_block", "test")
if pb.Matches(evt2) {
t.Error("should not match non-shield source")
}
}
func TestDefaultPlaybooks(t *testing.T) {
pbs := DefaultPlaybooks()
if len(pbs) != 3 {
t.Errorf("expected 3 default playbooks, got %d", len(pbs))
}
// Check all are enabled
for _, pb := range pbs {
if !pb.Enabled {
t.Errorf("default playbook %s should be enabled", pb.ID)
t.Errorf("playbook %s should be enabled", pb.ID)
}
}
}
// Verifies that a CRITICAL jailbreak event triggers the jailbreak-blocking playbook.
func TestPlaybookEngine_JailbreakMatch(t *testing.T) {
	pe := NewPlaybookEngine()
	matched := false
	for _, exec := range pe.Execute("inc-001", "CRITICAL", "jailbreak", "") {
		if exec.PlaybookID == "pb-block-jailbreak" {
			matched = true
			break
		}
	}
	if !matched {
		t.Error("expected pb-block-jailbreak to match CRITICAL jailbreak")
	}
}
// Verifies that a LOW-severity event does not trigger a CRITICAL-threshold playbook.
func TestPlaybookEngine_SeverityFilter(t *testing.T) {
	pe := NewPlaybookEngine()
	for _, exec := range pe.Execute("inc-002", "LOW", "jailbreak", "") {
		if exec.PlaybookID == "pb-block-jailbreak" {
			t.Error("LOW severity should not match CRITICAL threshold playbook")
		}
	}
}

View file

@ -0,0 +1,215 @@
package soc
import (
"strings"
"sync"
"time"
)
// ThreatIntelEngine implements §6 — IOC (Indicator of Compromise) matching.
// Maintains feed subscriptions and in-memory IOC database for real-time matching.
type ThreatIntelEngine struct {
	mu    sync.RWMutex // guards iocs, feeds, and hits
	iocs  map[string]*IOC // key = value (IP, domain, hash)
	feeds []Feed
	hits  []IOCHit // bounded hit log; oldest entry evicted once len reaches max
	max   int      // capacity of hits
}
// IOCType categorizes the indicator.
type IOCType string
const (
	IOCIP     IOCType = "ip"
	IOCDomain IOCType = "domain"
	IOCHash   IOCType = "hash"
	IOCEmail  IOCType = "email"
	IOCURL    IOCType = "url"
)
// IOC is an individual indicator of compromise.
type IOC struct {
	Value       string    `json:"value"`
	Type        IOCType   `json:"type"`
	Severity    string    `json:"severity"` // CRITICAL, HIGH, MEDIUM, LOW
	Source      string    `json:"source"`   // Feed name
	Tags        []string  `json:"tags"`
	Description string    `json:"description"`
	FirstSeen   time.Time `json:"first_seen"`
	LastSeen    time.Time `json:"last_seen"`
	HitCount    int       `json:"hit_count"` // incremented by Match/MatchEvent
}
// Feed represents a threat intelligence source.
type Feed struct {
	Name         string    `json:"name"`
	URL          string    `json:"url"`
	Type         string    `json:"type"` // stix, csv, json
	Enabled      bool      `json:"enabled"`
	IOCCount     int       `json:"ioc_count"`
	LastSync     time.Time `json:"last_sync"`
	SyncInterval string    `json:"sync_interval"` // duration string, e.g. "30m", "1h"
}
// IOCHit records a match between an event and an IOC.
type IOCHit struct {
	IOCValue  string    `json:"ioc_value"`
	IOCType   IOCType   `json:"ioc_type"`
	EventID   string    `json:"event_id"`
	Severity  string    `json:"severity"`
	Source    string    `json:"source"`
	Timestamp time.Time `json:"timestamp"`
}
// NewThreatIntelEngine creates the IOC matching engine with default feeds.
func NewThreatIntelEngine() *ThreatIntelEngine {
	engine := &ThreatIntelEngine{
		iocs: make(map[string]*IOC),
		max:  1000,
	}
	engine.loadDefaultFeeds()
	engine.loadSampleIOCs()
	return engine
}
// loadDefaultFeeds installs the built-in threat-intel feed subscriptions.
// Called only from NewThreatIntelEngine, before the engine is shared,
// so no locking is required here.
func (t *ThreatIntelEngine) loadDefaultFeeds() {
	t.feeds = []Feed{
		{Name: "AlienVault OTX", URL: "https://otx.alienvault.com/api/v1/pulses/subscribed", Type: "json", Enabled: true, SyncInterval: "1h"},
		{Name: "Abuse.ch URLhaus", URL: "https://urlhaus.abuse.ch/downloads/csv_recent/", Type: "csv", Enabled: true, SyncInterval: "30m"},
		{Name: "CIRCL MISP", URL: "https://www.circl.lu/doc/misp/feed-osint/", Type: "stix", Enabled: false, SyncInterval: "6h"},
		{Name: "Internal STIX", URL: "file:///var/sentinel/iocs/internal.stix", Type: "stix", Enabled: true, SyncInterval: "5m"},
	}
}
// loadSampleIOCs seeds the in-memory IOC database with example indicators
// so the engine is usable before any real feed sync. Called only from
// NewThreatIntelEngine, so no locking is required.
func (t *ThreatIntelEngine) loadSampleIOCs() {
	samples := []IOC{
		{Value: "185.220.101.35", Type: IOCIP, Severity: "HIGH", Source: "AlienVault OTX", Tags: []string{"tor-exit", "scanner"}, Description: "Known Tor exit node / mass scanner"},
		{Value: "evil-ai-jailbreak.com", Type: IOCDomain, Severity: "CRITICAL", Source: "Internal STIX", Tags: []string{"jailbreak", "c2"}, Description: "Jailbreak prompt C2 domain"},
		{Value: "d41d8cd98f00b204e9800998ecf8427e", Type: IOCHash, Severity: "MEDIUM", Source: "Abuse.ch URLhaus", Tags: []string{"malware-hash"}, Description: "Known malware hash (MD5)"},
		{Value: "attacker@malicious-prompts.org", Type: IOCEmail, Severity: "HIGH", Source: "Internal STIX", Tags: []string{"phishing", "social-engineering"}, Description: "Known prompt injection author"},
	}
	now := time.Now()
	for _, ioc := range samples {
		ioc := ioc // shadow to capture per-iteration (safe for Go <1.22)
		ioc.FirstSeen = now.Add(-72 * time.Hour)
		ioc.LastSeen = now
		// NOTE(review): sample Values are already lowercase, matching the
		// lowercased lookup key used by Match/AddIOC — keep new samples lowercase.
		t.iocs[ioc.Value] = &ioc
	}
	// Attribute demo counts and a recent sync time to the enabled feeds.
	for i := range t.feeds {
		if t.feeds[i].Enabled {
			t.feeds[i].IOCCount = len(samples) / 2
			t.feeds[i].LastSync = now.Add(-15 * time.Minute)
		}
	}
}
// Match checks a string against the IOC database.
// The value is trimmed and lowercased before lookup. On a hit the stored
// IOC's HitCount and LastSeen are updated (hence the write lock) and a
// detached copy is returned so callers never hold a pointer into internal
// state. Returns nil when the value is unknown.
func (t *ThreatIntelEngine) Match(value string) *IOC {
	t.mu.Lock()
	defer t.mu.Unlock()
	normalized := strings.ToLower(strings.TrimSpace(value))
	ioc, ok := t.iocs[normalized]
	if !ok {
		return nil
	}
	ioc.HitCount++
	ioc.LastSeen = time.Now()
	// Renamed from `copy` — the original shadowed the builtin copy.
	snapshot := *ioc
	return &snapshot
}
// MatchEvent checks all fields of an event description for IOC matches.
// Returns all hits.
//
// Matching is a case-insensitive substring scan of text against every stored
// IOC value, so cost grows with both the IOC count and the text length.
// A write lock is taken because each match mutates the matched IOC's
// HitCount/LastSeen and appends to the bounded hit log.
func (t *ThreatIntelEngine) MatchEvent(eventID, text string) []IOCHit {
	t.mu.Lock()
	defer t.mu.Unlock()
	var hits []IOCHit
	lower := strings.ToLower(text)
	for _, ioc := range t.iocs {
		if strings.Contains(lower, strings.ToLower(ioc.Value)) {
			hit := IOCHit{
				IOCValue: ioc.Value,
				IOCType:  ioc.Type,
				EventID:  eventID,
				Severity: ioc.Severity,
				Source:   ioc.Source,
				Timestamp: time.Now(),
			}
			ioc.HitCount++
			ioc.LastSeen = time.Now()
			hits = append(hits, hit)
			// Bounded hit log: once full, shift left one slot and overwrite
			// the tail — evicts the oldest hit instead of growing past t.max.
			if len(t.hits) >= t.max {
				copy(t.hits, t.hits[1:])
				t.hits[len(t.hits)-1] = hit
			} else {
				t.hits = append(t.hits, hit)
			}
		}
	}
	return hits
}
// AddIOC adds a custom indicator of compromise. The indicator is stored
// under its lowercased Value; FirstSeen is defaulted to now when unset,
// and LastSeen is always refreshed.
func (t *ThreatIntelEngine) AddIOC(ioc IOC) {
	now := time.Now()
	if ioc.FirstSeen.IsZero() {
		ioc.FirstSeen = now
	}
	ioc.LastSeen = now
	key := strings.ToLower(ioc.Value)
	t.mu.Lock()
	t.iocs[key] = &ioc
	t.mu.Unlock()
}
// ListIOCs returns a value-copy snapshot of all indicators.
func (t *ThreatIntelEngine) ListIOCs() []IOC {
	t.mu.RLock()
	defer t.mu.RUnlock()
	snapshot := make([]IOC, 0, len(t.iocs))
	for _, indicator := range t.iocs {
		snapshot = append(snapshot, *indicator)
	}
	return snapshot
}
// ListFeeds returns a copy of the configured threat intel feeds.
func (t *ThreatIntelEngine) ListFeeds() []Feed {
	t.mu.RLock()
	defer t.mu.RUnlock()
	snapshot := make([]Feed, len(t.feeds))
	copy(snapshot, t.feeds)
	return snapshot
}
// RecentHits returns up to limit of the most recent IOC match hits.
// A non-positive or oversized limit returns everything.
func (t *ThreatIntelEngine) RecentHits(limit int) []IOCHit {
	t.mu.RLock()
	defer t.mu.RUnlock()
	total := len(t.hits)
	if limit <= 0 || limit > total {
		limit = total
	}
	recent := make([]IOCHit, limit)
	copy(recent, t.hits[total-limit:])
	return recent
}
// ThreatIntelStats returns threat intel statistics: total IOC count,
// feed counts (total and enabled), and the number of recorded hits.
func (t *ThreatIntelEngine) ThreatIntelStats() map[string]any {
	t.mu.RLock()
	defer t.mu.RUnlock()
	enabledFeeds := 0
	for _, f := range t.feeds {
		if f.Enabled {
			enabledFeeds++
		}
	}
	return map[string]any{
		"total_iocs":    len(t.iocs),
		"total_feeds":   len(t.feeds),
		"enabled_feeds": enabledFeeds,
		"total_hits":    len(t.hits),
	}
}

View file

@ -0,0 +1,131 @@
package soc
import (
"testing"
"time"
)
// Verifies the engine boots with exactly the built-in sample indicators.
func TestThreatIntel_SampleIOCs(t *testing.T) {
	engine := NewThreatIntelEngine()
	if got := len(engine.ListIOCs()); got != 4 {
		t.Fatalf("expected 4 sample IOCs, got %d", got)
	}
}
// Verifies a known sample IP matches and carries its configured severity.
func TestThreatIntel_Match(t *testing.T) {
	engine := NewThreatIntelEngine()
	hit := engine.Match("185.220.101.35")
	if hit == nil {
		t.Fatal("should match known IP IOC")
	}
	if hit.Severity != "HIGH" {
		t.Fatalf("expected HIGH severity, got %s", hit.Severity)
	}
}
// Verifies an address absent from the IOC database does not match.
func TestThreatIntel_NoMatch(t *testing.T) {
	engine := NewThreatIntelEngine()
	if hit := engine.Match("192.168.1.1"); hit != nil {
		t.Fatal("should not match unknown IP")
	}
}
// Verifies substring matching of event text against a sample domain IOC.
func TestThreatIntel_MatchEvent(t *testing.T) {
	engine := NewThreatIntelEngine()
	hits := engine.MatchEvent("evt-001", "Detected connection to evil-ai-jailbreak.com from internal host")
	if len(hits) != 1 {
		t.Fatalf("expected 1 hit, got %d", len(hits))
	}
	if got := hits[0].Severity; got != "CRITICAL" {
		t.Fatalf("expected CRITICAL, got %s", got)
	}
}
// Verifies a manually added indicator is subsequently matchable.
func TestThreatIntel_AddCustomIOC(t *testing.T) {
	engine := NewThreatIntelEngine()
	custom := IOC{
		Value:    "bad-prompt.ai",
		Type:     IOCDomain,
		Severity: "HIGH",
		Source:   "manual",
	}
	engine.AddIOC(custom)
	if engine.Match("bad-prompt.ai") == nil {
		t.Fatal("should match custom IOC")
	}
}
// Verifies the default feed subscriptions are installed.
func TestThreatIntel_Feeds(t *testing.T) {
	engine := NewThreatIntelEngine()
	if got := len(engine.ListFeeds()); got != 4 {
		t.Fatalf("expected 4 feeds, got %d", got)
	}
}
// Verifies stats report the sample IOC count.
func TestThreatIntel_Stats(t *testing.T) {
	engine := NewThreatIntelEngine()
	stats := engine.ThreatIntelStats()
	if total := stats["total_iocs"].(int); total != 4 {
		t.Fatal("expected 4 IOCs")
	}
}
// Verifies each MatchEvent hit is appended to the recent-hits log.
func TestThreatIntel_HitTracking(t *testing.T) {
	engine := NewThreatIntelEngine()
	engine.MatchEvent("evt-001", "Connection to 185.220.101.35")
	engine.MatchEvent("evt-002", "Request from 185.220.101.35")
	if hits := engine.RecentHits(10); len(hits) != 2 {
		t.Fatalf("expected 2 hits, got %d", len(hits))
	}
}
// Verifies the default retention rule set is fully populated.
func TestRetention_DefaultPolicies(t *testing.T) {
	rp := NewDataRetentionPolicy()
	if got := len(rp.ListPolicies()); got != 5 {
		t.Fatalf("expected 5 default policies, got %d", got)
	}
}
// Verifies IsExpired against the 90-day events policy on both sides of the cutoff.
func TestRetention_Expiration(t *testing.T) {
	rp := NewDataRetentionPolicy()
	stale := time.Now().AddDate(0, 0, -100) // 100 days ago
	recent := time.Now().Add(-1 * time.Hour)
	if !rp.IsExpired("events", stale) {
		t.Fatal("100-day old event should be expired (90d policy)")
	}
	if rp.IsExpired("events", recent) {
		t.Fatal("1-hour old event should not be expired")
	}
}
// Verifies Enforce counts only timestamps beyond the retention window.
func TestRetention_Enforce(t *testing.T) {
	rp := NewDataRetentionPolicy()
	samples := []time.Time{
		time.Now().AddDate(0, 0, -100),
		time.Now().AddDate(0, 0, -95),
		time.Now().Add(-1 * time.Hour),
	}
	if expired := rp.Enforce("events", samples); expired != 2 {
		t.Fatalf("expected 2 expired, got %d", expired)
	}
}
// Verifies SetPolicy stores a rule retrievable via GetPolicy.
func TestRetention_CustomPolicy(t *testing.T) {
	rp := NewDataRetentionPolicy()
	rp.SetPolicy("custom", 7, "delete")
	rule, ok := rp.GetPolicy("custom")
	if !ok {
		t.Fatal("custom policy should exist")
	}
	if rule.RetainDays != 7 {
		t.Fatalf("expected 7 days, got %d", rule.RetainDays)
	}
}

View file

@ -0,0 +1,201 @@
package soc
import (
"bytes"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"sync"
"time"
)
// WebhookEventType defines events that trigger webhooks (§15).
type WebhookEventType string
const (
	WebhookIncidentCreated  WebhookEventType = "incident_created"
	WebhookIncidentResolved WebhookEventType = "incident_resolved"
	WebhookCriticalEvent    WebhookEventType = "critical_event"
	WebhookSensorOffline    WebhookEventType = "sensor_offline"
	WebhookKillChainAlert   WebhookEventType = "kill_chain_alert"
)
// WebhookConfig defines a webhook destination.
type WebhookConfig struct {
	ID      string             `yaml:"id" json:"id"`      // assigned by AddWebhook when empty
	URL     string             `yaml:"url" json:"url"`
	Events  []WebhookEventType `yaml:"events" json:"events"` // only these event types are delivered
	Headers map[string]string  `yaml:"headers" json:"headers"` // extra headers set on each request
	Active  bool               `yaml:"active" json:"active"`
	Retries int                `yaml:"retries" json:"retries"` // delivery attempts; AddWebhook defaults 0 to 3
}
// WebhookPayload is the JSON body sent to webhook endpoints.
// EventType and Timestamp are overwritten by Fire.
type WebhookPayload struct {
	EventType   WebhookEventType `json:"event_type"`
	Timestamp   time.Time        `json:"timestamp"`
	IncidentID  string           `json:"incident_id,omitempty"`
	Severity    string           `json:"severity"`
	Title       string           `json:"title"`
	Description string           `json:"description"`
	URL         string           `json:"url,omitempty"` // Link to dashboard
}
// WebhookEngine manages webhook delivery with retry logic (§15).
type WebhookEngine struct {
	mu       sync.RWMutex // guards webhooks and the stat counters
	webhooks []WebhookConfig
	client   *http.Client
	// Stats
	sent   int
	failed int
	queue  chan webhookJob // buffered; Fire drops events when full
}
// webhookJob is one delivery attempt queued for the background worker.
type webhookJob struct {
	config  WebhookConfig
	payload WebhookPayload
	attempt int // completed delivery attempts so far
}
// NewWebhookEngine creates a webhook delivery engine with a 10s HTTP
// timeout and a 100-entry delivery queue.
//
// NOTE(review): the delivery worker goroutine has no shutdown mechanism —
// the queue channel is never closed, so the worker lives for the process
// lifetime. Confirm this is acceptable for embedding callers.
func NewWebhookEngine() *WebhookEngine {
	e := &WebhookEngine{
		client: &http.Client{Timeout: 10 * time.Second},
		queue:  make(chan webhookJob, 100),
	}
	// Start async delivery worker
	go e.deliveryWorker()
	return e
}
// AddWebhook registers a webhook destination.
// A non-positive Retries is normalized to the default of 3, and a missing
// ID is assigned a unique timestamp-based one.
func (e *WebhookEngine) AddWebhook(wh WebhookConfig) {
	e.mu.Lock()
	defer e.mu.Unlock()
	// <= 0 rather than == 0: a negative value would otherwise slip through
	// and cause deliveryWorker to give up after a single attempt.
	if wh.Retries <= 0 {
		wh.Retries = 3
	}
	if wh.ID == "" {
		wh.ID = fmt.Sprintf("wh-%d", time.Now().UnixNano())
	}
	e.webhooks = append(e.webhooks, wh)
}
// RemoveWebhook deactivates a webhook by ID. The entry stays in the list
// with Active=false rather than being deleted, so it remains visible in
// Stats and Webhooks.
func (e *WebhookEngine) RemoveWebhook(id string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	for i, wh := range e.webhooks {
		if wh.ID == id {
			e.webhooks[i].Active = false
		}
	}
}
// Fire sends a webhook payload to all active subscribers of eventType.
// Delivery is asynchronous; if the queue is full the event is dropped
// with a warning rather than blocking the caller.
func (e *WebhookEngine) Fire(eventType WebhookEventType, payload WebhookPayload) {
	payload.EventType = eventType
	payload.Timestamp = time.Now()
	e.mu.RLock()
	defer e.mu.RUnlock()
	for _, wh := range e.webhooks {
		if !wh.Active {
			continue
		}
		subscribed := false
		for _, et := range wh.Events {
			if et == eventType {
				subscribed = true
				break
			}
		}
		if !subscribed {
			continue
		}
		select {
		case e.queue <- webhookJob{config: wh, payload: payload, attempt: 0}:
		default:
			slog.Warn("webhook queue full, dropping event", "event_type", eventType, "url", wh.URL)
		}
	}
}
// deliveryWorker processes webhook jobs with retries.
// Failed deliveries are retried with exponential backoff (1s, 2s, 4s…)
// until the webhook's configured Retries is exhausted. A retry that
// cannot be re-queued because the queue is full is now counted as failed
// and logged, instead of silently disappearing from the stats.
func (e *WebhookEngine) deliveryWorker() {
	for job := range e.queue {
		err := e.deliver(job.config, job.payload)
		if err == nil {
			e.mu.Lock()
			e.sent++
			e.mu.Unlock()
			continue
		}
		job.attempt++
		if job.attempt >= job.config.Retries {
			e.markFailed()
			slog.Error("webhook delivery failed", "attempts", job.attempt, "url", job.config.URL, "error", err)
			continue
		}
		// Exponential backoff: 1s, 2s, 4s
		go func(j webhookJob) {
			time.Sleep(time.Duration(1<<j.attempt) * time.Second)
			select {
			case e.queue <- j:
			default:
				// Queue still full — account for the loss rather than dropping silently.
				e.markFailed()
				slog.Warn("webhook retry dropped, queue full", "url", j.config.URL)
			}
		}(job)
	}
}

// markFailed increments the failed-delivery counter under the engine lock.
func (e *WebhookEngine) markFailed() {
	e.mu.Lock()
	e.failed++
	e.mu.Unlock()
}
// deliver marshals the payload and POSTs it to the webhook URL.
// Any HTTP status >= 400 is treated as a delivery failure.
func (e *WebhookEngine) deliver(wh WebhookConfig, payload WebhookPayload) error {
	encoded, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	req, err := http.NewRequest("POST", wh.URL, bytes.NewReader(encoded))
	if err != nil {
		return err
	}
	headers := req.Header
	headers.Set("Content-Type", "application/json")
	headers.Set("User-Agent", "SYNTREX-SOAR/1.0")
	for key, value := range wh.Headers {
		headers.Set(key, value)
	}
	resp, err := e.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		return fmt.Errorf("webhook returned %d", resp.StatusCode)
	}
	return nil
}
// Stats returns webhook delivery statistics: configured destinations,
// sent/failed counters, and the current queue backlog.
func (e *WebhookEngine) Stats() map[string]any {
	e.mu.RLock()
	defer e.mu.RUnlock()
	stats := map[string]any{
		"webhooks_configured": len(e.webhooks),
		"sent":                e.sent,
		"failed":              e.failed,
		"queue_depth":         len(e.queue),
	}
	return stats
}
// Webhooks returns a copy of all configured webhooks (including inactive ones).
func (e *WebhookEngine) Webhooks() []WebhookConfig {
	e.mu.RLock()
	defer e.mu.RUnlock()
	snapshot := make([]WebhookConfig, len(e.webhooks))
	copy(snapshot, e.webhooks)
	return snapshot
}

View file

@ -0,0 +1,134 @@
package soc
import (
"encoding/json"
"net/http"
"net/http/httptest"
"sync/atomic"
"testing"
"time"
)
// TestWebhookEngine_Fire verifies that only subscribed event types are
// delivered, and that the delivered payload carries its event_type.
func TestWebhookEngine_Fire(t *testing.T) {
	var received atomic.Int32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		received.Add(1)
		var payload WebhookPayload
		// Check the decode error instead of silently ignoring it — a bad
		// body would otherwise masquerade as a missing event_type.
		if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
			t.Errorf("decode webhook payload: %v", err)
		}
		if payload.EventType == "" {
			t.Error("missing event_type in payload")
		}
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()
	engine := NewWebhookEngine()
	engine.AddWebhook(WebhookConfig{
		ID:      "wh-1",
		URL:     srv.URL,
		Events:  []WebhookEventType{WebhookIncidentCreated, WebhookCriticalEvent},
		Active:  true,
		Retries: 1,
	})
	// Fire matching event
	engine.Fire(WebhookIncidentCreated, WebhookPayload{
		IncidentID: "inc-001",
		Severity:   "CRITICAL",
		Title:      "Test incident",
	})
	// Fire non-matching event — should NOT trigger
	engine.Fire(WebhookSensorOffline, WebhookPayload{
		Title: "Sensor down",
	})
	// Wait for async delivery
	time.Sleep(300 * time.Millisecond)
	if received.Load() != 1 {
		t.Fatalf("expected 1 webhook delivery, got %d", received.Load())
	}
}
// Verifies Stats reports the configured-webhook count.
func TestWebhookEngine_Stats(t *testing.T) {
	engine := NewWebhookEngine()
	engine.AddWebhook(WebhookConfig{
		ID:     "wh-stats",
		URL:    "http://localhost:1/nope",
		Events: []WebhookEventType{WebhookCriticalEvent},
		Active: true,
	})
	stats := engine.Stats()
	if configured := stats["webhooks_configured"].(int); configured != 1 {
		t.Fatalf("expected 1 configured, got %v", stats["webhooks_configured"])
	}
}
// Verifies an inactive webhook never receives deliveries.
func TestWebhookEngine_InactiveSkipped(t *testing.T) {
	var received atomic.Int32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		received.Add(1)
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()
	engine := NewWebhookEngine()
	engine.AddWebhook(WebhookConfig{
		ID:     "wh-inactive",
		URL:    srv.URL,
		Events: []WebhookEventType{WebhookKillChainAlert},
		Active: false, // Inactive!
	})
	engine.Fire(WebhookKillChainAlert, WebhookPayload{Title: "Kill chain C2"})
	time.Sleep(200 * time.Millisecond)
	if got := received.Load(); got != 0 {
		t.Fatalf("inactive webhook should not fire, got %d", got)
	}
}
// Verifies a removed (deactivated) webhook no longer receives deliveries.
func TestWebhookEngine_RemoveWebhook(t *testing.T) {
	var received atomic.Int32
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		received.Add(1)
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()
	engine := NewWebhookEngine()
	engine.AddWebhook(WebhookConfig{
		ID:     "wh-remove",
		URL:    srv.URL,
		Events: []WebhookEventType{WebhookIncidentResolved},
		Active: true,
	})
	engine.RemoveWebhook("wh-remove")
	engine.Fire(WebhookIncidentResolved, WebhookPayload{Title: "Resolved"})
	time.Sleep(200 * time.Millisecond)
	if got := received.Load(); got != 0 {
		t.Fatalf("removed webhook should not fire, got %d", got)
	}
}
// Verifies Webhooks returns every registered destination.
func TestWebhookEngine_ListWebhooks(t *testing.T) {
	engine := NewWebhookEngine()
	engine.AddWebhook(WebhookConfig{URL: "http://a.com", Active: true})
	engine.AddWebhook(WebhookConfig{URL: "http://b.com", Active: true})
	if got := len(engine.Webhooks()); got != 2 {
		t.Fatalf("expected 2, got %d", got)
	}
}

View file

@ -0,0 +1,184 @@
package soc
import (
"fmt"
"sync"
"time"
)
// ZeroGMode implements §13.4 — manual approval workflow for Strike Force operations.
// Events in Zero-G mode require explicit analyst approval before auto-response executes.
type ZeroGMode struct {
	mu       sync.RWMutex
	enabled  bool // when false, RequestApproval is a no-op returning ""
	queue    []ZeroGRequest // pending requests, oldest first
	resolved []ZeroGRequest // finalized (approved/denied/expired) requests
	maxQueue int // pending cap; the oldest request is expired when exceeded
}
// ZeroGRequest represents a pending approval request.
type ZeroGRequest struct {
	ID          string       `json:"id"`
	EventID     string       `json:"event_id"`
	IncidentID  string       `json:"incident_id,omitempty"`
	Action      string       `json:"action"` // What would auto-execute
	Severity    string       `json:"severity"`
	Description string       `json:"description"`
	Status      ZeroGStatus  `json:"status"`
	CreatedAt   time.Time    `json:"created_at"`
	ResolvedAt  *time.Time   `json:"resolved_at,omitempty"`
	ResolvedBy  string       `json:"resolved_by,omitempty"`
	Verdict     ZeroGVerdict `json:"verdict,omitempty"`
}
// ZeroGStatus tracks the request lifecycle.
type ZeroGStatus string
const (
	ZeroGPending  ZeroGStatus = "PENDING"
	ZeroGApproved ZeroGStatus = "APPROVED"
	ZeroGDenied   ZeroGStatus = "DENIED"
	ZeroGExpired  ZeroGStatus = "EXPIRED" // evicted from a full queue without a verdict
)
// ZeroGVerdict is the analyst's decision.
type ZeroGVerdict string
const (
	ZGVerdictApprove  ZeroGVerdict = "APPROVE"
	ZGVerdictDeny     ZeroGVerdict = "DENY"
	ZGVerdictEscalate ZeroGVerdict = "ESCALATE"
)
// NewZeroGMode creates the Zero-G approval engine, disabled by default,
// with a 200-request pending cap.
func NewZeroGMode() *ZeroGMode {
	return &ZeroGMode{maxQueue: 200}
}
// Enable activates Zero-G mode (manual approval required).
func (z *ZeroGMode) Enable() {
	z.mu.Lock()
	defer z.mu.Unlock()
	z.enabled = true
}
// Disable deactivates Zero-G mode (auto-response resumes).
// Requests already queued remain pending.
func (z *ZeroGMode) Disable() {
	z.mu.Lock()
	defer z.mu.Unlock()
	z.enabled = false
}
// IsEnabled returns whether Zero-G mode is active.
func (z *ZeroGMode) IsEnabled() bool {
	z.mu.RLock()
	defer z.mu.RUnlock()
	return z.enabled
}
// RequestApproval queues an action for manual approval and returns the
// request ID, or "" when Zero-G mode is disabled. If the pending queue is
// full, the oldest request is expired into the resolved list to make room.
func (z *ZeroGMode) RequestApproval(eventID, incidentID, action, severity, description string) string {
	z.mu.Lock()
	defer z.mu.Unlock()
	if !z.enabled {
		return "" // Not in Zero-G mode, skip
	}
	now := time.Now()
	// Enforce max queue size by expiring the oldest pending request.
	if len(z.queue) >= z.maxQueue {
		oldest := z.queue[0]
		oldest.Status = ZeroGExpired
		oldest.ResolvedAt = &now
		z.resolved = append(z.resolved, oldest)
		z.queue = z.queue[1:]
	}
	req := ZeroGRequest{
		ID:          fmt.Sprintf("zg-%d", now.UnixNano()),
		EventID:     eventID,
		IncidentID:  incidentID,
		Action:      action,
		Severity:    severity,
		Description: description,
		Status:      ZeroGPending,
		CreatedAt:   now,
	}
	z.queue = append(z.queue, req)
	return req.ID
}
// Resolve processes an analyst's verdict on a pending request.
//
// APPROVE and DENY finalize the request and move it to the resolved list.
// ESCALATE records the analyst and verdict but keeps the request in the
// pending queue — it still needs a final APPROVE/DENY. (The previous code
// moved escalated requests out of the queue while leaving them PENDING,
// contradicting its own "stay pending" intent.)
// Returns an error if no pending request matches requestID.
func (z *ZeroGMode) Resolve(requestID string, verdict ZeroGVerdict, analyst string) error {
	z.mu.Lock()
	defer z.mu.Unlock()
	for i := range z.queue {
		if z.queue[i].ID != requestID {
			continue
		}
		if verdict == ZGVerdictEscalate {
			// Stay pending: mark who escalated, do not finalize.
			z.queue[i].ResolvedBy = analyst
			z.queue[i].Verdict = verdict
			return nil
		}
		now := time.Now()
		z.queue[i].ResolvedAt = &now
		z.queue[i].ResolvedBy = analyst
		z.queue[i].Verdict = verdict
		switch verdict {
		case ZGVerdictApprove:
			z.queue[i].Status = ZeroGApproved
		case ZGVerdictDeny:
			z.queue[i].Status = ZeroGDenied
		}
		// Move to resolved
		z.resolved = append(z.resolved, z.queue[i])
		z.queue = append(z.queue[:i], z.queue[i+1:]...)
		return nil
	}
	return fmt.Errorf("zero-g request %s not found", requestID)
}
// PendingRequests returns a copy of all pending approval requests, oldest first.
func (z *ZeroGMode) PendingRequests() []ZeroGRequest {
	z.mu.RLock()
	defer z.mu.RUnlock()
	snapshot := make([]ZeroGRequest, len(z.queue))
	copy(snapshot, z.queue)
	return snapshot
}
// Stats returns Zero-G mode statistics: enabled flag, pending depth, and
// per-outcome counts over the resolved list.
func (z *ZeroGMode) Stats() map[string]any {
	z.mu.RLock()
	defer z.mu.RUnlock()
	var approved, denied, expired int
	for _, req := range z.resolved {
		switch req.Status {
		case ZeroGApproved:
			approved++
		case ZeroGDenied:
			denied++
		case ZeroGExpired:
			expired++
		}
	}
	return map[string]any{
		"enabled":        z.enabled,
		"pending":        len(z.queue),
		"total_resolved": len(z.resolved),
		"approved":       approved,
		"denied":         denied,
		"expired":        expired,
	}
}

View file

@ -0,0 +1,123 @@
package soc
import (
"testing"
)
// Verifies RequestApproval is a no-op while Zero-G is disabled.
func TestZeroGMode_Disabled(t *testing.T) {
	zg := NewZeroGMode()
	if id := zg.RequestApproval("evt-1", "", "block_ip", "HIGH", "Block attacker IP"); id != "" {
		t.Fatal("disabled Zero-G should return empty ID")
	}
}
// Verifies an enabled Zero-G mode queues requests with their event data.
func TestZeroGMode_EnableAndRequest(t *testing.T) {
	zg := NewZeroGMode()
	zg.Enable()
	if !zg.IsEnabled() {
		t.Fatal("should be enabled")
	}
	if id := zg.RequestApproval("evt-1", "inc-1", "block_ip", "CRITICAL", "Block attacker 1.2.3.4"); id == "" {
		t.Fatal("enabled Zero-G should return request ID")
	}
	pending := zg.PendingRequests()
	if len(pending) != 1 {
		t.Fatalf("expected 1 pending, got %d", len(pending))
	}
	if got := pending[0].EventID; got != "evt-1" {
		t.Fatalf("expected evt-1, got %s", got)
	}
}
// Verifies an APPROVE verdict drains the queue and counts as approved.
func TestZeroGMode_Approve(t *testing.T) {
	zg := NewZeroGMode()
	zg.Enable()
	id := zg.RequestApproval("evt-1", "", "quarantine", "HIGH", "Quarantine host")
	if err := zg.Resolve(id, ZGVerdictApprove, "analyst-1"); err != nil {
		t.Fatalf("resolve failed: %v", err)
	}
	if len(zg.PendingRequests()) != 0 {
		t.Fatal("should have 0 pending after resolve")
	}
	if zg.Stats()["approved"].(int) != 1 {
		t.Fatal("should have 1 approved")
	}
}
// Verifies a DENY verdict is counted in stats.
func TestZeroGMode_Deny(t *testing.T) {
	zg := NewZeroGMode()
	zg.Enable()
	id := zg.RequestApproval("evt-2", "", "kill_process", "MEDIUM", "Kill suspicious proc")
	if err := zg.Resolve(id, ZGVerdictDeny, "analyst-2"); err != nil {
		t.Fatalf("resolve failed: %v", err)
	}
	if zg.Stats()["denied"].(int) != 1 {
		t.Fatal("should have 1 denied")
	}
}
// Verifies resolving an unknown request ID fails.
func TestZeroGMode_ResolveNotFound(t *testing.T) {
	zg := NewZeroGMode()
	zg.Enable()
	if err := zg.Resolve("zg-nonexistent", ZGVerdictApprove, "analyst"); err == nil {
		t.Fatal("should error on non-existent request")
	}
}
// Verifies the pending queue is capped and overflow expires the oldest request.
func TestZeroGMode_QueueOverflow(t *testing.T) {
	zg := NewZeroGMode()
	zg.Enable()
	// Fill queue past max (200)
	for i := 0; i < 201; i++ {
		zg.RequestApproval("evt", "", "action", "LOW", "test")
	}
	if got := len(zg.PendingRequests()); got != 200 {
		t.Fatalf("expected 200 pending (capped), got %d", got)
	}
	stats := zg.Stats()
	if stats["expired"].(int) != 1 {
		t.Fatalf("expected 1 expired, got %d", stats["expired"])
	}
}
// Verifies Enable/Disable round-trips the enabled flag.
func TestZeroGMode_Toggle(t *testing.T) {
	zg := NewZeroGMode()
	if zg.IsEnabled() {
		t.Fatal("should start disabled")
	}
	zg.Enable()
	if !zg.IsEnabled() {
		t.Fatal("should be enabled")
	}
	zg.Disable()
	if zg.IsEnabled() {
		t.Fatal("should be disabled again")
	}
}