Updated test suite for explainability & provenance (#696)

* Provenance tests

* Embeddings tests

* Test librarian

* Test triples stream

* Test concurrency

* Entity-centric graph writes

* Agent tool service tests

* Structured data tests

* RDF tests

* Additional LLM tests

* Reliability tests
cybermaggedon 2026-03-13 14:27:42 +00:00 committed by GitHub
parent e6623fc915
commit 29b4300808
36 changed files with 8799 additions and 0 deletions

@@ -0,0 +1,286 @@
"""
Tests for Consumer concurrency: TaskGroup-based concurrent message processing,
rate-limit retry with backpressure, and message acknowledgement.
"""
import asyncio
import time
import pytest
from unittest.mock import MagicMock, AsyncMock, patch
from trustgraph.base.consumer import Consumer
from trustgraph.exceptions import TooManyRequests
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_consumer(
concurrency=1,
handler=None,
rate_limit_retry_time=0.01,
rate_limit_timeout=1,
):
"""Create a Consumer with mocked infrastructure."""
taskgroup = MagicMock()
flow = MagicMock()
backend = MagicMock()
schema = MagicMock()
handler = handler or AsyncMock()
consumer = Consumer(
taskgroup=taskgroup,
flow=flow,
backend=backend,
topic="test-topic",
subscriber="test-sub",
schema=schema,
handler=handler,
rate_limit_retry_time=rate_limit_retry_time,
rate_limit_timeout=rate_limit_timeout,
concurrency=concurrency,
)
return consumer
def _make_msg():
"""Create a mock Pulsar message."""
return MagicMock()
# ---------------------------------------------------------------------------
# Concurrency configuration tests
# ---------------------------------------------------------------------------
class TestConcurrencyConfiguration:
def test_default_concurrency_is_1(self):
consumer = _make_consumer()
assert consumer.concurrency == 1
def test_custom_concurrency(self):
consumer = _make_consumer(concurrency=10)
assert consumer.concurrency == 10
def test_concurrency_stored(self):
for n in [1, 5, 20, 100]:
consumer = _make_consumer(concurrency=n)
assert consumer.concurrency == n
class TestTaskGroupConcurrency:
@pytest.mark.asyncio
async def test_creates_n_concurrent_tasks(self):
"""consumer_run should create exactly N concurrent consume_from_queue tasks."""
concurrency = 5
consumer = _make_consumer(concurrency=concurrency)
# Track how many consume_from_queue calls are made
call_count = 0
async def mock_consume():
nonlocal call_count
call_count += 1
# Wait a bit to let all tasks start, then signal stop
await asyncio.sleep(0.05)
consumer.running = False
consumer.consume_from_queue = mock_consume
# Mock the backend.create_consumer
consumer.backend.create_consumer = MagicMock(return_value=MagicMock())
# Run consumer_run - it will create TaskGroup with N tasks
consumer.running = True
await consumer.consumer_run()
assert call_count == concurrency
@pytest.mark.asyncio
async def test_single_concurrency_creates_one_task(self):
"""With concurrency=1, only one consume_from_queue task is created."""
consumer = _make_consumer(concurrency=1)
call_count = 0
async def mock_consume():
nonlocal call_count
call_count += 1
await asyncio.sleep(0.01)
consumer.running = False
consumer.consume_from_queue = mock_consume
consumer.backend.create_consumer = MagicMock(return_value=MagicMock())
consumer.running = True
await consumer.consumer_run()
assert call_count == 1
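# ---------------------------------------------------------------------------
# Reference sketch
# ---------------------------------------------------------------------------
# A minimal sketch of the consumer_run() shape the tests above exercise; it is
# an assumption about the production Consumer, not a copy of it.  The backend
# consumer is created once (the create_consumer() arguments are illustrative),
# then, while the running flag is set, `concurrency` consume_from_queue() tasks
# are spawned in an asyncio.TaskGroup and awaited together.
async def _reference_consumer_run(consumer):
    consumer.consumer = consumer.backend.create_consumer(
        consumer.topic, consumer.subscriber, consumer.schema,
    )
    while consumer.running:
        async with asyncio.TaskGroup() as tg:
            for _ in range(consumer.concurrency):
                tg.create_task(consumer.consume_from_queue())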
# ---------------------------------------------------------------------------
# Rate-limit retry tests
# ---------------------------------------------------------------------------
class TestRateLimitRetry:
@pytest.mark.asyncio
async def test_rate_limit_retries_then_succeeds(self):
"""TooManyRequests should cause retry, then succeed on next attempt."""
call_count = 0
async def handler_with_retry(msg, consumer_ref, flow):
nonlocal call_count
call_count += 1
if call_count == 1:
raise TooManyRequests("rate limited")
# Second call succeeds
consumer = _make_consumer(
handler=handler_with_retry,
rate_limit_retry_time=0.01,
)
mock_msg = _make_msg()
consumer.consumer = MagicMock()
await consumer.handle_one_from_queue(mock_msg)
assert call_count == 2
consumer.consumer.acknowledge.assert_called_once_with(mock_msg)
@pytest.mark.asyncio
async def test_rate_limit_timeout_negative_acks(self):
"""If rate limit retries exhaust the timeout, message is negative-acked."""
async def always_rate_limited(msg, consumer_ref, flow):
raise TooManyRequests("rate limited")
consumer = _make_consumer(
handler=always_rate_limited,
rate_limit_retry_time=0.01,
rate_limit_timeout=0.05,
)
mock_msg = _make_msg()
consumer.consumer = MagicMock()
await consumer.handle_one_from_queue(mock_msg)
consumer.consumer.negative_acknowledge.assert_called_with(mock_msg)
consumer.consumer.acknowledge.assert_not_called()
@pytest.mark.asyncio
async def test_non_rate_limit_error_negative_acks_immediately(self):
"""Non-TooManyRequests errors should negative-ack immediately (no retry)."""
call_count = 0
async def failing_handler(msg, consumer_ref, flow):
nonlocal call_count
call_count += 1
raise ValueError("bad data")
consumer = _make_consumer(handler=failing_handler)
mock_msg = _make_msg()
consumer.consumer = MagicMock()
await consumer.handle_one_from_queue(mock_msg)
assert call_count == 1
consumer.consumer.negative_acknowledge.assert_called_once_with(mock_msg)
@pytest.mark.asyncio
async def test_successful_message_acknowledged(self):
"""Successfully processed messages are acknowledged."""
consumer = _make_consumer(handler=AsyncMock())
mock_msg = _make_msg()
consumer.consumer = MagicMock()
await consumer.handle_one_from_queue(mock_msg)
consumer.consumer.acknowledge.assert_called_once_with(mock_msg)
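# ---------------------------------------------------------------------------
# Reference sketch
# ---------------------------------------------------------------------------
# A minimal sketch of the retry loop handle_one_from_queue() is expected to
# follow, reconstructed from the tests above and hedged as an assumption about
# the production Consumer: TooManyRequests triggers sleep-and-retry until
# rate_limit_timeout elapses, any other exception negative-acks immediately,
# and success acks the message.  Metric names mirror the tests; the metrics
# record_time() timer is omitted for brevity.
async def _reference_handle_one(consumer, msg):
    start = time.time()
    while True:
        try:
            await consumer.handler(msg, consumer, consumer.flow)
            consumer.consumer.acknowledge(msg)
            consumer.metrics.process("success")
            return
        except TooManyRequests:
            consumer.metrics.rate_limit()
            if time.time() - start > consumer.rate_limit_timeout:
                # Retries exhausted: give the message back to the broker
                consumer.consumer.negative_acknowledge(msg)
                return
            await asyncio.sleep(consumer.rate_limit_retry_time)
        except Exception:
            consumer.consumer.negative_acknowledge(msg)
            consumer.metrics.process("error")
            return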
# ---------------------------------------------------------------------------
# Metrics integration
# ---------------------------------------------------------------------------
class TestMetricsIntegration:
@pytest.mark.asyncio
async def test_success_metric_on_success(self):
consumer = _make_consumer(handler=AsyncMock())
mock_msg = _make_msg()
consumer.consumer = MagicMock()
mock_metrics = MagicMock()
mock_metrics.record_time.return_value.__enter__ = MagicMock()
mock_metrics.record_time.return_value.__exit__ = MagicMock()
consumer.metrics = mock_metrics
await consumer.handle_one_from_queue(mock_msg)
mock_metrics.process.assert_called_once_with("success")
@pytest.mark.asyncio
async def test_error_metric_on_failure(self):
async def failing(msg, c, f):
raise ValueError("fail")
consumer = _make_consumer(handler=failing)
mock_msg = _make_msg()
consumer.consumer = MagicMock()
mock_metrics = MagicMock()
consumer.metrics = mock_metrics
await consumer.handle_one_from_queue(mock_msg)
mock_metrics.process.assert_called_once_with("error")
@pytest.mark.asyncio
async def test_rate_limit_metric_on_too_many_requests(self):
call_count = 0
async def handler(msg, c, f):
nonlocal call_count
call_count += 1
if call_count == 1:
raise TooManyRequests("limited")
consumer = _make_consumer(
handler=handler,
rate_limit_retry_time=0.01,
)
mock_msg = _make_msg()
consumer.consumer = MagicMock()
mock_metrics = MagicMock()
mock_metrics.record_time.return_value.__enter__ = MagicMock()
mock_metrics.record_time.return_value.__exit__ = MagicMock(return_value=False)
consumer.metrics = mock_metrics
await consumer.handle_one_from_queue(mock_msg)
mock_metrics.rate_limit.assert_called_once()
# ---------------------------------------------------------------------------
# Stop / running flag
# ---------------------------------------------------------------------------
class TestStopBehaviour:
@pytest.mark.asyncio
async def test_stop_sets_running_false(self):
consumer = _make_consumer()
consumer.running = True
await consumer.stop()
assert consumer.running is False
def test_initial_running_state(self):
consumer = _make_consumer()
assert consumer.running is True

@@ -0,0 +1,136 @@
"""
Tests for MessageDispatcher semaphore-based concurrency enforcement.
Verifies that the dispatcher limits concurrent message processing to
max_workers via asyncio.Semaphore.
"""
import asyncio
import pytest
from unittest.mock import MagicMock, AsyncMock, patch
from trustgraph.rev_gateway.dispatcher import MessageDispatcher
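# A minimal sketch of the handle_message() behaviour these tests rely on,
# written as an assumption about the real MessageDispatcher rather than a copy
# of it: acquire the semaphore so at most max_workers messages are processed
# at once, register the current task in active_tasks, delegate to
# _process_message(), and release both even when processing raises.
async def _reference_handle_message(dispatcher, message):
    async with dispatcher.semaphore:
        task = asyncio.current_task()
        dispatcher.active_tasks.add(task)
        try:
            return await dispatcher._process_message(message)
        finally:
            # Always deregister; "async with" releases the semaphore too
            dispatcher.active_tasks.discard(task)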
class TestSemaphoreEnforcement:
@pytest.mark.asyncio
async def test_semaphore_limits_concurrent_processing(self):
"""Only max_workers messages should be processed concurrently."""
max_workers = 2
dispatcher = MessageDispatcher(max_workers=max_workers)
concurrent_count = 0
max_concurrent = 0
processing_event = asyncio.Event()
async def slow_process(message):
nonlocal concurrent_count, max_concurrent
concurrent_count += 1
max_concurrent = max(max_concurrent, concurrent_count)
await asyncio.sleep(0.05)
concurrent_count -= 1
return {"id": message.get("id"), "response": {"ok": True}}
dispatcher._process_message = slow_process
# Launch more tasks than max_workers
messages = [
{"id": f"msg-{i}", "service": "test", "request": {}}
for i in range(5)
]
tasks = [
asyncio.create_task(dispatcher.handle_message(m))
for m in messages
]
await asyncio.gather(*tasks)
# At no point should more than max_workers have been active
assert max_concurrent <= max_workers
@pytest.mark.asyncio
async def test_semaphore_value_matches_max_workers(self):
for n in [1, 5, 20]:
dispatcher = MessageDispatcher(max_workers=n)
assert dispatcher.semaphore._value == n
@pytest.mark.asyncio
async def test_active_tasks_tracked(self):
"""Active tasks should be added/removed during processing."""
dispatcher = MessageDispatcher(max_workers=5)
task_was_tracked = False
original_process = dispatcher._process_message
async def tracking_process(message):
nonlocal task_was_tracked
# During processing, our task should be in active_tasks
if len(dispatcher.active_tasks) > 0:
task_was_tracked = True
return {"id": message.get("id"), "response": {"ok": True}}
dispatcher._process_message = tracking_process
await dispatcher.handle_message(
{"id": "test", "service": "test", "request": {}}
)
assert task_was_tracked
# After completion, task should be discarded
assert len(dispatcher.active_tasks) == 0
@pytest.mark.asyncio
async def test_semaphore_released_on_error(self):
"""Semaphore should be released even if processing raises."""
dispatcher = MessageDispatcher(max_workers=2)
async def failing_process(message):
raise RuntimeError("process failed")
dispatcher._process_message = failing_process
# Should not deadlock — semaphore must be released on error
with pytest.raises(RuntimeError):
await dispatcher.handle_message(
{"id": "test", "service": "test", "request": {}}
)
# Semaphore should be back at max
assert dispatcher.semaphore._value == 2
@pytest.mark.asyncio
async def test_single_worker_serializes_processing(self):
"""With max_workers=1, messages are processed one at a time."""
dispatcher = MessageDispatcher(max_workers=1)
order = []
async def ordered_process(message):
msg_id = message["id"]
order.append(f"start-{msg_id}")
await asyncio.sleep(0.02)
order.append(f"end-{msg_id}")
return {"id": msg_id, "response": {"ok": True}}
dispatcher._process_message = ordered_process
messages = [{"id": str(i), "service": "t", "request": {}} for i in range(3)]
tasks = [asyncio.create_task(dispatcher.handle_message(m)) for m in messages]
await asyncio.gather(*tasks)
# With semaphore=1, each message should complete before next starts
# Check that no two "start" entries appear without an intervening "end"
active = 0
max_active = 0
for event in order:
if event.startswith("start"):
active += 1
max_active = max(max_active, active)
elif event.startswith("end"):
active -= 1
assert max_active == 1

@@ -0,0 +1,268 @@
"""
Tests for Graph RAG concurrent query execution.
Covers: execute_batch_triple_queries concurrent task spawning,
exception handling in gather, and result aggregation.
"""
import asyncio
import pytest
from unittest.mock import MagicMock, AsyncMock
from trustgraph.retrieval.graph_rag.graph_rag import Query, LRUCacheWithTTL
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_query(
triples_client=None,
entity_limit=50,
triple_limit=30,
max_subgraph_size=1000,
max_path_length=2,
):
"""Create a Query object with mocked rag dependencies."""
rag = MagicMock()
rag.triples_client = triples_client or AsyncMock()
rag.label_cache = LRUCacheWithTTL()
query = Query(
rag=rag,
user="test-user",
collection="test-collection",
verbose=False,
entity_limit=entity_limit,
triple_limit=triple_limit,
max_subgraph_size=max_subgraph_size,
max_path_length=max_path_length,
)
return query
def _make_triple(s, p, o):
"""Create a simple mock triple."""
t = MagicMock()
t.s = s
t.p = p
t.o = o
return t
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
class TestBatchTripleQueries:
@pytest.mark.asyncio
async def test_three_queries_per_entity(self):
"""Each entity should generate 3 concurrent queries (s, p, o positions)."""
client = AsyncMock()
client.query_stream = AsyncMock(return_value=[])
query = _make_query(triples_client=client)
entities = ["entity-1"]
await query.execute_batch_triple_queries(entities, limit_per_entity=10)
assert client.query_stream.call_count == 3
@pytest.mark.asyncio
async def test_multiple_entities_multiply_queries(self):
"""N entities should produce N*3 concurrent queries."""
client = AsyncMock()
client.query_stream = AsyncMock(return_value=[])
query = _make_query(triples_client=client)
entities = ["e1", "e2", "e3"]
await query.execute_batch_triple_queries(entities, limit_per_entity=10)
assert client.query_stream.call_count == 9 # 3 * 3
@pytest.mark.asyncio
async def test_queries_executed_concurrently(self):
"""All queries should run concurrently via asyncio.gather."""
concurrent_count = 0
max_concurrent = 0
async def tracking_query(**kwargs):
nonlocal concurrent_count, max_concurrent
concurrent_count += 1
max_concurrent = max(max_concurrent, concurrent_count)
await asyncio.sleep(0.02)
concurrent_count -= 1
return []
client = AsyncMock()
client.query_stream = tracking_query
query = _make_query(triples_client=client)
entities = ["e1", "e2", "e3"]
await query.execute_batch_triple_queries(entities, limit_per_entity=5)
# All 9 queries should have run concurrently
assert max_concurrent == 9
@pytest.mark.asyncio
async def test_results_aggregated(self):
"""Results from all queries should be combined into a single list."""
triple_a = _make_triple("a", "p", "b")
triple_b = _make_triple("c", "p", "d")
call_count = 0
async def alternating_results(**kwargs):
nonlocal call_count
call_count += 1
if call_count % 2 == 0:
return [triple_a]
return [triple_b]
client = AsyncMock()
client.query_stream = alternating_results
query = _make_query(triples_client=client)
result = await query.execute_batch_triple_queries(
["e1"], limit_per_entity=10
)
# 3 queries, alternating results
assert len(result) == 3
@pytest.mark.asyncio
async def test_exception_in_one_query_does_not_block_others(self):
"""If one query raises, other results are still collected."""
good_triple = _make_triple("a", "p", "b")
call_count = 0
async def mixed_results(**kwargs):
nonlocal call_count
call_count += 1
if call_count == 2:
raise RuntimeError("query failed")
return [good_triple]
client = AsyncMock()
client.query_stream = mixed_results
query = _make_query(triples_client=client)
result = await query.execute_batch_triple_queries(
["e1"], limit_per_entity=10
)
# 3 queries: 2 succeed, 1 fails → 2 triples
assert len(result) == 2
@pytest.mark.asyncio
async def test_none_results_filtered(self):
"""None results from queries should be filtered out."""
call_count = 0
async def sometimes_none(**kwargs):
nonlocal call_count
call_count += 1
if call_count == 1:
return None
return [_make_triple("a", "p", "b")]
client = AsyncMock()
client.query_stream = sometimes_none
query = _make_query(triples_client=client)
result = await query.execute_batch_triple_queries(
["e1"], limit_per_entity=10
)
# 3 queries: 1 returns None, 2 return triples
assert len(result) == 2
@pytest.mark.asyncio
async def test_empty_entities_no_queries(self):
"""Empty entity list should produce no queries."""
client = AsyncMock()
client.query_stream = AsyncMock(return_value=[])
query = _make_query(triples_client=client)
result = await query.execute_batch_triple_queries([], limit_per_entity=10)
assert result == []
client.query_stream.assert_not_called()
@pytest.mark.asyncio
async def test_query_params_correct(self):
"""Each query should use correct s/p/o positions and params."""
client = AsyncMock()
client.query_stream = AsyncMock(return_value=[])
query = _make_query(triples_client=client)
entities = ["ent-1"]
await query.execute_batch_triple_queries(entities, limit_per_entity=15)
calls = client.query_stream.call_args_list
assert len(calls) == 3
# First call: s=entity, p=None, o=None
assert calls[0].kwargs["s"] == "ent-1"
assert calls[0].kwargs["p"] is None
assert calls[0].kwargs["o"] is None
assert calls[0].kwargs["limit"] == 15
assert calls[0].kwargs["user"] == "test-user"
assert calls[0].kwargs["collection"] == "test-collection"
assert calls[0].kwargs["batch_size"] == 20
# Second call: s=None, p=entity, o=None
assert calls[1].kwargs["s"] is None
assert calls[1].kwargs["p"] == "ent-1"
assert calls[1].kwargs["o"] is None
# Third call: s=None, p=None, o=entity
assert calls[2].kwargs["s"] is None
assert calls[2].kwargs["p"] is None
assert calls[2].kwargs["o"] == "ent-1"
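# A minimal sketch of execute_batch_triple_queries() as the tests above assume
# it behaves; it is an assumption about the production Query class, not a copy
# of it (attribute names such as query.rag, query.user and query.collection
# are inferred from the helpers).  One query is issued per s/p/o position per
# entity, all are run concurrently with asyncio.gather, and exceptions and
# None results are dropped before the remaining triple lists are flattened.
async def _reference_batch_triple_queries(query, entities, limit_per_entity):
    coros = []
    for e in entities:
        for s, p, o in [(e, None, None), (None, e, None), (None, None, e)]:
            coros.append(query.rag.triples_client.query_stream(
                s=s, p=p, o=o,
                limit=limit_per_entity,
                user=query.user, collection=query.collection,
                batch_size=20,
            ))
    results = await asyncio.gather(*coros, return_exceptions=True)
    triples = []
    for r in results:
        if isinstance(r, Exception) or r is None:
            continue                      # failed or empty query: skip it
        triples.extend(r)
    return triples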
class TestLRUCacheWithTTL:
def test_put_and_get(self):
cache = LRUCacheWithTTL(max_size=10, ttl=60)
cache.put("key1", "value1")
assert cache.get("key1") == "value1"
def test_get_missing_returns_none(self):
cache = LRUCacheWithTTL()
assert cache.get("nonexistent") is None
def test_max_size_eviction(self):
cache = LRUCacheWithTTL(max_size=2, ttl=60)
cache.put("a", 1)
cache.put("b", 2)
cache.put("c", 3) # Should evict "a"
assert cache.get("a") is None
assert cache.get("b") == 2
assert cache.get("c") == 3
def test_lru_order(self):
cache = LRUCacheWithTTL(max_size=2, ttl=60)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a") # Access "a" — now "b" is LRU
cache.put("c", 3) # Should evict "b"
assert cache.get("a") == 1
assert cache.get("b") is None
assert cache.get("c") == 3
def test_ttl_expiration(self):
cache = LRUCacheWithTTL(max_size=10, ttl=0) # TTL=0 means instant expiry
cache.put("key", "value")
# With TTL=0, any time check > 0 means expired
import time
time.sleep(0.01)
assert cache.get("key") is None
def test_update_existing_key(self):
cache = LRUCacheWithTTL(max_size=10, ttl=60)
cache.put("key", "v1")
cache.put("key", "v2")
assert cache.get("key") == "v2"