Remove redundant metadata (#685)

The metadata field (a list of triples) in the pipeline Metadata class
was redundant. Document metadata triples already flow directly from
the librarian to the triple store via emit_document_provenance(), so
they do not need to pass through the extraction pipeline.

Additionally, the chunker and the PDF decoder were overwriting the
metadata field with [] anyway, so any metadata passed through the
pipeline was being discarded.

Changes:
- Remove metadata field from Metadata dataclass
  (schema/core/metadata.py)
- Update all Metadata instantiations to remove metadata=[]
  parameter
- Remove metadata handling from translators (document_loading,
  knowledge)
- Remove metadata consumption from extractors (ontology, agent)
- Update gateway serializers and import handlers
- Update all unit, integration, and contract tests
This commit is contained in:
cybermaggedon 2026-03-11 10:51:39 +00:00 committed by GitHub
parent 1837d73f34
commit aa4f5c6c00
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
37 changed files with 106 additions and 343 deletions

View file

@ -29,11 +29,10 @@ class Triple:
self.o = o
class Metadata:
def __init__(self, id, user, collection, metadata):
def __init__(self, id, user, collection):
self.id = id
self.user = user
self.collection = collection
self.metadata = metadata
class Triples:
def __init__(self, metadata, triples):
@ -110,7 +109,6 @@ def sample_triples(sample_triple):
id="test-doc-123",
user="test_user",
collection="test_collection",
metadata=[]
)
return Triples(
@ -126,7 +124,6 @@ def sample_chunk():
id="test-chunk-456",
user="test_user",
collection="test_collection",
metadata=[]
)
return Chunk(

View file

@ -51,13 +51,6 @@ class TestAgentKgExtractor:
"""Sample metadata for testing"""
return Metadata(
id="doc123",
metadata=[
Triple(
s=Term(type=IRI, iri="doc123"),
p=Term(type=IRI, iri="http://example.org/type"),
o=Term(type=LITERAL, value="document")
)
]
)
@pytest.fixture
@ -274,7 +267,7 @@ This is not JSON at all
def test_process_extraction_data_no_metadata_id(self, agent_extractor):
"""Test processing when metadata has no ID"""
metadata = Metadata(id=None, metadata=[])
metadata = Metadata(id=None)
data = [
{"type": "definition", "entity": "Test Entity", "definition": "Test definition"}
]
@ -345,8 +338,6 @@ This is not JSON at all
assert sent_triples.metadata.id == sample_metadata.id
assert sent_triples.metadata.user == sample_metadata.user
assert sent_triples.metadata.collection == sample_metadata.collection
# Note: metadata.metadata is now empty array in the new implementation
assert sent_triples.metadata.metadata == []
assert len(sent_triples.triples) == 1
assert sent_triples.triples[0].s.iri == "test:subject"
@ -371,8 +362,6 @@ This is not JSON at all
assert sent_contexts.metadata.id == sample_metadata.id
assert sent_contexts.metadata.user == sample_metadata.user
assert sent_contexts.metadata.collection == sample_metadata.collection
# Note: metadata.metadata is now empty array in the new implementation
assert sent_contexts.metadata.metadata == []
assert len(sent_contexts.entities) == 1
assert sent_contexts.entities[0].entity.iri == "test:entity"

View file

@ -177,13 +177,13 @@ class TestAgentKgExtractionEdgeCases:
pass
# Test with metadata without ID
metadata = Metadata(id=None, metadata=[])
metadata = Metadata(id=None)
triples, contexts = agent_extractor.process_extraction_data([], metadata)
assert len(triples) == 0
assert len(contexts) == 0
# Test with metadata with empty string ID
metadata = Metadata(id="", metadata=[])
metadata = Metadata(id="")
data = [{"type": "definition", "entity": "Test", "definition": "Test def"}]
triples, contexts = agent_extractor.process_extraction_data(data, metadata)
@ -193,7 +193,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_special_entity_names(self, agent_extractor):
"""Test processing with special characters in entity names"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
special_entities = [
"Entity with spaces",
@ -225,7 +225,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_very_long_definitions(self, agent_extractor):
"""Test processing with very long entity definitions"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
# Create very long definition
long_definition = "This is a very long definition. " * 1000
@ -247,7 +247,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_duplicate_entities(self, agent_extractor):
"""Test processing with duplicate entity names"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
data = [
{"type": "definition", "entity": "Machine Learning", "definition": "First definition"},
@ -269,7 +269,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_empty_strings(self, agent_extractor):
"""Test processing with empty strings in data"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
data = [
{"type": "definition", "entity": "", "definition": "Definition for empty entity"},
@ -291,7 +291,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_nested_json_in_strings(self, agent_extractor):
"""Test processing when definitions contain JSON-like strings"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
data = [
{
@ -315,7 +315,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_boolean_object_entity_variations(self, agent_extractor):
"""Test processing with various boolean values for object-entity"""
metadata = Metadata(id="doc123", metadata=[])
metadata = Metadata(id="doc123")
data = [
# Explicit True
@ -343,7 +343,7 @@ class TestAgentKgExtractionEdgeCases:
@pytest.mark.asyncio
async def test_emit_empty_collections(self, agent_extractor):
"""Test emitting empty triples and entity contexts"""
metadata = Metadata(id="test", metadata=[])
metadata = Metadata(id="test")
# Test emitting empty triples
mock_publisher = AsyncMock()
@ -389,7 +389,7 @@ class TestAgentKgExtractionEdgeCases:
def test_process_extraction_data_performance_large_dataset(self, agent_extractor):
"""Test performance with large extraction datasets"""
metadata = Metadata(id="large-doc", metadata=[])
metadata = Metadata(id="large-doc")
# Create large dataset in JSONL format
num_definitions = 1000

View file

@ -314,7 +314,6 @@ class TestObjectExtractionBusinessLogic:
id="test-extraction-001",
user="test_user",
collection="test_collection",
metadata=[]
)
values = [{

View file

@ -373,7 +373,6 @@ class TestTripleConstructionLogic:
id="test-doc-123",
user="test_user",
collection="test_collection",
metadata=[]
)
# Act