mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 00:16:23 +02:00
Adds a RabbitMQ backend as an alternative to Pulsar, selectable via PUBSUB_BACKEND=rabbitmq. Both backends implement the same PubSubBackend protocol — no application code changes needed to switch. RabbitMQ topology: - Single topic exchange per topicspace (e.g. 'tg') - Routing key derived from queue class and topic name - Shared consumers: named queue bound to exchange (competing, round-robin) - Exclusive consumers: anonymous auto-delete queue (broadcast, each gets every message). Used by Subscriber and config push consumer. - Thread-local producer connections (pika is not thread-safe) - Push-based consumption via basic_consume with process_data_events for heartbeat processing Consumer model changes: - Consumer class creates one backend consumer per concurrent task (required for pika thread safety, harmless for Pulsar) - Consumer class accepts consumer_type parameter - Subscriber passes consumer_type='exclusive' for broadcast semantics - Config push consumer uses consumer_type='exclusive' so every processor instance receives config updates - handle_one_from_queue receives consumer as parameter for correct per-connection ack/nack LibrarianClient: - New shared client class replacing duplicated librarian request-response code across 6+ services (chunking, decoders, RAG, etc.) 
- Uses stream-document instead of get-document-content for fetching document content in 1MB chunks (avoids broker message size limits) - Standalone object (self.librarian = LibrarianClient(...)) not a mixin - get-document-content marked deprecated in schema and OpenAPI spec Serialisation: - Extracted dataclass_to_dict/dict_to_dataclass to shared serialization.py (used by both Pulsar and RabbitMQ backends) Librarian queues: - Changed from flow class (persistent) back to request/response class now that stream-document eliminates large single messages - API upload chunk size reduced from 5MB to 3MB to stay under broker limits after base64 encoding Factory and CLI: - get_pubsub() handles 'rabbitmq' backend with RabbitMQ connection params - add_pubsub_args() includes RabbitMQ options (host, port, credentials) - add_pubsub_args(standalone=True) defaults to localhost for CLI tools - init_trustgraph skips Pulsar admin setup for non-Pulsar backends - tg-dump-queues and tg-monitor-prompts use backend abstraction - BaseClient and ConfigClient accept generic pubsub config
208 lines
6.2 KiB
Python
Executable file
208 lines
6.2 KiB
Python
Executable file
|
|
"""
|
|
Simple decoder, accepts PDF documents on input, outputs pages from the
|
|
PDF document as text as separate output objects.
|
|
|
|
Supports both inline document data and fetching from librarian via Pulsar
|
|
for large documents.
|
|
"""
|
|
|
|
import base64
|
|
import logging
|
|
import pytesseract
|
|
from pdf2image import convert_from_bytes
|
|
|
|
from ... schema import Document, TextDocument, Metadata
|
|
from ... schema import librarian_request_queue, librarian_response_queue
|
|
from ... schema import Triples
|
|
from ... base import FlowProcessor, ConsumerSpec, ProducerSpec, LibrarianClient
|
|
|
|
from ... provenance import (
|
|
document_uri, page_uri as make_page_uri, derived_entity_triples,
|
|
set_graph, GRAPH_SOURCE,
|
|
)
|
|
|
|
# Component identification for provenance
COMPONENT_NAME = "tesseract-ocr-decoder"
COMPONENT_VERSION = "1.0.0"

# Module logger
logger = logging.getLogger(__name__)

# Default service identity, overridable via the "id" parameter / CLI.
default_ident = "document-decoder"

# Default librarian queue names, re-exported from the shared schema so the
# CLI help text and argument defaults stay in sync with the rest of the
# system.
default_librarian_request_queue = librarian_request_queue

default_librarian_response_queue = librarian_response_queue
|
|
|
|
class Processor(FlowProcessor):
    """Tesseract OCR PDF decoder.

    Consumes ``Document`` messages, OCRs every page of the PDF with
    pytesseract, stores each page as a child document in the librarian,
    emits provenance triples for each page, and forwards a page reference
    (empty text body) to the chunker, which fetches the content from the
    librarian itself.
    """

    def __init__(self, **params):
        """Initialise the processor.

        Registers the input consumer and the two producers (page output
        and provenance triples), and creates the shared librarian client.

        :param params: passed through to FlowProcessor; "id" defaults to
            ``default_ident``.
        """

        # NOTE: renamed from `id` to avoid shadowing the builtin; the
        # externally-visible "id" key is unchanged.
        ident = params.get("id", default_ident)

        super(Processor, self).__init__(
            **params | {
                "id": ident,
            }
        )

        # Incoming PDF documents (inline data or librarian reference).
        self.register_specification(
            ConsumerSpec(
                name = "input",
                schema = Document,
                handler = self.on_message,
            )
        )

        # Per-page text output forwarded to the chunker.
        self.register_specification(
            ProducerSpec(
                name = "output",
                schema = TextDocument,
            )
        )

        # Provenance triples describing each extracted page.
        self.register_specification(
            ProducerSpec(
                name = "triples",
                schema = Triples,
            )
        )

        # Librarian client: shared request/response helper used to fetch
        # document metadata/content and to save child (page) documents.
        self.librarian = LibrarianClient(
            id=ident, backend=self.pubsub, taskgroup=self.taskgroup,
        )

        logger.info("PDF OCR processor initialized")

    async def start(self):
        """Start the flow processor, then the librarian client."""
        await super(Processor, self).start()
        await self.librarian.start()

    async def on_message(self, msg, consumer, flow):
        """Handle one incoming PDF document.

        Fetches the PDF (inline or via librarian), OCRs each page, saves
        pages as child documents, and emits provenance + page references.
        Pages that fail OCR are logged and skipped.
        """

        logger.info("PDF message received")

        v = msg.value()

        # Lazy %-style args: avoid f-string formatting when the level is
        # disabled (message/page hot path).
        logger.info("Decoding %s...", v.metadata.id)

        # Resolve the PDF bytes: either fetch from the librarian (with a
        # MIME-type sanity check first) or decode the inline payload.
        # Single guard replaces the original duplicated `if v.document_id`.
        if v.document_id:

            doc_meta = await self.librarian.fetch_document_metadata(
                document_id=v.document_id,
                user=v.metadata.user,
            )
            if doc_meta and doc_meta.kind and doc_meta.kind != "application/pdf":
                logger.error(
                    "Unsupported MIME type: %s. "
                    "Tesseract OCR decoder only handles application/pdf. "
                    "Ignoring document %s.",
                    doc_meta.kind, v.metadata.id,
                )
                return

            logger.info("Fetching document %s from librarian...", v.document_id)
            content = await self.librarian.fetch_document_content(
                document_id=v.document_id,
                user=v.metadata.user,
            )
            # Content may arrive as str (base64 text) — normalise to bytes
            # before decoding.
            if isinstance(content, str):
                content = content.encode('utf-8')
            blob = base64.b64decode(content)
            logger.info("Fetched %d bytes from librarian", len(blob))

        else:
            blob = base64.b64decode(v.data)

        # The source document ID: librarian reference when present,
        # otherwise the metadata ID.
        source_doc_id = v.document_id or v.metadata.id

        pages = convert_from_bytes(blob)

        for ix, page in enumerate(pages):

            page_num = ix + 1  # 1-indexed

            # Best-effort OCR: a failed page is skipped, not fatal.
            try:
                text = pytesseract.image_to_string(page, lang='eng')
            except Exception as e:
                logger.warning("Page %d did not OCR: %s", page_num, e)
                continue

            logger.debug("Processing page %d", page_num)

            # Generate unique page ID
            pg_uri = make_page_uri()
            page_doc_id = pg_uri
            page_content = text.encode("utf-8")

            # Save page as child document in librarian
            await self.librarian.save_child_document(
                doc_id=page_doc_id,
                parent_id=source_doc_id,
                user=v.metadata.user,
                content=page_content,
                document_type="page",
                title=f"Page {page_num}",
            )

            # Emit provenance triples linking the page to its source doc
            # and to this component/version.
            doc_uri = document_uri(source_doc_id)

            prov_triples = derived_entity_triples(
                entity_uri=pg_uri,
                parent_uri=doc_uri,
                component_name=COMPONENT_NAME,
                component_version=COMPONENT_VERSION,
                label=f"Page {page_num}",
                page_number=page_num,
            )

            await flow("triples").send(Triples(
                metadata=Metadata(
                    id=pg_uri,
                    root=v.metadata.root,
                    user=v.metadata.user,
                    collection=v.metadata.collection,
                ),
                triples=set_graph(prov_triples, GRAPH_SOURCE),
            ))

            # Forward page document ID to chunker
            # Chunker will fetch content from librarian
            r = TextDocument(
                metadata=Metadata(
                    id=pg_uri,
                    root=v.metadata.root,
                    user=v.metadata.user,
                    collection=v.metadata.collection,
                ),
                document_id=page_doc_id,
                text=b"",  # Empty, chunker will fetch from librarian
            )

            await flow("output").send(r)

        logger.info("PDF decoding complete")

    @staticmethod
    def add_args(parser):
        """Add processor CLI arguments on top of FlowProcessor's."""

        FlowProcessor.add_args(parser)

        parser.add_argument(
            '--librarian-request-queue',
            default=default_librarian_request_queue,
            help=f'Librarian request queue (default: {default_librarian_request_queue})',
        )

        parser.add_argument(
            '--librarian-response-queue',
            default=default_librarian_response_queue,
            help=f'Librarian response queue (default: {default_librarian_response_queue})',
        )
|
def run():

    # Entry point: launch the processor with the default service identity,
    # using the module docstring as the CLI description.
    Processor.launch(default_ident, __doc__)