mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 00:16:23 +02:00
Remove Pulsar-specific concepts from application code so that the pub/sub backend is swappable via configuration. Rename translators: - to_pulsar/from_pulsar → decode/encode across all translator classes, dispatch handlers, and tests (55+ files) - from_response_with_completion → encode_with_completion - Remove pulsar.schema.Record from translator base class Queue naming (CLASS:TOPICSPACE:TOPIC): - Replace topic() helper with queue() using new format: flow:tg:name, request:tg:name, response:tg:name, state:tg:name - Queue class implies persistence/TTL (no QoS in names) - Update Pulsar backend map_topic() to parse new format - Librarian queues use flow class (persistent, for chunking) - Config push uses state class (persistent, last-value) - Remove 15 dead topic imports from schema files - Update init_trustgraph.py namespace: config → state Confine Pulsar to pulsar_backend.py: - Delete legacy PulsarClient class from pubsub.py - Move add_args to add_pubsub_args() with standalone flag for CLI tools (defaults to localhost) - PulsarBackendConsumer.receive() catches _pulsar.Timeout, raises standard TimeoutError - Remove Pulsar imports from: async_processor, flow_processor, log_level, all 11 client files, 4 storage writers, gateway service, gateway config receiver - Remove log_level/LoggerLevel from client API - Rewrite tg-monitor-prompts to use backend abstraction - Update tg-dump-queues to use add_pubsub_args Also: pubsub-abstraction.md tech spec covering problem statement, design goals, as-is requirements, candidate broker assessment, approach, and implementation order.
62 lines
No EOL
2.1 KiB
Python
import json
|
|
from typing import Dict, Any, Tuple
|
|
from ...schema import PromptRequest, PromptResponse
|
|
from .base import MessageTranslator
|
|
|
|
|
|
class PromptRequestTranslator(MessageTranslator):
    """Translator for PromptRequest schema objects.

    Maps between plain dict payloads and PromptRequest schema
    instances, independent of the pub/sub backend in use.
    """

    def decode(self, data: Dict[str, Any]) -> PromptRequest:
        """Build a PromptRequest from a raw dict payload.

        Accepts either a "terms" key (values already JSON-encoded
        strings) or a "variables" key (raw values, JSON-encoded here);
        "variables" wins when both are present.
        """
        if "variables" in data:
            # The service expects term values as JSON strings.
            terms = {
                key: json.dumps(value)
                for key, value in data["variables"].items()
            }
        else:
            terms = data.get("terms", {})

        return PromptRequest(
            id=data.get("id"),
            terms=terms,
            streaming=data.get("streaming", False)
        )

    def encode(self, obj: PromptRequest) -> Dict[str, Any]:
        """Convert a PromptRequest into a plain dict for transport.

        Only truthy fields are emitted, so falsy id/terms are omitted
        from the payload entirely.
        """
        encoded: Dict[str, Any] = {}
        if obj.id:
            encoded["id"] = obj.id
        if obj.terms:
            encoded["terms"] = obj.terms
        return encoded
|
|
|
|
|
|
class PromptResponseTranslator(MessageTranslator):
    """Translator for PromptResponse schema objects.

    Encodes PromptResponse instances into plain dicts for transport;
    decoding (dict -> PromptResponse) is intentionally unsupported.
    """

    def decode(self, data: Dict[str, Any]) -> PromptResponse:
        """Not supported: responses flow outward (service -> client).

        Raises:
            NotImplementedError: always.
        """
        # Error text updated: translators are backend-agnostic, so the
        # message no longer references Pulsar (stale from the old
        # to_pulsar/from_pulsar naming).
        raise NotImplementedError("PromptResponse decoding not typically needed")

    def encode(self, obj: PromptResponse) -> Dict[str, Any]:
        """Convert a PromptResponse into a plain dict for transport."""
        result: Dict[str, Any] = {}

        # Include text field if present (even if empty string)
        if obj.text is not None:
            result["text"] = obj.text

        # Include object field if present
        if obj.object is not None:
            result["object"] = obj.object

        # Always include end_of_stream flag for streaming support.
        # NOTE(review): a missing attribute defaults to False here but
        # to True in encode_with_completion — confirm the asymmetry is
        # intentional (non-streaming responses treated as final).
        result["end_of_stream"] = getattr(obj, "end_of_stream", False)

        return result

    def encode_with_completion(self, obj: PromptResponse) -> Tuple[Dict[str, Any], bool]:
        """Returns (response_dict, is_final).

        is_final reflects the end_of_stream field; a response without
        the attribute is treated as final so non-streaming requests
        complete.
        """
        is_final = getattr(obj, "end_of_stream", True)
        return self.encode(obj), is_final