mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 00:16:23 +02:00
GraphRAG Query-Time Explainability (#677)
Implements full explainability pipeline for GraphRAG queries, enabling
traceability from answers back to source documents.
Renamed throughout for clarity:
- provenance_callback → explain_callback
- provenance_id → explain_id
- provenance_collection → explain_collection
- message_type "provenance" → "explain"
- Queue name "provenance" → "explainability"
GraphRAG queries now emit explainability events as they execute:
1. Session - query text and timestamp
2. Retrieval - edges retrieved from subgraph
3. Selection - selected edges with LLM reasoning (JSONL with id +
reasoning)
4. Answer - reference to synthesized response
Events stream via explain_callback during query(), enabling
real-time UX.
- Answers stored in librarian service (not inline in graph - too large)
- Document ID as URN: urn:trustgraph:answer:{session_id}
- Graph stores tg:document reference (IRI) to librarian document
- Added librarian producer/consumer to graph-rag service
- get_labelgraph() now returns (labeled_edges, uri_map)
- uri_map maps edge_id(label_s, label_p, label_o) →
(uri_s, uri_p, uri_o)
- Explainability data stores original URIs, not labels
- Enables tracing edges back to reifying statements via tg:reifies
- Added serialize_triple() to query service (matches storage format)
- get_term_value() now handles TRIPLE type terms
- Enables querying by quoted triple in object position:
?stmt tg:reifies <<s p o>>
- Displays real-time explainability events during query
- Resolves rdfs:label for edge components (s, p, o)
- Traces source chain via prov:wasDerivedFrom to root document
- Output: "Source: Chunk 1 → Page 2 → Document Title"
- Label caching to avoid repeated queries
GraphRagResponse:
- explain_id: str | None
- explain_collection: str | None
- message_type: str ("chunk" or "explain")
- end_of_session: bool
trustgraph-base/trustgraph/provenance/:
- namespaces.py - Added TG_DOCUMENT predicate
- triples.py - answer_triples() supports document_id reference
- uris.py - Added edge_selection_uri()
trustgraph-base/trustgraph/schema/services/retrieval.py:
- GraphRagResponse with explain_id, explain_collection, end_of_session
trustgraph-flow/trustgraph/retrieval/graph_rag/:
- graph_rag.py - URI preservation, streaming answer accumulation
- rag.py - Librarian integration, real-time explain emission
trustgraph-flow/trustgraph/query/triples/cassandra/service.py:
- Quoted triple serialization for query matching
trustgraph-cli/trustgraph/cli/invoke_graph_rag.py:
- Full explainability display with label resolution and source tracing
This commit is contained in:
parent
d2d71f859d
commit
7a6197d8c3
24 changed files with 2001 additions and 323 deletions
|
|
@ -110,15 +110,25 @@ class AsyncSocketClient:
|
|||
|
||||
# Parse different chunk types
|
||||
chunk = self._parse_chunk(resp)
|
||||
yield chunk
|
||||
if chunk is not None: # Skip provenance messages in streaming
|
||||
yield chunk
|
||||
|
||||
# Check if this is the final chunk
|
||||
if resp.get("end_of_stream") or resp.get("end_of_dialog") or response.get("complete"):
|
||||
# Check if this is the final message
|
||||
# end_of_session indicates entire session is complete (including provenance)
|
||||
# end_of_dialog is for agent dialogs
|
||||
# complete is from the gateway envelope
|
||||
if resp.get("end_of_session") or resp.get("end_of_dialog") or response.get("complete"):
|
||||
break
|
||||
|
||||
def _parse_chunk(self, resp: Dict[str, Any]):
|
||||
"""Parse response chunk into appropriate type"""
|
||||
"""Parse response chunk into appropriate type. Returns None for non-content messages."""
|
||||
chunk_type = resp.get("chunk_type")
|
||||
message_type = resp.get("message_type")
|
||||
|
||||
# Handle new GraphRAG message format with message_type
|
||||
if message_type == "provenance":
|
||||
# Provenance messages are not yielded to user - they're metadata
|
||||
return None
|
||||
|
||||
if chunk_type == "thought":
|
||||
return AgentThought(
|
||||
|
|
@ -143,7 +153,7 @@ class AsyncSocketClient:
|
|||
end_of_message=resp.get("end_of_message", False)
|
||||
)
|
||||
else:
|
||||
# RAG-style chunk (or generic chunk)
|
||||
# RAG-style chunk (or generic chunk with message_type="chunk")
|
||||
# Text-completion uses "response" field, RAG uses "chunk" field, Prompt uses "text" field
|
||||
content = resp.get("response", resp.get("chunk", resp.get("text", "")))
|
||||
return RAGChunk(
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ import websockets
|
|||
from typing import Optional, Dict, Any, Iterator, Union, List
|
||||
from threading import Lock
|
||||
|
||||
from . types import AgentThought, AgentObservation, AgentAnswer, RAGChunk, StreamingChunk
|
||||
from . types import AgentThought, AgentObservation, AgentAnswer, RAGChunk, StreamingChunk, ProvenanceEvent
|
||||
from . exceptions import ProtocolException, raise_from_error_dict
|
||||
|
||||
|
||||
|
|
@ -310,15 +310,28 @@ class SocketClient:
|
|||
|
||||
# Parse different chunk types
|
||||
chunk = self._parse_chunk(resp)
|
||||
yield chunk
|
||||
if chunk is not None: # Skip provenance messages in streaming
|
||||
yield chunk
|
||||
|
||||
# Check if this is the final chunk
|
||||
if resp.get("end_of_stream") or resp.get("end_of_dialog") or response.get("complete"):
|
||||
# Check if this is the final message
|
||||
# end_of_session indicates entire session is complete (including provenance)
|
||||
# end_of_dialog is for agent dialogs
|
||||
# complete is from the gateway envelope
|
||||
if resp.get("end_of_session") or resp.get("end_of_dialog") or response.get("complete"):
|
||||
break
|
||||
|
||||
def _parse_chunk(self, resp: Dict[str, Any]) -> StreamingChunk:
|
||||
"""Parse response chunk into appropriate type"""
|
||||
def _parse_chunk(self, resp: Dict[str, Any], include_provenance: bool = False) -> Optional[StreamingChunk]:
|
||||
"""Parse response chunk into appropriate type. Returns None for non-content messages."""
|
||||
chunk_type = resp.get("chunk_type")
|
||||
message_type = resp.get("message_type")
|
||||
|
||||
# Handle new GraphRAG message format with message_type
|
||||
if message_type == "provenance":
|
||||
if include_provenance:
|
||||
# Return provenance event for explainability
|
||||
return ProvenanceEvent(provenance_id=resp.get("provenance_id", ""))
|
||||
# Provenance messages are not yielded to user - they're metadata
|
||||
return None
|
||||
|
||||
if chunk_type == "thought":
|
||||
return AgentThought(
|
||||
|
|
@ -360,7 +373,7 @@ class SocketClient:
|
|||
end_of_dialog=resp.get("end_of_dialog", False)
|
||||
)
|
||||
else:
|
||||
# RAG-style chunk (or generic chunk)
|
||||
# RAG-style chunk (or generic chunk with message_type="chunk")
|
||||
# Text-completion uses "response" field, RAG uses "chunk" field, Prompt uses "text" field
|
||||
content = resp.get("response", resp.get("chunk", resp.get("text", "")))
|
||||
return RAGChunk(
|
||||
|
|
|
|||
|
|
@ -202,3 +202,29 @@ class RAGChunk(StreamingChunk):
|
|||
chunk_type: str = "rag"
|
||||
end_of_stream: bool = False
|
||||
error: Optional[Dict[str, str]] = None
|
||||
|
||||
@dataclasses.dataclass
class ProvenanceEvent:
    """
    Provenance event for explainability.

    Emitted during GraphRAG queries when explainable mode is enabled; each
    instance corresponds to one provenance node created while the query ran.

    Attributes:
        provenance_id: URI of the provenance node
            (e.g. urn:trustgraph:session:abc123)
        event_type: Kind of provenance event — one of "session", "retrieval",
            "selection", "answer"; derived from provenance_id, left "" when
            no marker is recognised.
    """
    provenance_id: str
    event_type: str = ""  # Filled in by __post_init__ from provenance_id

    def __post_init__(self):
        # Classify the event by the first recognised marker substring found
        # in the URI; order matches the original session → answer precedence.
        for marker in ("session", "retrieval", "selection", "answer"):
            if marker in self.provenance_id:
                self.event_type = marker
                break
|
||||
|
|
|
|||
|
|
@ -90,13 +90,31 @@ class GraphRagResponseTranslator(MessageTranslator):
|
|||
def from_pulsar(self, obj: GraphRagResponse) -> Dict[str, Any]:
|
||||
result = {}
|
||||
|
||||
# Include response content (even if empty string)
|
||||
# Include message_type
|
||||
message_type = getattr(obj, "message_type", "")
|
||||
if message_type:
|
||||
result["message_type"] = message_type
|
||||
|
||||
# Include response content for chunk messages
|
||||
if obj.response is not None:
|
||||
result["response"] = obj.response
|
||||
|
||||
# Include end_of_stream flag
|
||||
# Include explain_id for explain messages
|
||||
explain_id = getattr(obj, "explain_id", None)
|
||||
if explain_id:
|
||||
result["explain_id"] = explain_id
|
||||
|
||||
# Include explain_collection for explain messages
|
||||
explain_collection = getattr(obj, "explain_collection", None)
|
||||
if explain_collection:
|
||||
result["explain_collection"] = explain_collection
|
||||
|
||||
# Include end_of_stream flag (LLM stream complete)
|
||||
result["end_of_stream"] = getattr(obj, "end_of_stream", False)
|
||||
|
||||
# Include end_of_session flag (entire session complete)
|
||||
result["end_of_session"] = getattr(obj, "end_of_session", False)
|
||||
|
||||
# Always include error if present
|
||||
if hasattr(obj, 'error') and obj.error and obj.error.message:
|
||||
result["error"] = {"message": obj.error.message, "type": obj.error.type}
|
||||
|
|
@ -105,5 +123,6 @@ class GraphRagResponseTranslator(MessageTranslator):
|
|||
|
||||
def from_response_with_completion(self, obj: GraphRagResponse) -> Tuple[Dict[str, Any], bool]:
|
||||
"""Returns (response_dict, is_final)"""
|
||||
is_final = getattr(obj, 'end_of_stream', False)
|
||||
# Session is complete when end_of_session is True
|
||||
is_final = getattr(obj, 'end_of_session', False)
|
||||
return self.from_pulsar(obj), is_final
|
||||
|
|
@ -40,6 +40,11 @@ from . uris import (
|
|||
activity_uri,
|
||||
statement_uri,
|
||||
agent_uri,
|
||||
# Query-time provenance URIs
|
||||
query_session_uri,
|
||||
retrieval_uri,
|
||||
selection_uri,
|
||||
answer_uri,
|
||||
)
|
||||
|
||||
# Namespace constants
|
||||
|
|
@ -58,6 +63,8 @@ from . namespaces import (
|
|||
TG_CHUNK_SIZE, TG_CHUNK_OVERLAP, TG_COMPONENT_VERSION,
|
||||
TG_LLM_MODEL, TG_ONTOLOGY, TG_EMBEDDING_MODEL,
|
||||
TG_SOURCE_TEXT, TG_SOURCE_CHAR_OFFSET, TG_SOURCE_CHAR_LENGTH,
|
||||
# Query-time provenance predicates
|
||||
TG_QUERY, TG_EDGE_COUNT, TG_SELECTED_EDGE, TG_REASONING, TG_CONTENT,
|
||||
)
|
||||
|
||||
# Triple builders
|
||||
|
|
@ -65,6 +72,11 @@ from . triples import (
|
|||
document_triples,
|
||||
derived_entity_triples,
|
||||
triple_provenance_triples,
|
||||
# Query-time provenance triple builders
|
||||
query_session_triples,
|
||||
retrieval_triples,
|
||||
selection_triples,
|
||||
answer_triples,
|
||||
)
|
||||
|
||||
# Vocabulary bootstrap
|
||||
|
|
@ -86,6 +98,11 @@ __all__ = [
|
|||
"activity_uri",
|
||||
"statement_uri",
|
||||
"agent_uri",
|
||||
# Query-time provenance URIs
|
||||
"query_session_uri",
|
||||
"retrieval_uri",
|
||||
"selection_uri",
|
||||
"answer_uri",
|
||||
# Namespaces
|
||||
"PROV", "PROV_ENTITY", "PROV_ACTIVITY", "PROV_AGENT",
|
||||
"PROV_WAS_DERIVED_FROM", "PROV_WAS_GENERATED_BY",
|
||||
|
|
@ -97,10 +114,17 @@ __all__ = [
|
|||
"TG_CHUNK_SIZE", "TG_CHUNK_OVERLAP", "TG_COMPONENT_VERSION",
|
||||
"TG_LLM_MODEL", "TG_ONTOLOGY", "TG_EMBEDDING_MODEL",
|
||||
"TG_SOURCE_TEXT", "TG_SOURCE_CHAR_OFFSET", "TG_SOURCE_CHAR_LENGTH",
|
||||
# Query-time provenance predicates
|
||||
"TG_QUERY", "TG_EDGE_COUNT", "TG_SELECTED_EDGE", "TG_REASONING", "TG_CONTENT",
|
||||
# Triple builders
|
||||
"document_triples",
|
||||
"derived_entity_triples",
|
||||
"triple_provenance_triples",
|
||||
# Query-time provenance triple builders
|
||||
"query_session_triples",
|
||||
"retrieval_triples",
|
||||
"selection_triples",
|
||||
"answer_triples",
|
||||
# Vocabulary
|
||||
"get_vocabulary_triples",
|
||||
"PROV_CLASS_LABELS",
|
||||
|
|
|
|||
|
|
@ -58,3 +58,12 @@ TG_EMBEDDING_MODEL = TG + "embeddingModel"
|
|||
TG_SOURCE_TEXT = TG + "sourceText"
|
||||
TG_SOURCE_CHAR_OFFSET = TG + "sourceCharOffset"
|
||||
TG_SOURCE_CHAR_LENGTH = TG + "sourceCharLength"
|
||||
|
||||
# Query-time provenance predicates
|
||||
TG_QUERY = TG + "query"
|
||||
TG_EDGE_COUNT = TG + "edgeCount"
|
||||
TG_SELECTED_EDGE = TG + "selectedEdge"
|
||||
TG_EDGE = TG + "edge"
|
||||
TG_REASONING = TG + "reasoning"
|
||||
TG_CONTENT = TG + "content"
|
||||
TG_DOCUMENT = TG + "document" # Reference to document in librarian
|
||||
|
|
|
|||
|
|
@ -17,9 +17,12 @@ from . namespaces import (
|
|||
TG_CHUNK_INDEX, TG_CHAR_OFFSET, TG_CHAR_LENGTH,
|
||||
TG_CHUNK_SIZE, TG_CHUNK_OVERLAP, TG_COMPONENT_VERSION,
|
||||
TG_LLM_MODEL, TG_ONTOLOGY, TG_REIFIES,
|
||||
# Query-time provenance predicates
|
||||
TG_QUERY, TG_EDGE_COUNT, TG_SELECTED_EDGE, TG_EDGE, TG_REASONING, TG_CONTENT,
|
||||
TG_DOCUMENT,
|
||||
)
|
||||
|
||||
from . uris import activity_uri, agent_uri
|
||||
from . uris import activity_uri, agent_uri, edge_selection_uri
|
||||
|
||||
|
||||
def _iri(uri: str) -> Term:
|
||||
|
|
@ -252,3 +255,177 @@ def triple_provenance_triples(
|
|||
triples.append(_triple(act_uri, TG_ONTOLOGY, _iri(ontology_uri)))
|
||||
|
||||
return triples
|
||||
|
||||
|
||||
# Query-time provenance triple builders
|
||||
|
||||
def query_session_triples(
    session_uri: str,
    query: str,
    timestamp: Optional[str] = None,
) -> List[Triple]:
    """
    Build triples for a query session activity.

    Creates:
    - Activity declaration for the query session
    - Query text and timestamp

    Args:
        session_uri: URI of the session (from query_session_uri)
        query: The user's query text
        timestamp: ISO timestamp with "Z" suffix (defaults to current UTC time)

    Returns:
        List of Triple objects
    """
    if timestamp is None:
        # datetime.utcnow() is deprecated since Python 3.12; build the same
        # naive-ISO-plus-"Z" string from an aware UTC datetime so the stored
        # timestamp format is byte-identical to before.
        from datetime import timezone
        timestamp = (
            datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"
        )

    return [
        _triple(session_uri, RDF_TYPE, _iri(PROV_ACTIVITY)),
        _triple(session_uri, RDFS_LABEL, _literal("GraphRAG query session")),
        _triple(session_uri, PROV_STARTED_AT_TIME, _literal(timestamp)),
        _triple(session_uri, TG_QUERY, _literal(query)),
    ]
|
||||
|
||||
|
||||
def retrieval_triples(
    retrieval_uri: str,
    session_uri: str,
    edge_count: int,
) -> List[Triple]:
    """
    Build triples for a retrieval entity (all edges retrieved from subgraph).

    Creates:
    - Entity declaration for retrieval
    - wasGeneratedBy link to session
    - Edge count metadata

    Args:
        retrieval_uri: URI of the retrieval entity (from retrieval_uri)
        session_uri: URI of the parent session
        edge_count: Number of edges retrieved

    Returns:
        List of Triple objects
    """
    triples = []
    # Declare the retrieval as a PROV entity with a human-readable label.
    triples.append(_triple(retrieval_uri, RDF_TYPE, _iri(PROV_ENTITY)))
    triples.append(_triple(retrieval_uri, RDFS_LABEL, _literal("Retrieved edges")))
    # Link back to the query session that produced it.
    triples.append(_triple(retrieval_uri, PROV_WAS_GENERATED_BY, _iri(session_uri)))
    # Record how many edges came back from the subgraph.
    triples.append(_triple(retrieval_uri, TG_EDGE_COUNT, _literal(edge_count)))
    return triples
|
||||
|
||||
|
||||
def _quoted_triple(s: str, p: str, o: str) -> Term:
    """Create a quoted triple term (RDF-star) from string values."""
    # Wrap the three IRIs in an embedded Triple, then lift it to a TRIPLE-typed term.
    inner = Triple(s=_iri(s), p=_iri(p), o=_iri(o))
    return Term(type=TRIPLE, triple=inner)
|
||||
|
||||
|
||||
def selection_triples(
    selection_uri: str,
    retrieval_uri: str,
    selected_edges_with_reasoning: List[dict],
    session_id: str = "",
) -> List[Triple]:
    """
    Build triples for a selection entity (selected edges with reasoning).

    Creates:
    - Entity declaration for selection
    - wasDerivedFrom link to retrieval
    - For each selected edge: an edge selection entity with quoted triple and reasoning

    Structure:
        <selection> tg:selectedEdge <edge_sel_1> .
        <edge_sel_1> tg:edge << <s> <p> <o> >> .
        <edge_sel_1> tg:reasoning "reason" .

    Args:
        selection_uri: URI of the selection entity (from selection_uri)
        retrieval_uri: URI of the parent retrieval entity
        selected_edges_with_reasoning: List of dicts with 'edge' (s,p,o tuple) and 'reasoning'
        session_id: Session UUID for generating edge selection URIs

    Returns:
        List of Triple objects
    """
    triples = [
        _triple(selection_uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(selection_uri, RDFS_LABEL, _literal("Selected edges")),
        _triple(selection_uri, PROV_WAS_DERIVED_FROM, _iri(retrieval_uri)),
    ]

    # One intermediate entity per selected edge links the quoted triple to
    # its reasoning. Note: idx follows list position, so entries without an
    # 'edge' leave a gap in the edge-selection URI sequence.
    for idx, edge_info in enumerate(selected_edges_with_reasoning):
        edge = edge_info.get("edge")
        if not edge:
            # Nothing to record for this entry.
            continue

        s, p, o = edge
        edge_sel_uri = edge_selection_uri(session_id, idx)

        # selection → edge-selection entity
        triples.append(
            _triple(selection_uri, TG_SELECTED_EDGE, _iri(edge_sel_uri))
        )

        # edge-selection entity → quoted triple (RDF-star)
        triples.append(
            Triple(s=_iri(edge_sel_uri), p=_iri(TG_EDGE), o=_quoted_triple(s, p, o))
        )

        # edge-selection entity → LLM reasoning (only when non-empty)
        reasoning = edge_info.get("reasoning", "")
        if reasoning:
            triples.append(
                _triple(edge_sel_uri, TG_REASONING, _literal(reasoning))
            )

    return triples
|
||||
|
||||
|
||||
def answer_triples(
    answer_uri: str,
    selection_uri: str,
    answer_text: str = "",
    document_id: Optional[str] = None,
) -> List[Triple]:
    """
    Build triples for an answer entity (final synthesis text).

    Creates:
    - Entity declaration for answer
    - wasDerivedFrom link to selection
    - Either document reference (if document_id provided) or inline content

    Args:
        answer_uri: URI of the answer entity (from answer_uri)
        selection_uri: URI of the parent selection entity
        answer_text: The synthesized answer text (used if no document_id)
        document_id: Optional librarian document ID (preferred over inline content)

    Returns:
        List of Triple objects
    """
    triples = []
    triples.append(_triple(answer_uri, RDF_TYPE, _iri(PROV_ENTITY)))
    triples.append(_triple(answer_uri, RDFS_LABEL, _literal("GraphRAG answer")))
    triples.append(_triple(answer_uri, PROV_WAS_DERIVED_FROM, _iri(selection_uri)))

    # Prefer a reference to the librarian document over inlining the answer;
    # fall back to inline content only when text is present.
    if document_id:
        triples.append(_triple(answer_uri, TG_DOCUMENT, _iri(document_id)))
    elif answer_text:
        triples.append(_triple(answer_uri, TG_CONTENT, _literal(answer_text)))

    return triples
|
||||
|
|
|
|||
|
|
@ -60,3 +60,75 @@ def statement_uri(stmt_id: str = None) -> str:
|
|||
def agent_uri(component_name: str) -> str:
    """Generate URI for a TrustGraph component agent."""
    # Percent-/id-encode the component name before embedding it in the path.
    encoded = _encode_id(component_name)
    return f"{TRUSTGRAPH_BASE}/agent/{encoded}"
|
||||
|
||||
|
||||
# Query-time provenance URIs
|
||||
# These URIs use the urn:trustgraph: namespace to distinguish query-time
|
||||
# provenance from extraction-time provenance (which uses https://trustgraph.ai/)
|
||||
|
||||
def query_session_uri(session_id: str = None) -> str:
    """
    Generate URI for a query session activity.

    Args:
        session_id: Optional UUID string. Auto-generates if not provided.

    Returns:
        URN in format: urn:trustgraph:session:{uuid}
    """
    # Fall back to a freshly generated UUID4 when no id was supplied.
    sid = str(uuid.uuid4()) if session_id is None else session_id
    return f"urn:trustgraph:session:{sid}"
|
||||
|
||||
|
||||
def retrieval_uri(session_id: str) -> str:
    """
    Generate URI for a retrieval entity (edges retrieved from subgraph).

    Args:
        session_id: The session UUID (same as query_session_uri).

    Returns:
        URN in format: urn:trustgraph:prov:retrieval:{uuid}
    """
    return "urn:trustgraph:prov:retrieval:" + session_id
|
||||
|
||||
|
||||
def selection_uri(session_id: str) -> str:
    """
    Generate URI for a selection entity (selected edges with reasoning).

    Args:
        session_id: The session UUID (same as query_session_uri).

    Returns:
        URN in format: urn:trustgraph:prov:selection:{uuid}
    """
    return "urn:trustgraph:prov:selection:" + session_id
|
||||
|
||||
|
||||
def answer_uri(session_id: str) -> str:
    """
    Generate URI for an answer entity (final synthesis text).

    Args:
        session_id: The session UUID (same as query_session_uri).

    Returns:
        URN in format: urn:trustgraph:prov:answer:{uuid}
    """
    return "urn:trustgraph:prov:answer:" + session_id
|
||||
|
||||
|
||||
def edge_selection_uri(session_id: str, edge_index: int) -> str:
    """
    Generate URI for an edge selection item (links edge to reasoning).

    Args:
        session_id: The session UUID.
        edge_index: Index of this edge in the selection (0-based).

    Returns:
        URN in format: urn:trustgraph:prov:edge:{uuid}:{index}
    """
    # Index is appended after the session id so each edge gets a unique URN.
    return "urn:trustgraph:prov:edge:" + session_id + ":" + str(edge_index)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,11 @@ class GraphRagQuery:
|
|||
class GraphRagResponse:
|
||||
error: Error | None = None
|
||||
response: str = ""
|
||||
end_of_stream: bool = False
|
||||
end_of_stream: bool = False # LLM response stream complete
|
||||
explain_id: str | None = None # Single explain URI (announced as created)
|
||||
explain_collection: str | None = None # Collection where explain was stored
|
||||
message_type: str = "" # "chunk" or "explain"
|
||||
end_of_session: bool = False # Entire session complete
|
||||
|
||||
############################################################################
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue