Add agent explainability instrumentation and unify envelope field naming (#795)

Addresses recommendations from the UX developer's agent experience report.
Adds provenance predicates, DAG structure changes, error resilience, and
a published OWL ontology.

Explainability additions:

- Tool candidates: tg:toolCandidate on Analysis events lists the tools
  visible to the LLM for each iteration (names only, descriptions in config)
- Termination reason: tg:terminationReason on Conclusion/Synthesis events
  (values: final-answer, plan-complete, subagents-complete)
- Step counter: tg:stepNumber on iteration events
- Pattern decision: new tg:PatternDecision entity in the DAG between
  session and first iteration, carrying tg:pattern and tg:taskType
- Latency: tg:llmDurationMs on Analysis events, tg:toolDurationMs on
  Observation events
- Token counts on events: tg:inToken/tg:outToken/tg:llmModel on
  Grounding, Focus, Synthesis, and Analysis events
- Tool/parse errors: tg:toolError on Observation events with tg:Error
  mixin type. Parse failures return as error observations instead of
  crashing the agent, giving it a chance to retry.

Envelope unification:

- Rename chunk_type to message_type across AgentResponse schema,
  translator, SDK types, socket clients, CLI, and all tests.
  Agent and RAG services now both use message_type on the wire.

Ontology:

- specs/ontology/trustgraph.ttl — OWL vocabulary covering all 26 classes,
  7 object properties, and 36+ datatype properties, including the new
  predicates introduced by this change.

DAG structure tests:

- tests/unit/test_provenance/test_dag_structure.py verifies the
  wasDerivedFrom chain for GraphRAG, DocumentRAG, and all three agent
  patterns (react, plan, supervisor) including the pattern-decision link.
This commit is contained in:
cybermaggedon 2026-04-13 16:16:42 +01:00 committed by GitHub
parent 14e49d83c7
commit d2751553a3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 1577 additions and 205 deletions

View file

@ -178,24 +178,23 @@ class AsyncSocketClient:
def _parse_chunk(self, resp: Dict[str, Any]):
"""Parse response chunk into appropriate type. Returns None for non-content messages."""
chunk_type = resp.get("chunk_type")
message_type = resp.get("message_type")
# Handle new GraphRAG message format with message_type
if message_type == "provenance":
return None
if chunk_type == "thought":
if message_type == "thought":
return AgentThought(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)
)
elif chunk_type == "observation":
elif message_type == "observation":
return AgentObservation(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)
)
elif chunk_type == "answer" or chunk_type == "final-answer":
elif message_type == "answer" or message_type == "final-answer":
return AgentAnswer(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False),
@ -204,7 +203,7 @@ class AsyncSocketClient:
out_token=resp.get("out_token"),
model=resp.get("model"),
)
elif chunk_type == "action":
elif message_type == "action":
return AgentThought(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)

View file

@ -360,34 +360,26 @@ class SocketClient:
def _parse_chunk(self, resp: Dict[str, Any], include_provenance: bool = False) -> Optional[StreamingChunk]:
"""Parse response chunk into appropriate type. Returns None for non-content messages."""
chunk_type = resp.get("chunk_type")
message_type = resp.get("message_type")
# Handle GraphRAG/DocRAG message format with message_type
if message_type == "explain":
if include_provenance:
return self._build_provenance_event(resp)
return None
# Handle Agent message format with chunk_type="explain"
if chunk_type == "explain":
if include_provenance:
return self._build_provenance_event(resp)
return None
if chunk_type == "thought":
if message_type == "thought":
return AgentThought(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False),
message_id=resp.get("message_id", ""),
)
elif chunk_type == "observation":
elif message_type == "observation":
return AgentObservation(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False),
message_id=resp.get("message_id", ""),
)
elif chunk_type == "answer" or chunk_type == "final-answer":
elif message_type == "answer" or message_type == "final-answer":
return AgentAnswer(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False),
@ -397,7 +389,7 @@ class SocketClient:
out_token=resp.get("out_token"),
model=resp.get("model"),
)
elif chunk_type == "action":
elif message_type == "action":
return AgentThought(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)

View file

@ -149,10 +149,10 @@ class AgentThought(StreamingChunk):
Attributes:
content: Agent's thought text
end_of_message: True if this completes the current thought
chunk_type: Always "thought"
message_type: Always "thought"
message_id: Provenance URI of the entity being built
"""
chunk_type: str = "thought"
message_type: str = "thought"
message_id: str = ""
@dataclasses.dataclass
@ -166,10 +166,10 @@ class AgentObservation(StreamingChunk):
Attributes:
content: Observation text describing tool results
end_of_message: True if this completes the current observation
chunk_type: Always "observation"
message_type: Always "observation"
message_id: Provenance URI of the entity being built
"""
chunk_type: str = "observation"
message_type: str = "observation"
message_id: str = ""
@dataclasses.dataclass
@ -184,9 +184,9 @@ class AgentAnswer(StreamingChunk):
content: Answer text
end_of_message: True if this completes the current answer segment
end_of_dialog: True if this completes the entire agent interaction
chunk_type: Always "final-answer"
message_type: Always "final-answer"
"""
chunk_type: str = "final-answer"
message_type: str = "final-answer"
end_of_dialog: bool = False
message_id: str = ""
in_token: Optional[int] = None
@ -208,9 +208,9 @@ class RAGChunk(StreamingChunk):
in_token: Input token count (populated on the final chunk, 0 otherwise)
out_token: Output token count (populated on the final chunk, 0 otherwise)
model: Model identifier (populated on the final chunk, empty otherwise)
chunk_type: Always "rag"
message_type: Always "rag"
"""
chunk_type: str = "rag"
message_type: str = "rag"
end_of_stream: bool = False
error: Optional[Dict[str, str]] = None
in_token: Optional[int] = None

View file

@ -30,19 +30,19 @@ class AgentClient(RequestResponse):
raise RuntimeError(resp.error.message)
# Handle thought chunks
if resp.chunk_type == 'thought':
if resp.message_type == 'thought':
if think:
await think(resp.content, resp.end_of_message)
return False # Continue receiving
# Handle observation chunks
if resp.chunk_type == 'observation':
if resp.message_type == 'observation':
if observe:
await observe(resp.content, resp.end_of_message)
return False # Continue receiving
# Handle answer chunks
if resp.chunk_type == 'answer':
if resp.message_type == 'answer':
if resp.content:
accumulated_answer.append(resp.content)
if answer_callback:

View file

@ -58,23 +58,23 @@ class AgentClient(BaseClient):
def inspect(x):
# Handle errors
if x.chunk_type == 'error' or x.error:
if x.message_type == 'error' or x.error:
if error_callback:
error_callback(x.content or (x.error.message if x.error else ""))
# Continue to check end_of_dialog
# Handle thought chunks
elif x.chunk_type == 'thought':
elif x.message_type == 'thought':
if think:
think(x.content, x.end_of_message)
# Handle observation chunks
elif x.chunk_type == 'observation':
elif x.message_type == 'observation':
if observe:
observe(x.content, x.end_of_message)
# Handle answer chunks
elif x.chunk_type == 'answer':
elif x.message_type == 'answer':
if x.content:
accumulated_answer.append(x.content)
if answer_callback:

View file

@ -60,8 +60,8 @@ class AgentResponseTranslator(MessageTranslator):
def encode(self, obj: AgentResponse) -> Dict[str, Any]:
result = {}
if obj.chunk_type:
result["chunk_type"] = obj.chunk_type
if obj.message_type:
result["message_type"] = obj.message_type
if obj.content:
result["content"] = obj.content
result["end_of_message"] = getattr(obj, "end_of_message", False)

View file

@ -59,6 +59,7 @@ from . uris import (
agent_plan_uri,
agent_step_result_uri,
agent_synthesis_uri,
agent_pattern_decision_uri,
# Document RAG provenance URIs
docrag_question_uri,
docrag_grounding_uri,
@ -102,6 +103,11 @@ from . namespaces import (
# Agent provenance predicates
TG_THOUGHT, TG_ACTION, TG_ARGUMENTS, TG_OBSERVATION,
TG_SUBAGENT_GOAL, TG_PLAN_STEP,
TG_TOOL_CANDIDATE, TG_TERMINATION_REASON,
TG_STEP_NUMBER, TG_PATTERN_DECISION, TG_PATTERN, TG_TASK_TYPE,
TG_LLM_DURATION_MS, TG_TOOL_DURATION_MS, TG_TOOL_ERROR,
TG_IN_TOKEN, TG_OUT_TOKEN,
TG_ERROR_TYPE,
# Orchestrator entity types
TG_DECOMPOSITION, TG_FINDING, TG_PLAN_TYPE, TG_STEP_RESULT,
# Document reference predicate
@ -141,6 +147,7 @@ from . agent import (
agent_plan_triples,
agent_step_result_triples,
agent_synthesis_triples,
agent_pattern_decision_triples,
)
# Vocabulary bootstrap
@ -182,6 +189,7 @@ __all__ = [
"agent_plan_uri",
"agent_step_result_uri",
"agent_synthesis_uri",
"agent_pattern_decision_uri",
# Document RAG provenance URIs
"docrag_question_uri",
"docrag_grounding_uri",
@ -218,6 +226,11 @@ __all__ = [
# Agent provenance predicates
"TG_THOUGHT", "TG_ACTION", "TG_ARGUMENTS", "TG_OBSERVATION",
"TG_SUBAGENT_GOAL", "TG_PLAN_STEP",
"TG_TOOL_CANDIDATE", "TG_TERMINATION_REASON",
"TG_STEP_NUMBER", "TG_PATTERN_DECISION", "TG_PATTERN", "TG_TASK_TYPE",
"TG_LLM_DURATION_MS", "TG_TOOL_DURATION_MS", "TG_TOOL_ERROR",
"TG_IN_TOKEN", "TG_OUT_TOKEN",
"TG_ERROR_TYPE",
# Orchestrator entity types
"TG_DECOMPOSITION", "TG_FINDING", "TG_PLAN_TYPE", "TG_STEP_RESULT",
# Document reference predicate
@ -249,6 +262,7 @@ __all__ = [
"agent_plan_triples",
"agent_step_result_triples",
"agent_synthesis_triples",
"agent_pattern_decision_triples",
# Utility
"set_graph",
# Vocabulary

View file

@ -29,6 +29,11 @@ from . namespaces import (
TG_AGENT_QUESTION,
TG_DECOMPOSITION, TG_FINDING, TG_PLAN_TYPE, TG_STEP_RESULT,
TG_SYNTHESIS, TG_SUBAGENT_GOAL, TG_PLAN_STEP,
TG_TOOL_CANDIDATE, TG_TERMINATION_REASON,
TG_STEP_NUMBER, TG_PATTERN_DECISION, TG_PATTERN, TG_TASK_TYPE,
TG_LLM_DURATION_MS, TG_TOOL_DURATION_MS, TG_TOOL_ERROR,
TG_ERROR_TYPE,
TG_IN_TOKEN, TG_OUT_TOKEN, TG_LLM_MODEL,
)
@ -47,6 +52,17 @@ def _triple(s: str, p: str, o_term: Term) -> Triple:
return Triple(s=_iri(s), p=_iri(p), o=o_term)
def _append_token_triples(triples, uri, in_token=None, out_token=None,
                          model=None):
    """Append LLM usage triples (inToken/outToken/llmModel) when present.

    Token counts are stringified before being wrapped as literals; the
    model identifier is used verbatim.  Mutates *triples* in place and
    returns None.
    """
    usage = (
        (TG_IN_TOKEN, in_token),
        (TG_OUT_TOKEN, out_token),
    )
    for predicate, count in usage:
        if count is not None:
            triples.append(_triple(uri, predicate, _literal(str(count))))
    if model is not None:
        triples.append(_triple(uri, TG_LLM_MODEL, _literal(model)))
def agent_session_triples(
session_uri: str,
query: str,
@ -90,6 +106,43 @@ def agent_session_triples(
return triples
def agent_pattern_decision_triples(
    uri: str,
    session_uri: str,
    pattern: str,
    task_type: str = "",
) -> List[Triple]:
    """
    Build triples for a meta-router pattern decision.

    Creates:
    - Entity declaration with tg:PatternDecision type
    - wasDerivedFrom link to session
    - Pattern and task type predicates

    Args:
        uri: URI of this decision (from agent_pattern_decision_uri)
        session_uri: URI of the parent session
        pattern: Selected execution pattern (e.g. "react", "plan-then-execute")
        task_type: Identified task type (e.g. "general", "research")

    Returns:
        List of Triple objects
    """
    triples: List[Triple] = []
    # Declare the entity with both the generic PROV type and the
    # TrustGraph-specific decision type.
    triples.append(_triple(uri, RDF_TYPE, _iri(PROV_ENTITY)))
    triples.append(_triple(uri, RDF_TYPE, _iri(TG_PATTERN_DECISION)))
    triples.append(_triple(uri, RDFS_LABEL, _literal(f"Pattern: {pattern}")))
    triples.append(_triple(uri, TG_PATTERN, _literal(pattern)))
    # Link the decision back to the session it was made for.
    triples.append(_triple(uri, PROV_WAS_DERIVED_FROM, _iri(session_uri)))
    # Task type is optional; the empty string means "not identified".
    if task_type:
        triples.append(_triple(uri, TG_TASK_TYPE, _literal(task_type)))
    return triples
def agent_iteration_triples(
iteration_uri: str,
question_uri: Optional[str] = None,
@ -98,6 +151,12 @@ def agent_iteration_triples(
arguments: Dict[str, Any] = None,
thought_uri: Optional[str] = None,
thought_document_id: Optional[str] = None,
tool_candidates: Optional[List[str]] = None,
step_number: Optional[int] = None,
llm_duration_ms: Optional[int] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for one agent iteration (Analysis+ToolUse).
@ -106,6 +165,7 @@ def agent_iteration_triples(
- Entity declaration with tg:Analysis and tg:ToolUse types
- wasDerivedFrom link to question (if first iteration) or previous
- Action and arguments metadata
- Tool candidates (names of tools visible to the LLM)
- Thought sub-entity (tg:Reflection, tg:Thought) with librarian document
Args:
@ -116,6 +176,7 @@ def agent_iteration_triples(
arguments: Arguments passed to the tool (will be JSON-encoded)
thought_uri: URI for the thought sub-entity
thought_document_id: Document URI for thought in librarian
tool_candidates: List of tool names available to the LLM
Returns:
List of Triple objects
@ -132,6 +193,23 @@ def agent_iteration_triples(
_triple(iteration_uri, TG_ARGUMENTS, _literal(json.dumps(arguments))),
]
if tool_candidates:
for name in tool_candidates:
triples.append(
_triple(iteration_uri, TG_TOOL_CANDIDATE, _literal(name))
)
if step_number is not None:
triples.append(
_triple(iteration_uri, TG_STEP_NUMBER, _literal(str(step_number)))
)
if llm_duration_ms is not None:
triples.append(
_triple(iteration_uri, TG_LLM_DURATION_MS,
_literal(str(llm_duration_ms)))
)
if question_uri:
triples.append(
_triple(iteration_uri, PROV_WAS_DERIVED_FROM, _iri(question_uri))
@ -155,6 +233,8 @@ def agent_iteration_triples(
_triple(thought_uri, TG_DOCUMENT, _iri(thought_document_id))
)
_append_token_triples(triples, iteration_uri, in_token, out_token, model)
return triples
@ -162,6 +242,8 @@ def agent_observation_triples(
observation_uri: str,
iteration_uri: str,
document_id: Optional[str] = None,
tool_duration_ms: Optional[int] = None,
tool_error: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for an agent observation (standalone entity).
@ -170,11 +252,15 @@ def agent_observation_triples(
- Entity declaration with prov:Entity and tg:Observation types
- wasDerivedFrom link to the iteration (Analysis+ToolUse)
- Document reference to librarian (if provided)
- Tool execution duration (if provided)
- Tool error message (if the tool failed)
Args:
observation_uri: URI of the observation entity
iteration_uri: URI of the iteration this observation derives from
document_id: Librarian document ID for the observation content
tool_duration_ms: Tool execution time in milliseconds
tool_error: Error message if the tool failed
Returns:
List of Triple objects
@ -191,6 +277,20 @@ def agent_observation_triples(
_triple(observation_uri, TG_DOCUMENT, _iri(document_id))
)
if tool_duration_ms is not None:
triples.append(
_triple(observation_uri, TG_TOOL_DURATION_MS,
_literal(str(tool_duration_ms)))
)
if tool_error:
triples.append(
_triple(observation_uri, TG_TOOL_ERROR, _literal(tool_error))
)
triples.append(
_triple(observation_uri, RDF_TYPE, _iri(TG_ERROR_TYPE))
)
return triples
@ -199,6 +299,10 @@ def agent_final_triples(
question_uri: Optional[str] = None,
previous_uri: Optional[str] = None,
document_id: Optional[str] = None,
termination_reason: Optional[str] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for an agent final answer (Conclusion).
@ -208,12 +312,15 @@ def agent_final_triples(
- wasGeneratedBy link to question (if no iterations)
- wasDerivedFrom link to last iteration (if iterations exist)
- Document reference to librarian
- Termination reason (why the agent loop stopped)
Args:
final_uri: URI of the final answer (from agent_final_uri)
question_uri: URI of the question activity (if no iterations)
previous_uri: URI of the last iteration (if iterations exist)
document_id: Librarian document ID for the answer content
termination_reason: Why the loop stopped, e.g. "final-answer",
"max-iterations", "error"
Returns:
List of Triple objects
@ -237,6 +344,14 @@ def agent_final_triples(
if document_id:
triples.append(_triple(final_uri, TG_DOCUMENT, _iri(document_id)))
if termination_reason:
triples.append(
_triple(final_uri, TG_TERMINATION_REASON,
_literal(termination_reason))
)
_append_token_triples(triples, final_uri, in_token, out_token, model)
return triples
@ -244,6 +359,9 @@ def agent_decomposition_triples(
uri: str,
session_uri: str,
goals: List[str],
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""Build triples for a supervisor decomposition step."""
triples = [
@ -255,6 +373,7 @@ def agent_decomposition_triples(
]
for goal in goals:
triples.append(_triple(uri, TG_SUBAGENT_GOAL, _literal(goal)))
_append_token_triples(triples, uri, in_token, out_token, model)
return triples
@ -282,6 +401,9 @@ def agent_plan_triples(
uri: str,
session_uri: str,
steps: List[str],
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""Build triples for a plan-then-execute plan."""
triples = [
@ -293,6 +415,7 @@ def agent_plan_triples(
]
for step in steps:
triples.append(_triple(uri, TG_PLAN_STEP, _literal(step)))
_append_token_triples(triples, uri, in_token, out_token, model)
return triples
@ -301,6 +424,9 @@ def agent_step_result_triples(
plan_uri: str,
goal: str,
document_id: Optional[str] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""Build triples for a plan step result."""
triples = [
@ -313,6 +439,7 @@ def agent_step_result_triples(
]
if document_id:
triples.append(_triple(uri, TG_DOCUMENT, _iri(document_id)))
_append_token_triples(triples, uri, in_token, out_token, model)
return triples
@ -320,6 +447,10 @@ def agent_synthesis_triples(
uri: str,
previous_uris,
document_id: Optional[str] = None,
termination_reason: Optional[str] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""Build triples for a synthesis answer.
@ -327,6 +458,8 @@ def agent_synthesis_triples(
uri: URI of the synthesis entity
previous_uris: Single URI string or list of URIs to derive from
document_id: Librarian document ID for the answer content
termination_reason: Why the agent loop stopped
in_token/out_token/model: Token usage for the synthesis LLM call
"""
triples = [
_triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
@ -342,4 +475,12 @@ def agent_synthesis_triples(
if document_id:
triples.append(_triple(uri, TG_DOCUMENT, _iri(document_id)))
if termination_reason:
triples.append(
_triple(uri, TG_TERMINATION_REASON, _literal(termination_reason))
)
_append_token_triples(triples, uri, in_token, out_token, model)
return triples

View file

@ -119,6 +119,18 @@ TG_ARGUMENTS = TG + "arguments"
TG_OBSERVATION = TG + "observation" # Links iteration to observation sub-entity
TG_SUBAGENT_GOAL = TG + "subagentGoal" # Goal string on Decomposition/Finding
TG_PLAN_STEP = TG + "planStep" # Step goal string on Plan/StepResult
TG_TOOL_CANDIDATE = TG + "toolCandidate" # Tool name on Analysis events
TG_TERMINATION_REASON = TG + "terminationReason" # Why the agent loop stopped
TG_STEP_NUMBER = TG + "stepNumber" # Explicit step counter on iteration events
TG_PATTERN_DECISION = TG + "PatternDecision" # Meta-router routing decision entity type
TG_PATTERN = TG + "pattern" # Selected execution pattern
TG_TASK_TYPE = TG + "taskType" # Identified task type
TG_LLM_DURATION_MS = TG + "llmDurationMs" # LLM call duration in milliseconds
TG_TOOL_DURATION_MS = TG + "toolDurationMs" # Tool execution duration in milliseconds
TG_TOOL_ERROR = TG + "toolError" # Error message from a failed tool execution
TG_ERROR_TYPE = TG + "Error" # Mixin type for failure events
TG_IN_TOKEN = TG + "inToken" # Input token count for an LLM call
TG_OUT_TOKEN = TG + "outToken" # Output token count for an LLM call
# Named graph URIs for RDF datasets
# These separate different types of data while keeping them in the same collection

View file

@ -34,6 +34,8 @@ from . namespaces import (
TG_ANSWER_TYPE,
# Question subtypes
TG_GRAPH_RAG_QUESTION, TG_DOC_RAG_QUESTION,
# Token usage
TG_IN_TOKEN, TG_OUT_TOKEN,
)
from . uris import activity_uri, agent_uri, subgraph_uri, edge_selection_uri
@ -74,6 +76,17 @@ def _triple(s: str, p: str, o_term: Term) -> Triple:
return Triple(s=_iri(s), p=_iri(p), o=o_term)
def _append_token_triples(triples, uri, in_token=None, out_token=None,
                          model=None):
    """Append in_token/out_token/model triples when values are present."""
    # Token counts are stringified before becoming literals; the model
    # identifier is used as-is.  The list is mutated in place.
    # NOTE(review): TG_LLM_MODEL is referenced below, but the visible
    # import addition for this module only brings in TG_IN_TOKEN and
    # TG_OUT_TOKEN — confirm TG_LLM_MODEL is imported here (the agent
    # provenance module's copy of this helper imports it explicitly).
    if in_token is not None:
        triples.append(_triple(uri, TG_IN_TOKEN, _literal(str(in_token))))
    if out_token is not None:
        triples.append(_triple(uri, TG_OUT_TOKEN, _literal(str(out_token))))
    if model is not None:
        triples.append(_triple(uri, TG_LLM_MODEL, _literal(model)))
def document_triples(
doc_uri: str,
title: Optional[str] = None,
@ -396,6 +409,9 @@ def grounding_triples(
grounding_uri: str,
question_uri: str,
concepts: List[str],
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for a grounding entity (concept decomposition of query).
@ -423,6 +439,8 @@ def grounding_triples(
for concept in concepts:
triples.append(_triple(grounding_uri, TG_CONCEPT, _literal(concept)))
_append_token_triples(triples, grounding_uri, in_token, out_token, model)
return triples
@ -485,6 +503,9 @@ def focus_triples(
exploration_uri: str,
selected_edges_with_reasoning: List[dict],
session_id: str = "",
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for a focus entity (selected edges with reasoning).
@ -543,6 +564,8 @@ def focus_triples(
_triple(edge_sel_uri, TG_REASONING, _literal(reasoning))
)
_append_token_triples(triples, focus_uri, in_token, out_token, model)
return triples
@ -550,6 +573,9 @@ def synthesis_triples(
synthesis_uri: str,
focus_uri: str,
document_id: Optional[str] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for a synthesis entity (final answer).
@ -578,6 +604,8 @@ def synthesis_triples(
if document_id:
triples.append(_triple(synthesis_uri, TG_DOCUMENT, _iri(document_id)))
_append_token_triples(triples, synthesis_uri, in_token, out_token, model)
return triples
@ -674,6 +702,9 @@ def docrag_synthesis_triples(
synthesis_uri: str,
exploration_uri: str,
document_id: Optional[str] = None,
in_token: Optional[int] = None,
out_token: Optional[int] = None,
model: Optional[str] = None,
) -> List[Triple]:
"""
Build triples for a document RAG synthesis entity (final answer).
@ -702,4 +733,6 @@ def docrag_synthesis_triples(
if document_id:
triples.append(_triple(synthesis_uri, TG_DOCUMENT, _iri(document_id)))
_append_token_triples(triples, synthesis_uri, in_token, out_token, model)
return triples

View file

@ -259,6 +259,11 @@ def agent_synthesis_uri(session_id: str) -> str:
return f"urn:trustgraph:agent:{session_id}/synthesis"
def agent_pattern_decision_uri(session_id: str) -> str:
    """Return the provenance URI for a session's meta-router pattern decision."""
    prefix = "urn:trustgraph:agent:"
    return prefix + session_id + "/pattern-decision"
# Document RAG provenance URIs
# These URIs use the urn:trustgraph:docrag: namespace to distinguish
# document RAG provenance from graph RAG provenance

View file

@ -51,8 +51,8 @@ class AgentRequest:
@dataclass
class AgentResponse:
# Streaming-first design
chunk_type: str = "" # "thought", "action", "observation", "answer", "explain", "error"
content: str = "" # The actual content (interpretation depends on chunk_type)
message_type: str = "" # "thought", "action", "observation", "answer", "explain", "error"
content: str = "" # The actual content (interpretation depends on message_type)
end_of_message: bool = False # Current chunk type (thought/action/etc.) is complete
end_of_dialog: bool = False # Entire agent dialog is complete