agent-orchestrator: add explainability provenance for all patterns (#744)

agent-orchestrator: add explainability provenance for all agent
patterns

Extend the provenance/explainability system to provide
human-readable reasoning traces for the orchestrator's three
agent patterns. Previously only ReAct emitted provenance
(session, iteration, conclusion). Now each pattern records its
cognitive steps as typed RDF entities in the knowledge graph,
using composable mixin types (e.g. Finding + Answer).

New provenance chains:
- Supervisor: Question → Decomposition → Finding ×N → Synthesis
- Plan-then-Execute: Question → Plan → StepResult ×N → Synthesis
- ReAct: Question → Analysis ×N → Conclusion (unchanged)

New RDF types: Decomposition, Finding, Plan, StepResult.
New predicates: tg:subagentGoal, tg:planStep.
Reuses existing Synthesis + Answer mixin for final answers.

Provenance library (trustgraph-base):
- Triple builders, URI generators, vocabulary labels for new types
- Client dataclasses with from_triples() dispatch
- fetch_agent_trace() follows branching provenance chains
- API exports updated

Orchestrator (trustgraph-flow):
- PatternBase emit methods for decomposition, finding, plan, step result, and synthesis
- SupervisorPattern emits decomposition during fan-out
- PlanThenExecutePattern emits plan and step results
- Service emits finding triples on subagent completion
- Synthesis provenance replaces generic final triples

CLI (trustgraph-cli):
- invoke_agent -x displays new entity types inline
This commit is contained in:
cybermaggedon 2026-03-31 12:54:51 +01:00 committed by GitHub
parent e65ea217a2
commit 7b734148b3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 560 additions and 82 deletions

View file

@ -82,6 +82,10 @@ from .explainability import (
Reflection, Reflection,
Analysis, Analysis,
Conclusion, Conclusion,
Decomposition,
Finding,
Plan,
StepResult,
EdgeSelection, EdgeSelection,
wire_triples_to_tuples, wire_triples_to_tuples,
extract_term_value, extract_term_value,

View file

@ -44,6 +44,16 @@ TG_GRAPH_RAG_QUESTION = TG + "GraphRagQuestion"
TG_DOC_RAG_QUESTION = TG + "DocRagQuestion" TG_DOC_RAG_QUESTION = TG + "DocRagQuestion"
TG_AGENT_QUESTION = TG + "AgentQuestion" TG_AGENT_QUESTION = TG + "AgentQuestion"
# Orchestrator entity types
TG_DECOMPOSITION = TG + "Decomposition"  # Supervisor broke question into sub-goals
TG_FINDING = TG + "Finding"              # A single subagent's result
TG_PLAN_TYPE = TG + "Plan"               # Plan-then-execute structured plan
TG_STEP_RESULT = TG + "StepResult"       # Result of one plan step
# Orchestrator predicates
TG_SUBAGENT_GOAL = TG + "subagentGoal"   # Goal string on Decomposition/Finding
TG_PLAN_STEP = TG + "planStep"           # Step goal string on Plan/StepResult
# PROV-O predicates # PROV-O predicates
PROV = "http://www.w3.org/ns/prov#" PROV = "http://www.w3.org/ns/prov#"
PROV_STARTED_AT_TIME = PROV + "startedAtTime" PROV_STARTED_AT_TIME = PROV + "startedAtTime"
@ -82,6 +92,14 @@ class ExplainEntity:
return Exploration.from_triples(uri, triples) return Exploration.from_triples(uri, triples)
elif TG_FOCUS in types: elif TG_FOCUS in types:
return Focus.from_triples(uri, triples) return Focus.from_triples(uri, triples)
elif TG_DECOMPOSITION in types:
return Decomposition.from_triples(uri, triples)
elif TG_FINDING in types:
return Finding.from_triples(uri, triples)
elif TG_PLAN_TYPE in types:
return Plan.from_triples(uri, triples)
elif TG_STEP_RESULT in types:
return StepResult.from_triples(uri, triples)
elif TG_SYNTHESIS in types: elif TG_SYNTHESIS in types:
return Synthesis.from_triples(uri, triples) return Synthesis.from_triples(uri, triples)
elif TG_REFLECTION_TYPE in types: elif TG_REFLECTION_TYPE in types:
@ -314,6 +332,70 @@ class Conclusion(ExplainEntity):
) )
@dataclass
class Decomposition(ExplainEntity):
    """Decomposition entity - supervisor broke question into sub-goals."""
    goals: List[str] = field(default_factory=list)

    @classmethod
    def from_triples(cls, uri: str, triples: List[Tuple[str, str, Any]]) -> "Decomposition":
        """Reconstruct a Decomposition from its raw (s, p, o) triples."""
        # Each subagentGoal triple contributes one research-thread goal
        goal_values = [obj for _, pred, obj in triples if pred == TG_SUBAGENT_GOAL]
        return cls(uri=uri, entity_type="decomposition", goals=goal_values)
@dataclass
class Finding(ExplainEntity):
    """Finding entity - a subagent's result."""
    goal: str = ""
    document: str = ""

    @classmethod
    def from_triples(cls, uri: str, triples: List[Tuple[str, str, Any]]) -> "Finding":
        """Reconstruct a Finding from its raw (s, p, o) triples."""
        # Last value wins for each predicate of interest, matching
        # a simple scan-and-overwrite extraction.
        slots = {TG_SUBAGENT_GOAL: "", TG_DOCUMENT: ""}
        for _, pred, obj in triples:
            if pred in slots:
                slots[pred] = obj
        return cls(
            uri=uri,
            entity_type="finding",
            goal=slots[TG_SUBAGENT_GOAL],
            document=slots[TG_DOCUMENT],
        )
@dataclass
class Plan(ExplainEntity):
    """Plan entity - a structured plan of steps."""
    steps: List[str] = field(default_factory=list)

    @classmethod
    def from_triples(cls, uri: str, triples: List[Tuple[str, str, Any]]) -> "Plan":
        """Reconstruct a Plan from its raw (s, p, o) triples."""
        # Each planStep triple contributes one step goal
        step_values = [obj for _, pred, obj in triples if pred == TG_PLAN_STEP]
        return cls(uri=uri, entity_type="plan", steps=step_values)
@dataclass
class StepResult(ExplainEntity):
    """StepResult entity - a plan step's result."""
    step: str = ""
    document: str = ""

    @classmethod
    def from_triples(cls, uri: str, triples: List[Tuple[str, str, Any]]) -> "StepResult":
        """Reconstruct a StepResult from its raw (s, p, o) triples."""
        # Last value wins for each predicate of interest
        slots = {TG_PLAN_STEP: "", TG_DOCUMENT: ""}
        for _, pred, obj in triples:
            if pred in slots:
                slots[pred] = obj
        return cls(
            uri=uri,
            entity_type="step-result",
            step=slots[TG_PLAN_STEP],
            document=slots[TG_DOCUMENT],
        )
def parse_edge_selection_triples(triples: List[Tuple[str, str, Any]]) -> EdgeSelection: def parse_edge_selection_triples(triples: List[Tuple[str, str, Any]]) -> EdgeSelection:
"""Parse triples for an edge selection entity.""" """Parse triples for an edge selection entity."""
uri = triples[0][0] if triples else "" uri = triples[0][0] if triples else ""
@ -895,7 +977,10 @@ class ExplainabilityClient:
""" """
Fetch the complete Agent trace starting from a session URI. Fetch the complete Agent trace starting from a session URI.
Follows the provenance chain: Question -> Analysis(s) -> Conclusion Follows the provenance chain for all patterns:
- ReAct: Question -> Analysis(s) -> Conclusion
- Supervisor: Question -> Decomposition -> Finding(s) -> Synthesis
- Plan-then-Execute: Question -> Plan -> StepResult(s) -> Synthesis
Args: Args:
session_uri: The agent session/question URI session_uri: The agent session/question URI
@ -906,14 +991,15 @@ class ExplainabilityClient:
max_content: Maximum content length for conclusion max_content: Maximum content length for conclusion
Returns: Returns:
Dict with question, iterations (Analysis list), conclusion entities Dict with question, steps (mixed entity list), conclusion/synthesis
""" """
if graph is None: if graph is None:
graph = "urn:graph:retrieval" graph = "urn:graph:retrieval"
trace = { trace = {
"question": None, "question": None,
"iterations": [], "steps": [],
"iterations": [], # Backwards compatibility for ReAct
"conclusion": None, "conclusion": None,
} }
@ -923,64 +1009,79 @@ class ExplainabilityClient:
return trace return trace
trace["question"] = question trace["question"] = question
# Follow the chain: wasGeneratedBy for first hop, wasDerivedFrom after # Follow the provenance chain from the question
current_uri = session_uri self._follow_provenance_chain(
is_first = True session_uri, trace, graph, user, collection,
max_iterations = 50 # Safety limit is_first=True, max_depth=50,
)
for _ in range(max_iterations): # Backwards compat: populate iterations from steps
# First hop uses wasGeneratedBy (entity←activity), trace["iterations"] = [
# subsequent hops use wasDerivedFrom (entity←entity) s for s in trace["steps"] if isinstance(s, Analysis)
if is_first: ]
derived_triples = self.flow.triples_query(
p=PROV_WAS_GENERATED_BY, return trace
o=current_uri,
g=graph, def _follow_provenance_chain(
user=user, self, current_uri, trace, graph, user, collection,
collection=collection, is_first=False, max_depth=50,
limit=10 ):
) """Recursively follow the provenance chain, handling branches."""
# Fall back to wasDerivedFrom for backwards compatibility if max_depth <= 0:
if not derived_triples: return
derived_triples = self.flow.triples_query(
p=PROV_WAS_DERIVED_FROM, # Find entities derived from current_uri
o=current_uri, if is_first:
g=graph, derived_triples = self.flow.triples_query(
user=user, p=PROV_WAS_GENERATED_BY,
collection=collection, o=current_uri,
limit=10 g=graph, user=user, collection=collection,
) limit=20
is_first = False )
else: if not derived_triples:
derived_triples = self.flow.triples_query( derived_triples = self.flow.triples_query(
p=PROV_WAS_DERIVED_FROM, p=PROV_WAS_DERIVED_FROM,
o=current_uri, o=current_uri,
g=graph, g=graph, user=user, collection=collection,
user=user, limit=20
collection=collection,
limit=10
) )
else:
derived_triples = self.flow.triples_query(
p=PROV_WAS_DERIVED_FROM,
o=current_uri,
g=graph, user=user, collection=collection,
limit=20
)
if not derived_triples: if not derived_triples:
break return
derived_uri = extract_term_value(derived_triples[0].get("s", {})) derived_uris = [
extract_term_value(t.get("s", {}))
for t in derived_triples
]
for derived_uri in derived_uris:
if not derived_uri: if not derived_uri:
break continue
entity = self.fetch_entity(derived_uri, graph, user, collection) entity = self.fetch_entity(derived_uri, graph, user, collection)
if entity is None:
continue
if isinstance(entity, Analysis): if isinstance(entity, (Analysis, Decomposition, Finding,
trace["iterations"].append(entity) Plan, StepResult)):
current_uri = derived_uri trace["steps"].append(entity)
elif isinstance(entity, Conclusion):
# Continue following from this entity
self._follow_provenance_chain(
derived_uri, trace, graph, user, collection,
max_depth=max_depth - 1,
)
elif isinstance(entity, (Conclusion, Synthesis)):
trace["steps"].append(entity)
trace["conclusion"] = entity trace["conclusion"] = entity
break
else:
# Unknown entity type, stop
break
return trace
def list_sessions( def list_sessions(
self, self,
@ -1082,7 +1183,7 @@ class ExplainabilityClient:
for child_uri in all_child_uris: for child_uri in all_child_uris:
entity = self.fetch_entity(child_uri, graph, user, collection) entity = self.fetch_entity(child_uri, graph, user, collection)
if isinstance(entity, Analysis): if isinstance(entity, (Analysis, Decomposition, Plan)):
return "agent" return "agent"
if isinstance(entity, Exploration): if isinstance(entity, Exploration):
return "graphrag" return "graphrag"

View file

@ -53,6 +53,12 @@ from . uris import (
agent_thought_uri, agent_thought_uri,
agent_observation_uri, agent_observation_uri,
agent_final_uri, agent_final_uri,
# Orchestrator provenance URIs
agent_decomposition_uri,
agent_finding_uri,
agent_plan_uri,
agent_step_result_uri,
agent_synthesis_uri,
# Document RAG provenance URIs # Document RAG provenance URIs
docrag_question_uri, docrag_question_uri,
docrag_grounding_uri, docrag_grounding_uri,
@ -94,6 +100,9 @@ from . namespaces import (
TG_GRAPH_RAG_QUESTION, TG_DOC_RAG_QUESTION, TG_AGENT_QUESTION, TG_GRAPH_RAG_QUESTION, TG_DOC_RAG_QUESTION, TG_AGENT_QUESTION,
# Agent provenance predicates # Agent provenance predicates
TG_THOUGHT, TG_ACTION, TG_ARGUMENTS, TG_OBSERVATION, TG_THOUGHT, TG_ACTION, TG_ARGUMENTS, TG_OBSERVATION,
TG_SUBAGENT_GOAL, TG_PLAN_STEP,
# Orchestrator entity types
TG_DECOMPOSITION, TG_FINDING, TG_PLAN_TYPE, TG_STEP_RESULT,
# Document reference predicate # Document reference predicate
TG_DOCUMENT, TG_DOCUMENT,
# Named graphs # Named graphs
@ -124,6 +133,12 @@ from . agent import (
agent_session_triples, agent_session_triples,
agent_iteration_triples, agent_iteration_triples,
agent_final_triples, agent_final_triples,
# Orchestrator provenance triple builders
agent_decomposition_triples,
agent_finding_triples,
agent_plan_triples,
agent_step_result_triples,
agent_synthesis_triples,
) )
# Vocabulary bootstrap # Vocabulary bootstrap
@ -159,6 +174,12 @@ __all__ = [
"agent_thought_uri", "agent_thought_uri",
"agent_observation_uri", "agent_observation_uri",
"agent_final_uri", "agent_final_uri",
# Orchestrator provenance URIs
"agent_decomposition_uri",
"agent_finding_uri",
"agent_plan_uri",
"agent_step_result_uri",
"agent_synthesis_uri",
# Document RAG provenance URIs # Document RAG provenance URIs
"docrag_question_uri", "docrag_question_uri",
"docrag_grounding_uri", "docrag_grounding_uri",
@ -193,6 +214,9 @@ __all__ = [
"TG_GRAPH_RAG_QUESTION", "TG_DOC_RAG_QUESTION", "TG_AGENT_QUESTION", "TG_GRAPH_RAG_QUESTION", "TG_DOC_RAG_QUESTION", "TG_AGENT_QUESTION",
# Agent provenance predicates # Agent provenance predicates
"TG_THOUGHT", "TG_ACTION", "TG_ARGUMENTS", "TG_OBSERVATION", "TG_THOUGHT", "TG_ACTION", "TG_ARGUMENTS", "TG_OBSERVATION",
"TG_SUBAGENT_GOAL", "TG_PLAN_STEP",
# Orchestrator entity types
"TG_DECOMPOSITION", "TG_FINDING", "TG_PLAN_TYPE", "TG_STEP_RESULT",
# Document reference predicate # Document reference predicate
"TG_DOCUMENT", "TG_DOCUMENT",
# Named graphs # Named graphs
@ -215,6 +239,12 @@ __all__ = [
"agent_session_triples", "agent_session_triples",
"agent_iteration_triples", "agent_iteration_triples",
"agent_final_triples", "agent_final_triples",
# Orchestrator provenance triple builders
"agent_decomposition_triples",
"agent_finding_triples",
"agent_plan_triples",
"agent_step_result_triples",
"agent_synthesis_triples",
# Utility # Utility
"set_graph", "set_graph",
# Vocabulary # Vocabulary

View file

@ -1,10 +1,15 @@
""" """
Helper functions to build PROV-O triples for agent provenance. Helper functions to build PROV-O triples for agent provenance.
Agent provenance tracks the reasoning trace of ReAct agent sessions: Agent provenance tracks the reasoning trace of agent sessions:
- Question: The root activity with query and timestamp - Question: The root activity with query and timestamp
- Analysis: Each think/act/observe cycle - Analysis: Each think/act/observe cycle (ReAct)
- Conclusion: The final answer - Conclusion: The final answer (ReAct)
- Decomposition: Supervisor broke question into sub-goals
- Finding: A subagent's result (Supervisor)
- Plan: Structured plan of steps (Plan-then-Execute)
- StepResult: A plan step's result (Plan-then-Execute)
- Synthesis: Final synthesised answer (Supervisor, Plan-then-Execute)
""" """
import json import json
@ -21,6 +26,8 @@ from . namespaces import (
TG_QUESTION, TG_ANALYSIS, TG_CONCLUSION, TG_DOCUMENT, TG_QUESTION, TG_ANALYSIS, TG_CONCLUSION, TG_DOCUMENT,
TG_ANSWER_TYPE, TG_REFLECTION_TYPE, TG_THOUGHT_TYPE, TG_OBSERVATION_TYPE, TG_ANSWER_TYPE, TG_REFLECTION_TYPE, TG_THOUGHT_TYPE, TG_OBSERVATION_TYPE,
TG_AGENT_QUESTION, TG_AGENT_QUESTION,
TG_DECOMPOSITION, TG_FINDING, TG_PLAN_TYPE, TG_STEP_RESULT,
TG_SYNTHESIS, TG_SUBAGENT_GOAL, TG_PLAN_STEP,
) )
@ -203,3 +210,97 @@ def agent_final_triples(
triples.append(_triple(final_uri, TG_DOCUMENT, _iri(document_id))) triples.append(_triple(final_uri, TG_DOCUMENT, _iri(document_id)))
return triples return triples
def agent_decomposition_triples(
    uri: str,
    session_uri: str,
    goals: List[str],
) -> List[Triple]:
    """Build triples for a supervisor decomposition step."""
    label = f"Decomposed into {len(goals)} research threads"
    base = [
        _triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(uri, RDF_TYPE, _iri(TG_DECOMPOSITION)),
        _triple(uri, RDFS_LABEL, _literal(label)),
        _triple(uri, PROV_WAS_GENERATED_BY, _iri(session_uri)),
    ]
    # One subagentGoal triple per research thread
    return base + [
        _triple(uri, TG_SUBAGENT_GOAL, _literal(goal)) for goal in goals
    ]
def agent_finding_triples(
    uri: str,
    decomposition_uri: str,
    goal: str,
    document_id: Optional[str] = None,
) -> List[Triple]:
    """Build triples for a subagent finding."""
    # Finding also carries the Answer mixin type so generic
    # answer handling picks it up.
    result = [
        _triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(uri, RDF_TYPE, _iri(TG_FINDING)),
        _triple(uri, RDF_TYPE, _iri(TG_ANSWER_TYPE)),
        _triple(uri, RDFS_LABEL, _literal(f"Finding: {goal[:60]}")),
        _triple(uri, PROV_WAS_DERIVED_FROM, _iri(decomposition_uri)),
        _triple(uri, TG_SUBAGENT_GOAL, _literal(goal)),
    ]
    if document_id:
        result += [_triple(uri, TG_DOCUMENT, _iri(document_id))]
    return result
def agent_plan_triples(
    uri: str,
    session_uri: str,
    steps: List[str],
) -> List[Triple]:
    """Build triples for a plan-then-execute plan."""
    label = f"Plan with {len(steps)} steps"
    base = [
        _triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(uri, RDF_TYPE, _iri(TG_PLAN_TYPE)),
        _triple(uri, RDFS_LABEL, _literal(label)),
        _triple(uri, PROV_WAS_GENERATED_BY, _iri(session_uri)),
    ]
    # One planStep triple per step goal
    return base + [
        _triple(uri, TG_PLAN_STEP, _literal(step)) for step in steps
    ]
def agent_step_result_triples(
    uri: str,
    plan_uri: str,
    goal: str,
    document_id: Optional[str] = None,
) -> List[Triple]:
    """Build triples for a plan step result."""
    # StepResult also carries the Answer mixin type so generic
    # answer handling picks it up.
    result = [
        _triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(uri, RDF_TYPE, _iri(TG_STEP_RESULT)),
        _triple(uri, RDF_TYPE, _iri(TG_ANSWER_TYPE)),
        _triple(uri, RDFS_LABEL, _literal(f"Step result: {goal[:60]}")),
        _triple(uri, PROV_WAS_DERIVED_FROM, _iri(plan_uri)),
        _triple(uri, TG_PLAN_STEP, _literal(goal)),
    ]
    if document_id:
        result += [_triple(uri, TG_DOCUMENT, _iri(document_id))]
    return result
def agent_synthesis_triples(
    uri: str,
    previous_uri: str,
    document_id: Optional[str] = None,
) -> List[Triple]:
    """Build triples for a synthesis answer."""
    # Synthesis also carries the Answer mixin type so generic
    # answer handling picks it up.
    result = [
        _triple(uri, RDF_TYPE, _iri(PROV_ENTITY)),
        _triple(uri, RDF_TYPE, _iri(TG_SYNTHESIS)),
        _triple(uri, RDF_TYPE, _iri(TG_ANSWER_TYPE)),
        _triple(uri, RDFS_LABEL, _literal("Synthesis")),
        _triple(uri, PROV_WAS_DERIVED_FROM, _iri(previous_uri)),
    ]
    if document_id:
        result += [_triple(uri, TG_DOCUMENT, _iri(document_id))]
    return result

View file

@ -94,8 +94,14 @@ TG_SYNTHESIS = TG + "Synthesis"
TG_ANALYSIS = TG + "Analysis" TG_ANALYSIS = TG + "Analysis"
TG_CONCLUSION = TG + "Conclusion" TG_CONCLUSION = TG + "Conclusion"
# Orchestrator entity types
TG_DECOMPOSITION = TG + "Decomposition" # Supervisor decomposed into sub-goals
TG_FINDING = TG + "Finding" # Subagent result
TG_PLAN_TYPE = TG + "Plan" # Plan-then-execute plan
TG_STEP_RESULT = TG + "StepResult" # Plan step result
# Unifying types for answer and intermediate commentary # Unifying types for answer and intermediate commentary
TG_ANSWER_TYPE = TG + "Answer" # Final answer (Synthesis, Conclusion) TG_ANSWER_TYPE = TG + "Answer" # Final answer (Synthesis, Conclusion, Finding, StepResult)
TG_REFLECTION_TYPE = TG + "Reflection" # Intermediate commentary (Thought, Observation) TG_REFLECTION_TYPE = TG + "Reflection" # Intermediate commentary (Thought, Observation)
TG_THOUGHT_TYPE = TG + "Thought" # Agent reasoning TG_THOUGHT_TYPE = TG + "Thought" # Agent reasoning
TG_OBSERVATION_TYPE = TG + "Observation" # Agent tool result TG_OBSERVATION_TYPE = TG + "Observation" # Agent tool result
@ -110,6 +116,8 @@ TG_THOUGHT = TG + "thought" # Links iteration to thought sub-entity
TG_ACTION = TG + "action" TG_ACTION = TG + "action"
TG_ARGUMENTS = TG + "arguments" TG_ARGUMENTS = TG + "arguments"
TG_OBSERVATION = TG + "observation" # Links iteration to observation sub-entity TG_OBSERVATION = TG + "observation" # Links iteration to observation sub-entity
TG_SUBAGENT_GOAL = TG + "subagentGoal" # Goal string on Decomposition/Finding
TG_PLAN_STEP = TG + "planStep" # Step goal string on Plan/StepResult
# Named graph URIs for RDF datasets # Named graph URIs for RDF datasets
# These separate different types of data while keeping them in the same collection # These separate different types of data while keeping them in the same collection

View file

@ -234,6 +234,31 @@ def agent_final_uri(session_id: str) -> str:
return f"urn:trustgraph:agent:{session_id}/final" return f"urn:trustgraph:agent:{session_id}/final"
def agent_decomposition_uri(session_id: str) -> str:
    """Generate URI for a supervisor decomposition step."""
    return "urn:trustgraph:agent:" + session_id + "/decompose"
def agent_finding_uri(session_id: str, index: int) -> str:
    """Generate URI for a subagent finding."""
    return "urn:trustgraph:agent:{}/finding/{}".format(session_id, index)
def agent_plan_uri(session_id: str) -> str:
    """Generate URI for a plan-then-execute plan."""
    return "urn:trustgraph:agent:" + session_id + "/plan"
def agent_step_result_uri(session_id: str, index: int) -> str:
    """Generate URI for a plan step result."""
    return "urn:trustgraph:agent:{}/step/{}".format(session_id, index)
def agent_synthesis_uri(session_id: str) -> str:
    """Generate URI for a synthesis answer."""
    return "urn:trustgraph:agent:" + session_id + "/synthesis"
# Document RAG provenance URIs # Document RAG provenance URIs
# These URIs use the urn:trustgraph:docrag: namespace to distinguish # These URIs use the urn:trustgraph:docrag: namespace to distinguish
# document RAG provenance from graph RAG provenance # document RAG provenance from graph RAG provenance

View file

@ -27,6 +27,8 @@ from . namespaces import (
TG_DOCUMENT_TYPE, TG_PAGE_TYPE, TG_CHUNK_TYPE, TG_SUBGRAPH_TYPE, TG_DOCUMENT_TYPE, TG_PAGE_TYPE, TG_CHUNK_TYPE, TG_SUBGRAPH_TYPE,
TG_CONCEPT, TG_ENTITY, TG_GROUNDING, TG_CONCEPT, TG_ENTITY, TG_GROUNDING,
TG_ANSWER_TYPE, TG_REFLECTION_TYPE, TG_THOUGHT_TYPE, TG_OBSERVATION_TYPE, TG_ANSWER_TYPE, TG_REFLECTION_TYPE, TG_THOUGHT_TYPE, TG_OBSERVATION_TYPE,
TG_DECOMPOSITION, TG_FINDING, TG_PLAN_TYPE, TG_STEP_RESULT,
TG_SUBAGENT_GOAL, TG_PLAN_STEP,
) )
@ -87,6 +89,10 @@ TG_CLASS_LABELS = [
_label_triple(TG_REFLECTION_TYPE, "Reflection"), _label_triple(TG_REFLECTION_TYPE, "Reflection"),
_label_triple(TG_THOUGHT_TYPE, "Thought"), _label_triple(TG_THOUGHT_TYPE, "Thought"),
_label_triple(TG_OBSERVATION_TYPE, "Observation"), _label_triple(TG_OBSERVATION_TYPE, "Observation"),
_label_triple(TG_DECOMPOSITION, "Decomposition"),
_label_triple(TG_FINDING, "Finding"),
_label_triple(TG_PLAN_TYPE, "Plan"),
_label_triple(TG_STEP_RESULT, "Step Result"),
] ]
# TrustGraph predicate labels # TrustGraph predicate labels
@ -109,6 +115,8 @@ TG_PREDICATE_LABELS = [
_label_triple(TG_SOURCE_CHAR_LENGTH, "source character length"), _label_triple(TG_SOURCE_CHAR_LENGTH, "source character length"),
_label_triple(TG_CONCEPT, "concept"), _label_triple(TG_CONCEPT, "concept"),
_label_triple(TG_ENTITY, "entity"), _label_triple(TG_ENTITY, "entity"),
_label_triple(TG_SUBAGENT_GOAL, "subagent goal"),
_label_triple(TG_PLAN_STEP, "plan step"),
] ]

View file

@ -13,6 +13,11 @@ from trustgraph.api import (
Question, Question,
Analysis, Analysis,
Conclusion, Conclusion,
Decomposition,
Finding,
Plan,
StepResult,
Synthesis,
AgentThought, AgentThought,
AgentObservation, AgentObservation,
AgentAnswer, AgentAnswer,
@ -209,6 +214,35 @@ def question_explainable(
if entity.observation: if entity.observation:
print(f" Observation: {entity.observation}", file=sys.stderr) print(f" Observation: {entity.observation}", file=sys.stderr)
elif isinstance(entity, Decomposition):
print(f"\n [decompose] {prov_id}", file=sys.stderr)
for i, goal in enumerate(entity.goals):
print(f" Thread {i}: {goal}", file=sys.stderr)
elif isinstance(entity, Finding):
print(f"\n [finding] {prov_id}", file=sys.stderr)
if entity.goal:
print(f" Goal: {entity.goal}", file=sys.stderr)
if entity.document:
print(f" Document: {entity.document}", file=sys.stderr)
elif isinstance(entity, Plan):
print(f"\n [plan] {prov_id}", file=sys.stderr)
for i, step in enumerate(entity.steps):
print(f" Step {i}: {step}", file=sys.stderr)
elif isinstance(entity, StepResult):
print(f"\n [step-result] {prov_id}", file=sys.stderr)
if entity.step:
print(f" Step: {entity.step}", file=sys.stderr)
if entity.document:
print(f" Document: {entity.document}", file=sys.stderr)
elif isinstance(entity, Synthesis):
print(f"\n [synthesis] {prov_id}", file=sys.stderr)
if entity.document:
print(f" Document: {entity.document}", file=sys.stderr)
elif isinstance(entity, Conclusion): elif isinstance(entity, Conclusion):
print(f"\n [conclusion] {prov_id}", file=sys.stderr) print(f"\n [conclusion] {prov_id}", file=sys.stderr)
if entity.document: if entity.document:

View file

@ -20,9 +20,19 @@ from trustgraph.provenance import (
agent_thought_uri, agent_thought_uri,
agent_observation_uri, agent_observation_uri,
agent_final_uri, agent_final_uri,
agent_decomposition_uri,
agent_finding_uri,
agent_plan_uri,
agent_step_result_uri,
agent_synthesis_uri,
agent_session_triples, agent_session_triples,
agent_iteration_triples, agent_iteration_triples,
agent_final_triples, agent_final_triples,
agent_decomposition_triples,
agent_finding_triples,
agent_plan_triples,
agent_step_result_triples,
agent_synthesis_triples,
set_graph, set_graph,
GRAPH_RETRIEVAL, GRAPH_RETRIEVAL,
) )
@ -359,6 +369,146 @@ class PatternBase:
explain_graph=GRAPH_RETRIEVAL, explain_graph=GRAPH_RETRIEVAL,
)) ))
# ---- Orchestrator provenance helpers ------------------------------------

async def _publish_explain_triples(
    self, flow, uri, triples, user, collection, respond, streaming,
):
    """
    Send provenance triples to the explainability flow and, when
    streaming, notify the client that a new explain entity exists.
    Shared by all orchestrator emit methods below.
    """
    await flow("explainability").send(Triples(
        metadata=Metadata(id=uri, user=user, collection=collection),
        triples=triples,
    ))
    if streaming:
        await respond(AgentResponse(
            chunk_type="explain", content="",
            explain_id=uri, explain_graph=GRAPH_RETRIEVAL,
        ))

async def _save_answer_doc(self, doc_id, user, content, title, kind):
    """
    Save answer content to the librarian.

    Returns doc_id on success, or None on failure; failure is logged
    but non-fatal — provenance triples are still emitted, just without
    a document reference. `kind` is the human-readable entity kind
    used in the warning message ("finding", "step result", "synthesis").
    """
    try:
        await self.processor.save_answer_content(
            doc_id=doc_id, user=user,
            content=content,
            title=title,
        )
        return doc_id
    except Exception as e:
        logger.warning(f"Failed to save {kind} to librarian: {e}")
        return None

async def emit_decomposition_triples(
    self, flow, session_id, session_uri, goals, user, collection,
    respond, streaming,
):
    """Emit provenance for a supervisor decomposition step."""
    uri = agent_decomposition_uri(session_id)
    triples = set_graph(
        agent_decomposition_triples(uri, session_uri, goals),
        GRAPH_RETRIEVAL,
    )
    await self._publish_explain_triples(
        flow, uri, triples, user, collection, respond, streaming,
    )

async def emit_finding_triples(
    self, flow, session_id, index, goal, answer_text, user, collection,
    respond, streaming,
):
    """Emit provenance for a subagent finding."""
    uri = agent_finding_uri(session_id, index)
    # Findings hang off the session's single decomposition entity
    decomposition_uri = agent_decomposition_uri(session_id)
    doc_id = await self._save_answer_doc(
        f"urn:trustgraph:agent:{session_id}/finding/{index}/doc",
        user, answer_text, f"Finding: {goal[:60]}", "finding",
    )
    triples = set_graph(
        agent_finding_triples(uri, decomposition_uri, goal, doc_id),
        GRAPH_RETRIEVAL,
    )
    await self._publish_explain_triples(
        flow, uri, triples, user, collection, respond, streaming,
    )

async def emit_plan_triples(
    self, flow, session_id, session_uri, steps, user, collection,
    respond, streaming,
):
    """Emit provenance for a plan creation."""
    uri = agent_plan_uri(session_id)
    triples = set_graph(
        agent_plan_triples(uri, session_uri, steps),
        GRAPH_RETRIEVAL,
    )
    await self._publish_explain_triples(
        flow, uri, triples, user, collection, respond, streaming,
    )

async def emit_step_result_triples(
    self, flow, session_id, index, goal, answer_text, user, collection,
    respond, streaming,
):
    """Emit provenance for a plan step result."""
    uri = agent_step_result_uri(session_id, index)
    # Step results hang off the session's single plan entity
    plan_uri = agent_plan_uri(session_id)
    doc_id = await self._save_answer_doc(
        f"urn:trustgraph:agent:{session_id}/step/{index}/doc",
        user, answer_text, f"Step result: {goal[:60]}", "step result",
    )
    triples = set_graph(
        agent_step_result_triples(uri, plan_uri, goal, doc_id),
        GRAPH_RETRIEVAL,
    )
    await self._publish_explain_triples(
        flow, uri, triples, user, collection, respond, streaming,
    )

async def emit_synthesis_triples(
    self, flow, session_id, previous_uri, answer_text, user, collection,
    respond, streaming,
):
    """Emit provenance for a synthesis answer."""
    uri = agent_synthesis_uri(session_id)
    doc_id = await self._save_answer_doc(
        f"urn:trustgraph:agent:{session_id}/synthesis/doc",
        user, answer_text, "Synthesis", "synthesis",
    )
    triples = set_graph(
        agent_synthesis_triples(uri, previous_uri, doc_id),
        GRAPH_RETRIEVAL,
    )
    await self._publish_explain_triples(
        flow, uri, triples, user, collection, respond, streaming,
    )
# ---- Response helpers --------------------------------------------------- # ---- Response helpers ---------------------------------------------------
async def prompt_as_answer(self, client, prompt_id, variables, async def prompt_as_answer(self, client, prompt_id, variables,

View file

@ -11,7 +11,7 @@ import uuid
from ... schema import AgentRequest, AgentResponse, AgentStep, PlanStep from ... schema import AgentRequest, AgentResponse, AgentStep, PlanStep
from ..react.types import Action
from . pattern_base import PatternBase from . pattern_base import PatternBase
@ -126,6 +126,13 @@ class PlanThenExecutePattern(PatternBase):
thought_text = f"Created plan with {len(plan_steps)} steps" thought_text = f"Created plan with {len(plan_steps)} steps"
await think(thought_text, is_final=True) await think(thought_text, is_final=True)
# Emit plan provenance
step_goals = [ps.get("goal", "") for ps in plan_steps]
await self.emit_plan_triples(
flow, session_id, session_uri, step_goals,
request.user, collection, respond, streaming,
)
# Build PlanStep objects # Build PlanStep objects
plan_agent_steps = [ plan_agent_steps = [
PlanStep( PlanStep(
@ -263,16 +270,10 @@ class PlanThenExecutePattern(PatternBase):
result=step_result, result=step_result,
) )
# Emit iteration provenance # Emit step result provenance
prov_act = Action( await self.emit_step_result_triples(
thought=f"Plan step {pending_idx}: {goal}", flow, session_id, pending_idx, goal, step_result,
name=tool_name, request.user, collection, respond, streaming,
arguments=tool_arguments,
observation=step_result,
)
await self.emit_iteration_triples(
flow, session_id, iteration_num, session_uri,
prov_act, request, respond, streaming,
) )
# Build execution step for history # Build execution step for history
@ -340,9 +341,12 @@ class PlanThenExecutePattern(PatternBase):
streaming=streaming, streaming=streaming,
) )
await self.emit_final_triples( # Emit synthesis provenance (links back to last step result)
flow, session_id, iteration_num, session_uri, from trustgraph.provenance import agent_step_result_uri
response_text, request, respond, streaming, last_step_uri = agent_step_result_uri(session_id, len(plan) - 1)
await self.emit_synthesis_triples(
flow, session_id, last_step_uri,
response_text, request.user, collection, respond, streaming,
) )
if self.is_subagent(request): if self.is_subagent(request):

View file

@ -427,6 +427,7 @@ class Processor(AgentService):
correlation_id = request.correlation_id correlation_id = request.correlation_id
subagent_goal = getattr(request, 'subagent_goal', '') subagent_goal = getattr(request, 'subagent_goal', '')
parent_session_id = getattr(request, 'parent_session_id', '')
# Extract the answer from the completion step # Extract the answer from the completion step
answer_text = "" answer_text = ""
@ -451,13 +452,26 @@ class Processor(AgentService):
) )
return return
# Emit finding provenance for this subagent
template = self.aggregator.get_original_request(correlation_id)
if template and parent_session_id:
entry = self.aggregator.correlations.get(correlation_id)
finding_index = len(entry["results"]) - 1 if entry else 0
collection = getattr(template, 'collection', 'default')
await self.supervisor_pattern.emit_finding_triples(
flow, parent_session_id, finding_index,
subagent_goal, answer_text,
template.user, collection,
respond, template.streaming,
)
if all_done: if all_done:
logger.info( logger.info(
f"All subagents complete for {correlation_id}, " f"All subagents complete for {correlation_id}, "
f"dispatching synthesis" f"dispatching synthesis"
) )
template = self.aggregator.get_original_request(correlation_id)
if template is None: if template is None:
logger.error( logger.error(
f"No template for correlation {correlation_id}" f"No template for correlation {correlation_id}"

View file

@ -16,7 +16,7 @@ import uuid
from ... schema import AgentRequest, AgentResponse, AgentStep from ... schema import AgentRequest, AgentResponse, AgentStep
from ..react.types import Action, Final from trustgraph.provenance import agent_finding_uri
from . pattern_base import PatternBase from . pattern_base import PatternBase
@ -121,15 +121,9 @@ class SupervisorPattern(PatternBase):
correlation_id = str(uuid.uuid4()) correlation_id = str(uuid.uuid4())
# Emit decomposition provenance # Emit decomposition provenance
decompose_act = Action( await self.emit_decomposition_triples(
thought=f"Decomposed into {len(goals)} sub-goals", flow, session_id, session_uri, goals,
name="decompose", request.user, collection, respond, streaming,
arguments={"goals": json.dumps(goals), "correlation_id": correlation_id},
observation=f"Fanning out {len(goals)} subagents",
)
await self.emit_iteration_triples(
flow, session_id, iteration_num, session_uri,
decompose_act, request, respond, streaming,
) )
# Fan out: emit a subagent request for each goal # Fan out: emit a subagent request for each goal
@ -207,10 +201,15 @@ class SupervisorPattern(PatternBase):
streaming=streaming, streaming=streaming,
) )
await self.emit_final_triples( # Emit synthesis provenance (links back to last finding)
flow, session_id, iteration_num, session_uri, last_finding_uri = agent_finding_uri(
response_text, request, respond, streaming, session_id, len(subagent_results) - 1
) )
await self.emit_synthesis_triples(
flow, session_id, last_finding_uri,
response_text, request.user, collection, respond, streaming,
)
await self.send_final_response( await self.send_final_response(
respond, streaming, response_text, already_streamed=streaming, respond, streaming, response_text, already_streamed=streaming,
) )