Add agent explainability instrumentation and unify envelope field naming (#795)

Addresses recommendations from the UX developer's agent experience report.
Adds provenance predicates, DAG structure changes, error resilience, and
a published OWL ontology.

Explainability additions:

- Tool candidates: tg:toolCandidate on Analysis events lists the tools
  visible to the LLM for each iteration (tool names only; full descriptions
  remain in the agent configuration)
- Termination reason: tg:terminationReason on Conclusion/Synthesis events
  (final-answer, plan-complete, subagents-complete)
- Step counter: tg:stepNumber on iteration events
- Pattern decision: new tg:PatternDecision entity in the DAG between
  session and first iteration, carrying tg:pattern and tg:taskType
- Latency: tg:llmDurationMs on Analysis events, tg:toolDurationMs on
  Observation events
- Token counts on events: tg:inToken/tg:outToken/tg:llmModel on
  Grounding, Focus, Synthesis, and Analysis events
- Tool/parse errors: tg:toolError on Observation events with tg:Error
  mixin type. Parse failures return as error observations instead of
  crashing the agent, giving it a chance to retry.

Envelope unification:

- Rename chunk_type to message_type across AgentResponse schema,
  translator, SDK types, socket clients, CLI, and all tests.
  Agent and RAG services now both use message_type on the wire.

Ontology:

- specs/ontology/trustgraph.ttl — OWL vocabulary covering all 26 classes,
  7 object properties, and 36+ datatype properties, including the new
  explainability predicates introduced in this commit.

DAG structure tests:

- tests/unit/test_provenance/test_dag_structure.py verifies the
  wasDerivedFrom chain for GraphRAG, DocumentRAG, and all three agent
  patterns (react, plan, supervisor) including the pattern-decision link.
This commit is contained in:
cybermaggedon 2026-04-13 16:16:42 +01:00 committed by GitHub
parent 14e49d83c7
commit d2751553a3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
42 changed files with 1577 additions and 205 deletions

View file

@ -126,7 +126,7 @@ def question_explainable(
try:
# Track last chunk type for formatting
last_chunk_type = None
last_message_type = None
current_outputter = None
# Stream agent with explainability - process events as they arrive
@ -138,7 +138,7 @@ def question_explainable(
group=group,
):
if isinstance(item, AgentThought):
if last_chunk_type != "thought":
if last_message_type != "thought":
if current_outputter:
current_outputter.__exit__(None, None, None)
current_outputter = None
@ -146,7 +146,7 @@ def question_explainable(
if verbose:
current_outputter = Outputter(width=78, prefix="\U0001f914 ")
current_outputter.__enter__()
last_chunk_type = "thought"
last_message_type = "thought"
if current_outputter:
current_outputter.output(item.content)
if current_outputter.word_buffer:
@ -155,7 +155,7 @@ def question_explainable(
current_outputter.word_buffer = ""
elif isinstance(item, AgentObservation):
if last_chunk_type != "observation":
if last_message_type != "observation":
if current_outputter:
current_outputter.__exit__(None, None, None)
current_outputter = None
@ -163,7 +163,7 @@ def question_explainable(
if verbose:
current_outputter = Outputter(width=78, prefix="\U0001f4a1 ")
current_outputter.__enter__()
last_chunk_type = "observation"
last_message_type = "observation"
if current_outputter:
current_outputter.output(item.content)
if current_outputter.word_buffer:
@ -172,12 +172,12 @@ def question_explainable(
current_outputter.word_buffer = ""
elif isinstance(item, AgentAnswer):
if last_chunk_type != "answer":
if last_message_type != "answer":
if current_outputter:
current_outputter.__exit__(None, None, None)
current_outputter = None
print()
last_chunk_type = "answer"
last_message_type = "answer"
# Print answer content directly
print(item.content, end="", flush=True)
@ -261,7 +261,7 @@ def question_explainable(
current_outputter = None
# Final newline if we ended with answer
if last_chunk_type == "answer":
if last_message_type == "answer":
print()
finally:
@ -322,16 +322,16 @@ def question(
# Handle streaming response
if streaming:
# Track last chunk type and current outputter for streaming
last_chunk_type = None
last_message_type = None
current_outputter = None
last_answer_chunk = None
for chunk in response:
chunk_type = chunk.chunk_type
message_type = chunk.message_type
content = chunk.content
# Check if we're switching to a new message type
if last_chunk_type != chunk_type:
if last_message_type != message_type:
# Close previous outputter if exists
if current_outputter:
current_outputter.__exit__(None, None, None)
@ -339,15 +339,15 @@ def question(
print() # Blank line between message types
# Create new outputter for new message type
if chunk_type == "thought" and verbose:
if message_type == "thought" and verbose:
current_outputter = Outputter(width=78, prefix="\U0001f914 ")
current_outputter.__enter__()
elif chunk_type == "observation" and verbose:
elif message_type == "observation" and verbose:
current_outputter = Outputter(width=78, prefix="\U0001f4a1 ")
current_outputter.__enter__()
# For answer, don't use Outputter - just print as-is
last_chunk_type = chunk_type
last_message_type = message_type
# Output the chunk
if current_outputter:
@ -357,7 +357,7 @@ def question(
print(current_outputter.word_buffer, end="", flush=True)
current_outputter.column += len(current_outputter.word_buffer)
current_outputter.word_buffer = ""
elif chunk_type == "final-answer":
elif message_type == "final-answer":
print(content, end="", flush=True)
last_answer_chunk = chunk
@ -366,7 +366,7 @@ def question(
current_outputter.__exit__(None, None, None)
current_outputter = None
# Add final newline if we were outputting answer
elif last_chunk_type == "final-answer":
elif last_message_type == "final-answer":
print()
if show_usage and last_answer_chunk:
@ -382,17 +382,17 @@ def question(
# so we iterate through the chunks (which are complete messages, not text chunks)
for chunk in response:
# Display thoughts if verbose
if chunk.chunk_type == "thought" and verbose:
if chunk.message_type == "thought" and verbose:
output(wrap(chunk.content), "\U0001f914 ")
print()
# Display observations if verbose
elif chunk.chunk_type == "observation" and verbose:
elif chunk.message_type == "observation" and verbose:
output(wrap(chunk.content), "\U0001f4a1 ")
print()
# Display answer
elif chunk.chunk_type == "final-answer" or chunk.chunk_type == "answer":
elif chunk.message_type == "final-answer" or chunk.message_type == "answer":
print(chunk.content)
finally: