2025-11-26 19:47:39 +00:00
|
|
|
"""
|
|
|
|
|
Integration tests for DocumentRAG streaming functionality
|
|
|
|
|
|
|
|
|
|
These tests verify the streaming behavior of DocumentRAG, testing token-by-token
|
|
|
|
|
response delivery through the complete pipeline.
|
|
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
from unittest.mock import AsyncMock
|
|
|
|
|
from trustgraph.retrieval.document_rag.document_rag import DocumentRag
|
2026-03-09 10:53:44 +00:00
|
|
|
from trustgraph.schema import ChunkMatch
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
from trustgraph.base import PromptResult
|
2025-11-26 19:47:39 +00:00
|
|
|
from tests.utils.streaming_assertions import (
|
|
|
|
|
assert_streaming_chunks_valid,
|
|
|
|
|
assert_callback_invoked,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
2026-03-07 23:38:09 +00:00
|
|
|
# Sample chunk content for testing - maps chunk_id to content
|
|
|
|
|
CHUNK_CONTENT = {
|
|
|
|
|
"doc/c1": "Machine learning is a subset of AI.",
|
|
|
|
|
"doc/c2": "Deep learning uses neural networks.",
|
|
|
|
|
"doc/c3": "Supervised learning needs labeled data.",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2025-11-26 19:47:39 +00:00
|
|
|
@pytest.mark.integration
|
|
|
|
|
class TestDocumentRagStreaming:
|
|
|
|
|
"""Integration tests for DocumentRAG streaming"""
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
|
|
|
|
|
def mock_embeddings_client(self):
|
|
|
|
|
"""Mock embeddings client"""
|
|
|
|
|
client = AsyncMock()
|
2026-03-08 19:42:26 +00:00
|
|
|
# New batch format: [[[vectors_for_text1]]]
|
|
|
|
|
client.embed.return_value = [[[0.1, 0.2, 0.3, 0.4, 0.5]]]
|
2025-11-26 19:47:39 +00:00
|
|
|
return client
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
|
|
|
|
|
def mock_doc_embeddings_client(self):
|
2026-03-09 10:53:44 +00:00
|
|
|
"""Mock document embeddings client that returns chunk matches"""
|
2025-11-26 19:47:39 +00:00
|
|
|
client = AsyncMock()
|
2026-03-09 10:53:44 +00:00
|
|
|
# Returns ChunkMatch objects with chunk_id and score
|
|
|
|
|
client.query.return_value = [
|
|
|
|
|
ChunkMatch(chunk_id="doc/c1", score=0.95),
|
|
|
|
|
ChunkMatch(chunk_id="doc/c2", score=0.90),
|
|
|
|
|
ChunkMatch(chunk_id="doc/c3", score=0.85)
|
|
|
|
|
]
|
2025-11-26 19:47:39 +00:00
|
|
|
return client
|
|
|
|
|
|
2026-03-07 23:38:09 +00:00
|
|
|
@pytest.fixture
|
|
|
|
|
def mock_fetch_chunk(self):
|
|
|
|
|
"""Mock fetch_chunk function that retrieves chunk content from librarian"""
|
|
|
|
|
async def fetch(chunk_id, user):
|
|
|
|
|
return CHUNK_CONTENT.get(chunk_id, f"Content for {chunk_id}")
|
|
|
|
|
return fetch
|
|
|
|
|
|
2025-11-26 19:47:39 +00:00
|
|
|
@pytest.fixture
|
|
|
|
|
def mock_streaming_prompt_client(self, mock_streaming_llm_response):
|
|
|
|
|
"""Mock prompt client with streaming support"""
|
|
|
|
|
client = AsyncMock()
|
|
|
|
|
|
|
|
|
|
async def document_prompt_side_effect(query, documents, timeout=600, streaming=False, chunk_callback=None):
|
|
|
|
|
# Both modes return the same text
|
|
|
|
|
full_text = "Machine learning is a subset of artificial intelligence that focuses on algorithms that learn from data."
|
|
|
|
|
|
|
|
|
|
if streaming and chunk_callback:
|
2026-01-06 21:48:05 +00:00
|
|
|
# Simulate streaming chunks with end_of_stream flags
|
|
|
|
|
chunks = []
|
2025-11-26 19:47:39 +00:00
|
|
|
async for chunk in mock_streaming_llm_response():
|
2026-01-06 21:48:05 +00:00
|
|
|
chunks.append(chunk)
|
|
|
|
|
|
|
|
|
|
# Send all chunks with end_of_stream=False except the last
|
|
|
|
|
for i, chunk in enumerate(chunks):
|
|
|
|
|
is_final = (i == len(chunks) - 1)
|
|
|
|
|
await chunk_callback(chunk, is_final)
|
|
|
|
|
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
return PromptResult(response_type="text", text=full_text)
|
2025-11-26 19:47:39 +00:00
|
|
|
else:
|
|
|
|
|
# Non-streaming response - same text
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
return PromptResult(response_type="text", text=full_text)
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
client.document_prompt.side_effect = document_prompt_side_effect
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
# Mock prompt() for extract-concepts call in DocumentRag
|
|
|
|
|
client.prompt.return_value = PromptResult(response_type="text", text="")
|
2025-11-26 19:47:39 +00:00
|
|
|
return client
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
|
|
|
|
|
def document_rag_streaming(self, mock_embeddings_client, mock_doc_embeddings_client,
|
2026-03-07 23:38:09 +00:00
|
|
|
mock_streaming_prompt_client, mock_fetch_chunk):
|
2025-11-26 19:47:39 +00:00
|
|
|
"""Create DocumentRag instance with streaming support"""
|
|
|
|
|
return DocumentRag(
|
|
|
|
|
embeddings_client=mock_embeddings_client,
|
|
|
|
|
doc_embeddings_client=mock_doc_embeddings_client,
|
|
|
|
|
prompt_client=mock_streaming_prompt_client,
|
2026-03-07 23:38:09 +00:00
|
|
|
fetch_chunk=mock_fetch_chunk,
|
2025-11-26 19:47:39 +00:00
|
|
|
verbose=True
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_basic(self, document_rag_streaming, streaming_chunk_collector):
|
|
|
|
|
"""Test basic DocumentRAG streaming functionality"""
|
|
|
|
|
# Arrange
|
|
|
|
|
query = "What is machine learning?"
|
|
|
|
|
collector = streaming_chunk_collector()
|
|
|
|
|
|
|
|
|
|
# Act
|
|
|
|
|
result = await document_rag_streaming.query(
|
|
|
|
|
query=query,
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=10,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=collector.collect
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert
|
|
|
|
|
assert_streaming_chunks_valid(collector.chunks, min_chunks=1)
|
|
|
|
|
assert_callback_invoked(AsyncMock(call_count=len(collector.chunks)), min_calls=1)
|
|
|
|
|
|
2026-01-06 21:48:05 +00:00
|
|
|
# Verify streaming protocol compliance
|
|
|
|
|
collector.verify_streaming_protocol()
|
|
|
|
|
|
2025-11-26 19:47:39 +00:00
|
|
|
# Verify full response matches concatenated chunks
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
result_text, usage = result
|
2025-11-26 19:47:39 +00:00
|
|
|
full_from_chunks = collector.get_full_text()
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
assert result_text == full_from_chunks
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
# Verify content is reasonable
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
assert len(result_text) > 0
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_vs_non_streaming(self, document_rag_streaming):
|
|
|
|
|
"""Test that streaming and non-streaming produce equivalent results"""
|
|
|
|
|
# Arrange
|
|
|
|
|
query = "What is machine learning?"
|
|
|
|
|
user = "test_user"
|
|
|
|
|
collection = "test_collection"
|
|
|
|
|
doc_limit = 10
|
|
|
|
|
|
|
|
|
|
# Act - Non-streaming
|
|
|
|
|
non_streaming_result = await document_rag_streaming.query(
|
|
|
|
|
query=query,
|
|
|
|
|
user=user,
|
|
|
|
|
collection=collection,
|
|
|
|
|
doc_limit=doc_limit,
|
|
|
|
|
streaming=False
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Act - Streaming
|
|
|
|
|
streaming_chunks = []
|
|
|
|
|
|
2026-01-06 21:48:05 +00:00
|
|
|
async def collect(chunk, end_of_stream):
|
2025-11-26 19:47:39 +00:00
|
|
|
streaming_chunks.append(chunk)
|
|
|
|
|
|
|
|
|
|
streaming_result = await document_rag_streaming.query(
|
|
|
|
|
query=query,
|
|
|
|
|
user=user,
|
|
|
|
|
collection=collection,
|
|
|
|
|
doc_limit=doc_limit,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=collect
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert - Results should be equivalent
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
non_streaming_text, _ = non_streaming_result
|
|
|
|
|
streaming_text, _ = streaming_result
|
|
|
|
|
assert streaming_text == non_streaming_text
|
2025-11-26 19:47:39 +00:00
|
|
|
assert len(streaming_chunks) > 0
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
assert "".join(streaming_chunks) == streaming_text
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_callback_invocation(self, document_rag_streaming):
|
|
|
|
|
"""Test that chunk callback is invoked correctly"""
|
|
|
|
|
# Arrange
|
|
|
|
|
callback = AsyncMock()
|
|
|
|
|
|
|
|
|
|
# Act
|
|
|
|
|
result = await document_rag_streaming.query(
|
|
|
|
|
query="test query",
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=5,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=callback
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
result_text, usage = result
|
2025-11-26 19:47:39 +00:00
|
|
|
assert callback.call_count > 0
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
assert result_text is not None
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
# Verify all callback invocations had string arguments
|
|
|
|
|
for call in callback.call_args_list:
|
|
|
|
|
assert isinstance(call.args[0], str)
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_without_callback(self, document_rag_streaming):
|
|
|
|
|
"""Test streaming parameter without callback (should fall back to non-streaming)"""
|
|
|
|
|
# Arrange & Act
|
|
|
|
|
result = await document_rag_streaming.query(
|
|
|
|
|
query="test query",
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=5,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=None # No callback provided
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert - Should complete without error
|
|
|
|
|
assert result is not None
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
result_text, usage = result
|
|
|
|
|
assert isinstance(result_text, str)
|
2025-11-26 19:47:39 +00:00
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_with_no_documents(self, document_rag_streaming,
|
|
|
|
|
mock_doc_embeddings_client):
|
|
|
|
|
"""Test streaming with no documents found"""
|
|
|
|
|
# Arrange
|
2026-03-07 23:38:09 +00:00
|
|
|
mock_doc_embeddings_client.query.return_value = [] # No chunk_ids
|
2025-11-26 19:47:39 +00:00
|
|
|
callback = AsyncMock()
|
|
|
|
|
|
|
|
|
|
# Act
|
|
|
|
|
result = await document_rag_streaming.query(
|
|
|
|
|
query="unknown topic",
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=10,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=callback
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert - Should still produce streamed response
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
result_text, usage = result
|
|
|
|
|
assert result_text is not None
|
2025-11-26 19:47:39 +00:00
|
|
|
assert callback.call_count > 0
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_error_propagation(self, document_rag_streaming,
|
|
|
|
|
mock_embeddings_client):
|
|
|
|
|
"""Test that errors during streaming are properly propagated"""
|
|
|
|
|
# Arrange
|
|
|
|
|
mock_embeddings_client.embed.side_effect = Exception("Embeddings error")
|
|
|
|
|
callback = AsyncMock()
|
|
|
|
|
|
|
|
|
|
# Act & Assert
|
|
|
|
|
with pytest.raises(Exception) as exc_info:
|
|
|
|
|
await document_rag_streaming.query(
|
|
|
|
|
query="test query",
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=5,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=callback
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
assert "Embeddings error" in str(exc_info.value)
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_with_different_doc_limits(self, document_rag_streaming,
|
|
|
|
|
mock_doc_embeddings_client):
|
|
|
|
|
"""Test streaming with various document limits"""
|
|
|
|
|
# Arrange
|
|
|
|
|
callback = AsyncMock()
|
|
|
|
|
doc_limits = [1, 5, 10, 20]
|
|
|
|
|
|
|
|
|
|
for limit in doc_limits:
|
|
|
|
|
# Reset mocks
|
|
|
|
|
mock_doc_embeddings_client.reset_mock()
|
|
|
|
|
callback.reset_mock()
|
|
|
|
|
|
|
|
|
|
# Act
|
|
|
|
|
result = await document_rag_streaming.query(
|
|
|
|
|
query="test query",
|
|
|
|
|
user="test_user",
|
|
|
|
|
collection="test_collection",
|
|
|
|
|
doc_limit=limit,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=callback
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert
|
Expose LLM token usage across all service layers (#782)
Expose LLM token usage (in_token, out_token, model) across all
service layers
Propagate token counts from LLM services through the prompt,
text-completion, graph-RAG, document-RAG, and agent orchestrator
pipelines to the API gateway and Python SDK. All fields are Optional
— None means "not available", distinguishing from a real zero count.
Key changes:
- Schema: Add in_token/out_token/model to TextCompletionResponse,
PromptResponse, GraphRagResponse, DocumentRagResponse,
AgentResponse
- TextCompletionClient: New TextCompletionResult return type. Split
into text_completion() (non-streaming) and
text_completion_stream() (streaming with per-chunk handler
callback)
- PromptClient: New PromptResult with response_type
(text/json/jsonl), typed fields (text/object/objects), and token
usage. All callers updated.
- RAG services: Accumulate token usage across all prompt calls
(extract-concepts, edge-scoring, edge-reasoning,
synthesis). Non-streaming path sends single combined response
instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across
meta-router, pattern prompt calls, and react reasoning. Attached
to end_of_dialog.
- Translators: Encode token fields when not None (is not None, not truthy)
- Python SDK: RAG and text-completion methods return
TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with
token fields (streaming)
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt,
tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent
2026-04-13 14:38:34 +01:00
|
|
|
result_text, usage = result
|
|
|
|
|
assert result_text is not None
|
2025-11-26 19:47:39 +00:00
|
|
|
assert callback.call_count > 0
|
|
|
|
|
|
|
|
|
|
# Verify doc_limit was passed correctly
|
|
|
|
|
call_args = mock_doc_embeddings_client.query.call_args
|
|
|
|
|
assert call_args.kwargs['limit'] == limit
|
|
|
|
|
|
|
|
|
|
@pytest.mark.asyncio
|
|
|
|
|
async def test_document_rag_streaming_preserves_user_collection(self, document_rag_streaming,
|
|
|
|
|
mock_doc_embeddings_client):
|
|
|
|
|
"""Test that streaming preserves user/collection isolation"""
|
|
|
|
|
# Arrange
|
|
|
|
|
callback = AsyncMock()
|
|
|
|
|
user = "test_user_123"
|
|
|
|
|
collection = "test_collection_456"
|
|
|
|
|
|
|
|
|
|
# Act
|
|
|
|
|
await document_rag_streaming.query(
|
|
|
|
|
query="test query",
|
|
|
|
|
user=user,
|
|
|
|
|
collection=collection,
|
|
|
|
|
doc_limit=10,
|
|
|
|
|
streaming=True,
|
|
|
|
|
chunk_callback=callback
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
# Assert - Verify user/collection were passed to document embeddings client
|
|
|
|
|
call_args = mock_doc_embeddings_client.query.call_args
|
|
|
|
|
assert call_args.kwargs['user'] == user
|
|
|
|
|
assert call_args.kwargs['collection'] == collection
|