Mirror of https://github.com/trustgraph-ai/trustgraph.git, synced 2026-04-25 08:26:21 +02:00
Expose LLM token usage (in_token, out_token, model) across all service layers

Propagate token counts from LLM services through the prompt, text-completion, graph-RAG, document-RAG, and agent orchestrator pipelines to the API gateway and Python SDK. All fields are Optional: None means "not available", as distinct from a genuine zero count.

Key changes:

- Schema: add in_token/out_token/model to TextCompletionResponse, PromptResponse, GraphRagResponse, DocumentRagResponse, and AgentResponse.
- TextCompletionClient: new TextCompletionResult return type. Split into text_completion() (non-streaming) and text_completion_stream() (streaming, with a per-chunk handler callback).
- PromptClient: new PromptResult with response_type (text/json/jsonl), typed fields (text/object/objects), and token usage. All callers updated.
- RAG services: accumulate token usage across all prompt calls (extract-concepts, edge-scoring, edge-reasoning, synthesis). The non-streaming path sends a single combined response instead of chunk + end_of_session.
- Agent orchestrator: UsageTracker accumulates tokens across meta-router, pattern prompt calls, and react reasoning; attached to end_of_dialog.
- Translators: encode token fields only when not None (is not None, not truthy); a sketch of this convention follows this list.
- Python SDK: RAG and text-completion methods return TextCompletionResult (non-streaming) or RAGChunk/AgentAnswer with token fields (streaming).
- CLI: --show-usage flag on tg-invoke-llm, tg-invoke-prompt, tg-invoke-graph-rag, tg-invoke-document-rag, tg-invoke-agent.
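A minimal sketch of the translator convention referenced above, assuming a dict-shaped wire format; serialize_response() and the field layout here are illustrative, not the gateway's actual code. The point is the is-not-None check:

def serialize_response(resp):
    out = {"response": resp.response}
    # Check "is not None" rather than truthiness: a genuine zero token
    # count must still be encoded; only None means "not available".
    if resp.in_token is not None:
        out["in_token"] = resp.in_token
    if resp.out_token is not None:
        out["out_token"] = resp.out_token
    if resp.model is not None:
        out["model"] = resp.model
    return out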
80 lines · 2.3 KiB · Python
from dataclasses import dataclass
from typing import Optional

from . request_response_spec import RequestResponse, RequestResponseSpec
from .. schema import TextCompletionRequest, TextCompletionResponse

@dataclass
class TextCompletionResult:
    # Token fields are Optional: None means the underlying LLM service
    # did not report usage, as distinct from a genuine zero count.
    text: Optional[str]
    in_token: Optional[int] = None
    out_token: Optional[int] = None
    model: Optional[str] = None

class TextCompletionClient(RequestResponse):

    async def text_completion(self, system, prompt, timeout=600):

        resp = await self.request(
            TextCompletionRequest(
                system = system, prompt = prompt, streaming = False
            ),
            timeout=timeout
        )

        if resp.error:
            raise RuntimeError(resp.error.message)

        return TextCompletionResult(
            text = resp.response,
            in_token = resp.in_token,
            out_token = resp.out_token,
            model = resp.model,
        )

    async def text_completion_stream(
        self, system, prompt, handler, timeout=600,
    ):
        """
        Streaming text completion. `handler` is an async callable invoked
        once per chunk with the chunk's TextCompletionResponse. Returns a
        TextCompletionResult with text=None and token counts / model taken
        from the end_of_stream message.
        """

        async def on_chunk(resp):

            if resp.error:
                raise RuntimeError(resp.error.message)

            await handler(resp)

            # True once the final (end_of_stream) message has arrived
            return getattr(resp, "end_of_stream", False)

        final = await self.request(
            TextCompletionRequest(
                system = system, prompt = prompt, streaming = True
            ),
            recipient=on_chunk,
            timeout=timeout,
        )

        return TextCompletionResult(
            text = None,
            in_token = final.in_token,
            out_token = final.out_token,
            model = final.model,
        )

class TextCompletionClientSpec(RequestResponseSpec):

    def __init__(
        self, request_name, response_name,
    ):
        super(TextCompletionClientSpec, self).__init__(
            request_name = request_name,
            request_schema = TextCompletionRequest,
            response_name = response_name,
            response_schema = TextCompletionResponse,
            impl = TextCompletionClient,
        )
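For illustration, here is how a caller might drive the streaming API above. This usage sketch is not part of the file, and assumes `client` is an already-connected TextCompletionClient (construction depends on the surrounding SDK wiring):

async def print_stream(client):
    chunks = []

    async def on_chunk(resp):
        # The handler sees every message, including the final
        # end_of_stream one; partial text arrives in resp.response.
        if resp.response:
            chunks.append(resp.response)

    result = await client.text_completion_stream(
        system = "You are a helpful assistant.",
        prompt = "Summarise this change in one sentence.",
        handler = on_chunk,
    )

    print("".join(chunks))

    # None means usage was not reported, as distinct from a real zero.
    if result.in_token is not None and result.out_token is not None:
        print(f"{result.model}: {result.in_token} in / {result.out_token} out")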