mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-26 00:46:22 +02:00
Tool services - dynamically pluggable tool implementations for agent frameworks (#658)
* New schema * Tool service implementation * Base class * Joke service, for testing * Update unit tests for tool services
This commit is contained in:
parent
0b83c08ae4
commit
a38ca9474f
13 changed files with 1396 additions and 45 deletions
|
|
@ -32,6 +32,8 @@ from . agent_service import AgentService
|
|||
from . graph_rag_client import GraphRagClientSpec
|
||||
from . tool_service import ToolService
|
||||
from . tool_client import ToolClientSpec
|
||||
from . dynamic_tool_service import DynamicToolService
|
||||
from . tool_service_client import ToolServiceClientSpec
|
||||
from . agent_client import AgentClientSpec
|
||||
from . structured_query_client import StructuredQueryClientSpec
|
||||
from . row_embeddings_query_client import RowEmbeddingsQueryClientSpec
|
||||
|
|
|
|||
184
trustgraph-base/trustgraph/base/dynamic_tool_service.py
Normal file
184
trustgraph-base/trustgraph/base/dynamic_tool_service.py
Normal file
|
|
@ -0,0 +1,184 @@
|
|||
|
||||
"""
|
||||
Base class for dynamically pluggable tool services.
|
||||
|
||||
Tool services are Pulsar services that can be invoked as agent tools.
|
||||
They receive a ToolServiceRequest with user, config, and arguments,
|
||||
and return a ToolServiceResponse with the result.
|
||||
|
||||
Uses direct Pulsar topics (no flow configuration required):
|
||||
- Request: non-persistent://tg/request/{topic}
|
||||
- Response: non-persistent://tg/response/{topic}
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import asyncio
|
||||
import argparse
|
||||
from prometheus_client import Counter
|
||||
|
||||
from .. schema import ToolServiceRequest, ToolServiceResponse, Error
|
||||
from .. exceptions import TooManyRequests
|
||||
from . async_processor import AsyncProcessor
|
||||
from . consumer import Consumer
|
||||
from . producer import Producer
|
||||
from . metrics import ConsumerMetrics, ProducerMetrics
|
||||
|
||||
# Module logger
logger = logging.getLogger(__name__)

# Default number of concurrent request handlers (reserved for CLI/subclass wiring)
default_concurrency = 1

# Default topic fragment; expanded to non-persistent://tg/{request,response}/{topic}
default_topic = "tool"
|
||||
|
||||
|
||||
class DynamicToolService(AsyncProcessor):

    """
    Base class for implementing dynamic tool services.

    Subclasses should override the `invoke` method to implement
    the tool's logic.

    The invoke method receives:
    - user: The user context for multi-tenancy
    - config: Dict of config values from the tool descriptor
    - arguments: Dict of arguments from the LLM

    And should return a string response (the observation).
    """

    def __init__(self, **params):
        """
        Set up the request consumer and response producer on direct
        Pulsar topics (no flow configuration required).

        Recognized params:
            id: Processor identifier, used for metrics labels and the
                consumer subscriber name.
            topic: Topic name fragment (default: "tool").
        """

        super(DynamicToolService, self).__init__(**params)

        self.id = params.get("id")
        topic = params.get("topic", default_topic)

        # Build direct Pulsar topic paths
        request_topic = f"non-persistent://tg/request/{topic}"
        response_topic = f"non-persistent://tg/response/{topic}"

        logger.info(
            "Tool service topics: request=%s, response=%s",
            request_topic, response_topic
        )

        # Create consumer for requests
        consumer_metrics = ConsumerMetrics(
            processor=self.id, flow=None, name="request"
        )

        self.consumer = Consumer(
            taskgroup=self.taskgroup,
            backend=self.pubsub,
            subscriber=f"{self.id}-request",
            flow=None,
            topic=request_topic,
            schema=ToolServiceRequest,
            handler=self.on_request,
            metrics=consumer_metrics,
        )

        # Create producer for responses
        producer_metrics = ProducerMetrics(
            processor=self.id, flow=None, name="response"
        )

        self.producer = Producer(
            backend=self.pubsub,
            topic=response_topic,
            schema=ToolServiceResponse,
            metrics=producer_metrics,
        )

        # Counter is shared across all instances; guard against
        # double-registration with the Prometheus registry.
        if not hasattr(__class__, "tool_service_metric"):
            __class__.tool_service_metric = Counter(
                'dynamic_tool_service_invocation_count',
                'Dynamic tool service invocation count',
                ["id"],
            )

    async def start(self):
        """Start the processor, then bring up producer and consumer."""
        await super(DynamicToolService, self).start()
        await self.producer.start()
        await self.consumer.start()
        logger.info(f"Tool service {self.id} started")

    async def on_request(self, msg, consumer, flow):
        """
        Handle one ToolServiceRequest message.

        Decodes the request, invokes the tool implementation, and sends
        a ToolServiceResponse carrying either the result or an Error.
        TooManyRequests is re-raised so the consumer framework can apply
        back-pressure; any other exception becomes an error response.
        """

        # Sender-produced correlation ID; None until the message
        # properties have been read successfully.
        # (Renamed from `id` to avoid shadowing the builtin.)
        request_id = None

        try:

            request = msg.value()

            # Sender-produced ID for correlation
            request_id = msg.properties().get("id", "unknown")

            # Parse the request; default tenant and empty dicts when absent
            user = request.user or "trustgraph"
            config = json.loads(request.config) if request.config else {}
            arguments = json.loads(request.arguments) if request.arguments else {}

            logger.debug(
                "Tool service request: user=%s, config=%s, arguments=%s",
                user, config, arguments
            )

            # Invoke the tool implementation
            response = await self.invoke(user, config, arguments)

            # Send success response; non-string results are JSON-encoded
            await self.producer.send(
                ToolServiceResponse(
                    error=None,
                    response=response if isinstance(response, str) else json.dumps(response),
                    end_of_stream=True,
                ),
                properties={"id": request_id}
            )

            __class__.tool_service_metric.labels(
                id=self.id,
            ).inc()

        except TooManyRequests:
            # Bare raise preserves the original traceback for the framework
            raise

        except Exception as e:

            logger.error(f"Exception in dynamic tool service: {e}", exc_info=True)

            logger.info("Sending error response...")

            await self.producer.send(
                ToolServiceResponse(
                    error=Error(
                        type="tool-service-error",
                        message=str(e),
                    ),
                    response="",
                    end_of_stream=True,
                ),
                properties={"id": request_id if request_id else "unknown"}
            )

    async def invoke(self, user, config, arguments):
        """
        Invoke the tool service.

        Override this method in subclasses to implement the tool's logic.

        Args:
            user: The user context for multi-tenancy
            config: Dict of config values from the tool descriptor
            arguments: Dict of arguments from the LLM

        Returns:
            A string response (the observation) or a dict/list that will be JSON-encoded

        Raises:
            NotImplementedError: always, unless overridden by a subclass.
        """
        raise NotImplementedError("Subclasses must implement invoke()")

    @staticmethod
    def add_args(parser):
        """Register command-line arguments for this service."""

        AsyncProcessor.add_args(parser)

        parser.add_argument(
            '-t', '--topic',
            default=default_topic,
            help=f'Topic name for request/response (default: {default_topic})'
        )
|
||||
90
trustgraph-base/trustgraph/base/tool_service_client.py
Normal file
90
trustgraph-base/trustgraph/base/tool_service_client.py
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
|
||||
import json
|
||||
import logging
|
||||
|
||||
from . request_response_spec import RequestResponse, RequestResponseSpec
|
||||
from .. schema import ToolServiceRequest, ToolServiceResponse
|
||||
|
||||
# Module logger
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ToolServiceClient(RequestResponse):
    """Client for invoking dynamically configured tool services."""

    @staticmethod
    def _make_request(user, config, arguments):
        # Serialize the config/arguments dicts to JSON strings as the
        # wire schema expects; empty/falsy dicts become "{}".
        return ToolServiceRequest(
            user=user,
            config=json.dumps(config) if config else "{}",
            arguments=json.dumps(arguments) if arguments else "{}",
        )

    async def call(self, user, config, arguments, timeout=600):
        """
        Call a tool service.

        Args:
            user: User context for multi-tenancy
            config: Dict of config values (e.g., {"collection": "customers"})
            arguments: Dict of arguments from LLM
            timeout: Request timeout in seconds

        Returns:
            Response string from the tool service

        Raises:
            RuntimeError: if the service reports an error.
        """
        reply = await self.request(
            self._make_request(user, config, arguments),
            timeout=timeout
        )

        if reply.error:
            raise RuntimeError(reply.error.message)

        return reply.response

    async def call_streaming(self, user, config, arguments, callback, timeout=600):
        """
        Call a tool service with streaming response.

        Args:
            user: User context for multi-tenancy
            config: Dict of config values
            arguments: Dict of arguments from LLM
            callback: Async function called with each response chunk
            timeout: Request timeout in seconds

        Returns:
            Final response string (all chunks concatenated)

        Raises:
            RuntimeError: if any chunk carries an error.
        """
        chunks = []

        async def on_chunk(reply):
            # Surface service-side errors immediately
            if reply.error:
                raise RuntimeError(reply.error.message)

            # Accumulate and forward non-empty chunks
            if reply.response:
                chunks.append(reply.response)
                await callback(reply.response)

            # True once the service signals the end of the stream
            return reply.end_of_stream

        await self.request(
            self._make_request(user, config, arguments),
            timeout=timeout,
            recipient=on_chunk
        )

        return "".join(chunks)
|
||||
|
||||
|
||||
class ToolServiceClientSpec(RequestResponseSpec):
    """Specification wiring ToolServiceClient into the request/response framework."""

    def __init__(self, request_name, response_name):
        # Bind the tool-service schemas and client implementation to the
        # generic request/response specification.
        super().__init__(
            impl=ToolServiceClient,
            request_name=request_name,
            response_name=response_name,
            request_schema=ToolServiceRequest,
            response_schema=ToolServiceResponse,
        )
|
||||
Loading…
Add table
Add a link
Reference in a new issue