Persistent websocket connections for socket clients and CLI tools (#723)

Replace per-request websocket connections in SocketClient and
AsyncSocketClient with a single persistent connection that
multiplexes requests by ID via a background reader task. This
eliminates repeated TCP+WS handshakes which caused significant
latency over proxies.

Convert show_flows, show_flow_blueprints, and
show_parameter_types CLI tools from sequential HTTP requests to
concurrent websocket requests using AsyncSocketClient, reducing
round trips from O(N) sequential to a small number of parallel
batches.

Also fix a describe_interfaces bug in show_flows where the
response value was read from the request field instead of the
response field.
This commit is contained in:
cybermaggedon 2026-03-26 16:46:28 +00:00 committed by GitHub
parent 1ec081f42f
commit 9c55a0a0ff
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 654 additions and 1067 deletions

View file

@ -1,5 +1,6 @@
import json
import asyncio
import websockets
from typing import Optional, Dict, Any, AsyncIterator, Union
@ -8,13 +9,29 @@ from . exceptions import ProtocolException, ApplicationException
class AsyncSocketClient:
"""Asynchronous WebSocket client"""
"""Asynchronous WebSocket client with persistent connection.
Maintains a single websocket connection and multiplexes requests
by ID, routing responses via a background reader task.
Use as an async context manager for proper lifecycle management:
async with AsyncSocketClient(url, timeout, token) as client:
result = await client._send_request(...)
Or call connect()/aclose() manually.
"""
def __init__(self, url: str, timeout: int, token: Optional[str]):
    """Create a client for the given service URL.

    The URL is normalised to a ws:// or wss:// form; timeout is used
    as the websocket ping timeout (seconds); token, if set, is appended
    to the socket URL as a query parameter.
    """
    self.url = self._convert_to_ws_url(url)
    self.timeout = timeout
    self.token = token
    self._request_counter = 0  # source of unique, monotonically increasing request IDs
    self._socket = None  # live websocket, set by connect()
    self._connect_cm = None  # websockets.connect context manager, exited in aclose()
    self._reader_task = None  # background task routing responses to queues
    self._pending = {} # request_id -> asyncio.Queue
    self._connected = False  # True while the persistent connection is usable
def _convert_to_ws_url(self, url: str) -> str:
"""Convert HTTP URL to WebSocket URL"""
@ -25,82 +42,123 @@ class AsyncSocketClient:
elif url.startswith("ws://") or url.startswith("wss://"):
return url
else:
# Assume ws://
return f"ws://{url}"
def _build_ws_url(self):
    """Return the socket endpoint URL, appending the auth token if set."""
    endpoint = f"{self.url.rstrip('/')}/api/v1/socket"
    if self.token:
        return f"{endpoint}?token={self.token}"
    return endpoint
async def connect(self):
    """Open the persistent websocket connection; no-op if already open.

    Enters the websockets.connect() context manager by hand (it is
    exited in aclose()) and starts the background reader task that
    routes incoming responses to per-request queues.
    """
    if self._connected:
        return

    cm = websockets.connect(
        self._build_ws_url(),
        ping_interval=20,
        ping_timeout=self.timeout,
    )
    self._connect_cm = cm
    self._socket = await cm.__aenter__()
    self._connected = True
    self._reader_task = asyncio.create_task(self._reader())
async def __aenter__(self):
    """Async context manager entry: open the persistent connection."""
    await self.connect()
    return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
    """Async context manager exit: close the connection cleanly."""
    await self.aclose()
async def _ensure_connected(self):
    """Open the connection on first use (lazy connect)."""
    if self._connected:
        return
    await self.connect()
async def _reader(self):
    """Background task: read frames and route them to waiting requests.

    Each incoming JSON message carries the "id" of the request it
    answers; the message is pushed onto that request's queue. Messages
    with no matching pending request are ignored. When the loop ends
    for any reason — connection closed or a decode/routing error —
    every still-pending request is unblocked with an error message so
    callers raise instead of hanging forever.
    """
    error_message = "Connection closed"
    try:
        async for raw_message in self._socket:
            response = json.loads(raw_message)
            queue = self._pending.get(response.get("id"))
            if queue is not None:
                await queue.put(response)
            # Messages for unknown request IDs are ignored
    except websockets.exceptions.ConnectionClosed:
        pass
    except Exception as e:
        error_message = str(e)
    finally:
        self._connected = False
        # Fix: previously a closed connection left pending requests
        # waiting forever, and the error path used a bare `except:`
        # around queue.put (which could swallow CancelledError).
        # put_nowait never raises QueueFull on unbounded queues.
        for queue in self._pending.values():
            queue.put_nowait({"error": error_message})
def _next_request_id(self):
    """Return a fresh request identifier unique within this client."""
    counter = self._request_counter + 1
    self._request_counter = counter
    return f"req-{counter}"
def flow(self, flow_id: str):
    """Get async flow instance for WebSocket operations"""
    # Thin factory; the instance wraps this client and the flow ID.
    return AsyncSocketFlowInstance(self, flow_id)
async def _send_request(self, service: str, flow: Optional[str], request: Dict[str, Any]):
    """Send a request and wait for a single response.

    A fresh request ID is allocated and a queue registered under it so
    the background reader task can route the matching response back.

    Raises:
        ApplicationException: the response carries an "error" field.
        ProtocolException: the response lacks a "response" field.

    (This span also contained stale leftovers of the superseded
    per-request websocket implementation — per-call URL building and
    websockets.connect — which are removed here.)
    """
    await self._ensure_connected()

    request_id = self._next_request_id()
    queue = asyncio.Queue()
    self._pending[request_id] = queue

    try:
        message = {
            "id": request_id,
            "service": service,
            "request": request
        }
        if flow:
            message["flow"] = flow

        await self._socket.send(json.dumps(message))

        response = await queue.get()
        if "error" in response:
            raise ApplicationException(response["error"])
        if "response" not in response:
            raise ProtocolException("Missing response in message")
        return response["response"]
    finally:
        # Always deregister so the pending map cannot leak entries.
        self._pending.pop(request_id, None)
async def _send_request_streaming(self, service: str, flow: Optional[str], request: Dict[str, Any]):
    """Send a request and yield streaming response chunks.

    Registers a per-request queue, sends the framed message, then
    yields each parsed chunk (via _parse_chunk; provenance messages
    parse to None and are skipped) until the stream signals completion:
    "end_of_session" / "end_of_dialog" in the payload, or "complete"
    on the gateway envelope.

    Raises:
        ApplicationException: a streamed message carries an "error" field.

    (This span also contained stale leftovers of the superseded
    per-request websocket implementation, removed here.)
    """
    await self._ensure_connected()

    request_id = self._next_request_id()
    queue = asyncio.Queue()
    self._pending[request_id] = queue

    try:
        message = {
            "id": request_id,
            "service": service,
            "request": request
        }
        if flow:
            message["flow"] = flow

        await self._socket.send(json.dumps(message))

        while True:
            response = await queue.get()
            if "error" in response:
                raise ApplicationException(response["error"])
            if "response" in response:
                resp = response["response"]
                chunk = self._parse_chunk(resp)
                if chunk is not None:
                    yield chunk
                # end_of_session: entire session complete (incl. provenance)
                # end_of_dialog: agent dialogs; complete: gateway envelope
                if resp.get("end_of_session") or resp.get("end_of_dialog") or response.get("complete"):
                    break
    finally:
        self._pending.pop(request_id, None)
def _parse_chunk(self, resp: Dict[str, Any]):
"""Parse response chunk into appropriate type. Returns None for non-content messages."""
chunk_type = resp.get("chunk_type")
@ -127,7 +183,6 @@ class AsyncSocketClient:
# Handle new GraphRAG message format with message_type
if message_type == "provenance":
# Provenance messages are not yielded to user - they're metadata
return None
if chunk_type == "thought":
@ -147,25 +202,41 @@ class AsyncSocketClient:
end_of_dialog=resp.get("end_of_dialog", False)
)
elif chunk_type == "action":
# Agent action chunks - treat as thoughts for display purposes
return AgentThought(
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)
)
else:
# RAG-style chunk (or generic chunk with message_type="chunk")
# Text-completion uses "response" field, RAG uses "chunk" field, Prompt uses "text" field
content = resp.get("response", resp.get("chunk", resp.get("text", "")))
return RAGChunk(
content=content,
end_of_stream=resp.get("end_of_stream", False),
error=None # Errors are always thrown, never stored
error=None
)
async def aclose(self):
    """Close the persistent WebSocket connection cleanly.

    Cancels the reader task first, then exits the websockets.connect()
    context manager entered in connect(), which shuts down the
    connection and its keepalive task. Safe to call more than once.

    (Stale lines of the old no-op implementation that were left
    interleaved here are removed.)
    """
    # Stop the reader before tearing down the socket it iterates.
    if self._reader_task:
        self._reader_task.cancel()
        try:
            await self._reader_task
        except asyncio.CancelledError:
            pass
        self._reader_task = None

    # Exit the websockets context manager — this cleanly shuts down
    # the connection and its keepalive task.
    if self._connect_cm:
        try:
            await self._connect_cm.__aexit__(None, None, None)
        except Exception:
            pass
        self._connect_cm = None

    self._socket = None
    self._connected = False
    self._pending.clear()
class AsyncSocketFlowInstance:
@ -292,7 +363,6 @@ class AsyncSocketFlowInstance:
async def graph_embeddings_query(self, text: str, user: str, collection: str, limit: int = 10, **kwargs):
"""Query graph embeddings for semantic search"""
# First convert text to embedding vector
emb_result = await self.embeddings(texts=[text])
vector = emb_result.get("vectors", [[]])[0]
@ -362,7 +432,6 @@ class AsyncSocketFlowInstance:
limit: int = 10, **kwargs
):
"""Query row embeddings for semantic search on structured data"""
# First convert text to embedding vector
emb_result = await self.embeddings(texts=[text])
vector = emb_result.get("vectors", [[]])[0]

File diff suppressed because it is too large Load diff

View file

@ -58,6 +58,14 @@ def print_json(sessions):
print(json.dumps(sessions, indent=2))
# Map type names for display
TYPE_DISPLAY = {
"graphrag": "GraphRAG",
"docrag": "DocRAG",
"agent": "Agent",
}
def main():
parser = argparse.ArgumentParser(
prog='tg-list-explain-traces',
@ -118,7 +126,7 @@ def main():
explain_client = ExplainabilityClient(flow)
try:
# List all sessions using the API
# List all sessions — uses persistent websocket via SocketClient
questions = explain_client.list_sessions(
graph=RETRIEVAL_GRAPH,
user=args.user,
@ -126,7 +134,8 @@ def main():
limit=args.limit,
)
# Convert to output format
# detect_session_type is mostly a fast URI pattern check,
# only falls back to network calls for unrecognised URIs
sessions = []
for q in questions:
session_type = explain_client.detect_session_type(
@ -136,16 +145,9 @@ def main():
collection=args.collection
)
# Map type names
type_display = {
"graphrag": "GraphRAG",
"docrag": "DocRAG",
"agent": "Agent",
}.get(session_type, session_type.title())
sessions.append({
"id": q.uri,
"type": type_display,
"type": TYPE_DISPLAY.get(session_type, session_type.title()),
"question": q.query,
"time": q.timestamp,
})

View file

@ -3,31 +3,27 @@ Shows all defined flow blueprints.
"""
import argparse
import asyncio
import os
import tabulate
from trustgraph.api import Api, ConfigKey
from trustgraph.api import AsyncSocketClient
import json
default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
default_token = os.getenv("TRUSTGRAPH_TOKEN", None)
def format_parameters(params_metadata, config_api):
def format_parameters(params_metadata, param_type_defs):
"""
Format parameter metadata for display
Format parameter metadata for display.
Args:
params_metadata: Parameter definitions from flow blueprint
config_api: API client to get parameter type information
Returns:
Formatted string describing parameters
param_type_defs is a dict of type_name -> parsed type definition,
pre-fetched concurrently.
"""
if not params_metadata:
return "None"
param_list = []
# Sort parameters by order if available
sorted_params = sorted(
params_metadata.items(),
key=lambda x: x[1].get("order", 999)
@ -37,41 +33,89 @@ def format_parameters(params_metadata, config_api):
description = param_meta.get("description", param_name)
param_type = param_meta.get("type", "unknown")
# Get type information if available
type_info = param_type
if config_api:
try:
key = ConfigKey("parameter-type", param_type)
type_def_value = config_api.get([key])[0].value
param_type_def = json.loads(type_def_value)
# Add default value if available
default = param_type_def.get("default")
if default is not None:
type_info = f"{param_type} (default: {default})"
except:
# If we can't get type definition, just show the type name
pass
if param_type in param_type_defs:
param_type_def = param_type_defs[param_type]
default = param_type_def.get("default")
if default is not None:
type_info = f"{param_type} (default: {default})"
param_list.append(f" {param_name}: {description} [{type_info}]")
return "\n".join(param_list)
async def fetch_data(client):
    """Fetch all data needed for show_flow_blueprints concurrently.

    Returns (blueprint_names, blueprints, param_type_defs) where
    blueprints maps name -> parsed definition and param_type_defs maps
    parameter-type name -> parsed type definition.
    """
    # Round 1: list blueprint names.
    listing = await client._send_request("flow", None, {
        "operation": "list-blueprints",
    })
    names = listing.get("blueprint-names", [])
    if not names:
        return [], {}, {}

    # Round 2: fetch every blueprint definition in one parallel batch.
    replies = await asyncio.gather(*(
        client._send_request("flow", None, {
            "operation": "get-blueprint",
            "blueprint-name": name,
        })
        for name in names
    ))
    blueprints = {}
    for name, reply in zip(names, replies):
        raw = reply.get("blueprint-definition", "{}")
        blueprints[name] = json.loads(raw) if isinstance(raw, str) else raw

    # Round 3: fetch the distinct parameter types any blueprint references.
    wanted = {
        meta.get("type", "")
        for bp in blueprints.values()
        for meta in bp.get("parameters", {}).values()
        if meta.get("type", "")
    }
    param_type_defs = {}
    if wanted:
        wanted = list(wanted)
        type_replies = await asyncio.gather(*(
            client._send_request("config", None, {
                "operation": "get",
                "keys": [{"type": "parameter-type", "key": pt}],
            })
            for pt in wanted
        ))
        for pt, reply in zip(wanted, type_replies):
            values = reply.get("values", [])
            if values:
                try:
                    param_type_defs[pt] = json.loads(values[0].get("value", "{}"))
                except (json.JSONDecodeError, AttributeError):
                    pass

    return names, blueprints, param_type_defs
async def _show_flow_blueprints_async(url, token=None):
    """Open a persistent socket client and fetch all blueprint data."""
    async with AsyncSocketClient(url, timeout=60, token=token) as client:
        return await fetch_data(client)
def show_flow_blueprints(url, token=None):
api = Api(url, token=token)
flow_api = api.flow()
config_api = api.config()
blueprint_names, blueprints, param_type_defs = asyncio.run(
_show_flow_blueprints_async(url, token=token)
)
blueprint_names = flow_api.list_blueprints()
if len(blueprint_names) == 0:
if not blueprint_names:
print("No flow blueprints.")
return
for blueprint_name in blueprint_names:
cls = flow_api.get_blueprint(blueprint_name)
cls = blueprints[blueprint_name]
table = []
table.append(("name", blueprint_name))
@ -81,10 +125,9 @@ def show_flow_blueprints(url, token=None):
if tags:
table.append(("tags", ", ".join(tags)))
# Show parameters if they exist
parameters = cls.get("parameters", {})
if parameters:
param_str = format_parameters(parameters, config_api)
param_str = format_parameters(parameters, param_type_defs)
table.append(("parameters", param_str))
print(tabulate.tabulate(

View file

@ -3,22 +3,15 @@ Shows configured flows.
"""
import argparse
import asyncio
import os
import tabulate
from trustgraph.api import Api, ConfigKey
from trustgraph.api import Api, AsyncSocketClient
import json
default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
default_token = os.getenv("TRUSTGRAPH_TOKEN", None)
def get_interface(config_api, i):
key = ConfigKey("interface-description", i)
value = config_api.get([key])[0].value
return json.loads(value)
def describe_interfaces(intdefs, flow):
intfs = flow.get("interfaces", {})
@ -34,7 +27,7 @@ def describe_interfaces(intdefs, flow):
if kind == "request-response":
req = intfs[k]["request"]
resp = intfs[k]["request"]
resp = intfs[k]["response"]
lst.append(f"{k} request: {req}")
lst.append(f"{k} response: {resp}")
@ -49,17 +42,9 @@ def describe_interfaces(intdefs, flow):
def get_enum_description(param_value, param_type_def):
"""
Get the human-readable description for an enum value
Args:
param_value: The actual parameter value (e.g., "gpt-4")
param_type_def: The parameter type definition containing enum objects
Returns:
Human-readable description or the original value if not found
"""
enum_list = param_type_def.get("enum", [])
# Handle both old format (strings) and new format (objects with id/description)
for enum_item in enum_list:
if isinstance(enum_item, dict):
if enum_item.get("id") == param_value:
@ -67,27 +52,20 @@ def get_enum_description(param_value, param_type_def):
elif enum_item == param_value:
return param_value
# If not found in enum, return original value
return param_value
def format_parameters(flow_params, blueprint_params_metadata, config_api):
def format_parameters(flow_params, blueprint_params_metadata, param_type_defs):
"""
Format flow parameters with their human-readable descriptions
Format flow parameters with their human-readable descriptions.
Args:
flow_params: The actual parameter values used in the flow
blueprint_params_metadata: The parameter metadata from the flow blueprint definition
config_api: API client to retrieve parameter type definitions
Returns:
Formatted string of parameters with descriptions
param_type_defs is a dict of type_name -> parsed type definition,
pre-fetched concurrently.
"""
if not flow_params:
return "None"
param_list = []
# Sort parameters by order if available
sorted_params = sorted(
blueprint_params_metadata.items(),
key=lambda x: x[1].get("order", 999)
@ -100,80 +78,165 @@ def format_parameters(flow_params, blueprint_params_metadata, config_api):
param_type = param_meta.get("type", "")
controlled_by = param_meta.get("controlled-by", None)
# Try to get enum description if this parameter has a type definition
display_value = value
if param_type and config_api:
try:
from trustgraph.api import ConfigKey
key = ConfigKey("parameter-type", param_type)
type_def_value = config_api.get([key])[0].value
param_type_def = json.loads(type_def_value)
display_value = get_enum_description(value, param_type_def)
except:
# If we can't get the type definition, just use the original value
display_value = value
if param_type and param_type in param_type_defs:
display_value = get_enum_description(
value, param_type_defs[param_type]
)
# Format the parameter line
line = f"{description}: {display_value}"
# Add controlled-by indicator if present
if controlled_by:
line += f" (controlled by {controlled_by})"
param_list.append(line)
# Add any parameters that aren't in the blueprint metadata (shouldn't happen normally)
for param_name, value in flow_params.items():
if param_name not in blueprint_params_metadata:
param_list.append(f"{param_name}: {value} (undefined)")
return "\n".join(param_list) if param_list else "None"
async def fetch_show_flows(client):
    """Fetch all data needed for show_flows concurrently.

    Returns a 5-tuple:
    (interface_defs, flow_ids, flows, blueprints, param_type_defs).
    """
    # Round 1: list interfaces and list flows in parallel
    interface_names_resp, flow_ids_resp = await asyncio.gather(
        client._send_request("config", None, {
            "operation": "list",
            "type": "interface-description",
        }),
        client._send_request("flow", None, {
            "operation": "list-flows",
        }),
    )
    interface_names = interface_names_resp.get("directory", [])
    flow_ids = flow_ids_resp.get("flow-ids", [])

    if not flow_ids:
        # Bug fix: this early return previously yielded a 4-tuple while
        # callers unpack five values, so "no flows" raised ValueError.
        return {}, [], {}, {}, {}

    # Round 2: get all interfaces + all flows in parallel
    interface_tasks = [
        client._send_request("config", None, {
            "operation": "get",
            "keys": [{"type": "interface-description", "key": name}],
        })
        for name in interface_names
    ]
    flow_tasks = [
        client._send_request("flow", None, {
            "operation": "get-flow",
            "flow-id": fid,
        })
        for fid in flow_ids
    ]
    results = await asyncio.gather(*interface_tasks, *flow_tasks)

    # Split results: interfaces first, then flows, in request order.
    interface_results = results[:len(interface_names)]
    flow_results = results[len(interface_names):]

    # Parse interfaces
    interface_defs = {}
    for name, resp in zip(interface_names, interface_results):
        values = resp.get("values", [])
        if values:
            interface_defs[name] = json.loads(values[0].get("value", "{}"))

    # Parse flows
    flows = {}
    for fid, resp in zip(flow_ids, flow_results):
        flow_data = resp.get("flow", "{}")
        flows[fid] = json.loads(flow_data) if isinstance(flow_data, str) else flow_data

    # Round 3: get all blueprints referenced by any flow, in parallel
    blueprint_names = set()
    for flow in flows.values():
        bp = flow.get("blueprint-name", "")
        if bp:
            blueprint_names.add(bp)

    blueprint_tasks = [
        client._send_request("flow", None, {
            "operation": "get-blueprint",
            "blueprint-name": bp_name,
        })
        for bp_name in blueprint_names
    ]
    blueprint_results = await asyncio.gather(*blueprint_tasks)

    blueprints = {}
    for bp_name, resp in zip(blueprint_names, blueprint_results):
        bp_data = resp.get("blueprint-definition", "{}")
        blueprints[bp_name] = json.loads(bp_data) if isinstance(bp_data, str) else bp_data

    # Round 4: get all parameter type definitions in parallel
    param_types_needed = set()
    for bp in blueprints.values():
        for param_meta in bp.get("parameters", {}).values():
            pt = param_meta.get("type", "")
            if pt:
                param_types_needed.add(pt)

    param_type_tasks = [
        client._send_request("config", None, {
            "operation": "get",
            "keys": [{"type": "parameter-type", "key": pt}],
        })
        for pt in param_types_needed
    ]
    param_type_results = await asyncio.gather(*param_type_tasks)

    param_type_defs = {}
    for pt, resp in zip(param_types_needed, param_type_results):
        values = resp.get("values", [])
        if values:
            try:
                param_type_defs[pt] = json.loads(values[0].get("value", "{}"))
            except (json.JSONDecodeError, AttributeError):
                pass

    return interface_defs, flow_ids, flows, blueprints, param_type_defs
async def _show_flows_async(url, token=None):
    """Open a persistent socket client and fetch all show_flows data."""
    async with AsyncSocketClient(url, timeout=60, token=token) as client:
        return await fetch_show_flows(client)
def show_flows(url, token=None):
api = Api(url, token=token)
config_api = api.config()
flow_api = api.flow()
result = asyncio.run(_show_flows_async(url, token=token))
interface_names = config_api.list("interface-description")
interface_defs, flow_ids, flows, blueprints, param_type_defs = result
interface_defs = {
i: get_interface(config_api, i)
for i in interface_names
}
flow_ids = flow_api.list()
if len(flow_ids) == 0:
if not flow_ids:
print("No flows.")
return
flows = []
for fid in flow_ids:
for id in flow_ids:
flow = flow_api.get(id)
flow = flows[fid]
table = []
table.append(("id", id))
table.append(("id", fid))
table.append(("blueprint", flow.get("blueprint-name", "")))
table.append(("desc", flow.get("description", "")))
# Display parameters with human-readable descriptions
parameters = flow.get("parameters", {})
if parameters:
# Try to get the flow blueprint definition for parameter metadata
blueprint_name = flow.get("blueprint-name", "")
if blueprint_name:
try:
flow_blueprint = flow_api.get_blueprint(blueprint_name)
blueprint_params_metadata = flow_blueprint.get("parameters", {})
param_str = format_parameters(parameters, blueprint_params_metadata, config_api)
except Exception as e:
# Fallback to JSON if we can't get the blueprint definition
param_str = json.dumps(parameters, indent=2)
if blueprint_name and blueprint_name in blueprints:
blueprint_params_metadata = blueprints[blueprint_name].get("parameters", {})
param_str = format_parameters(
parameters, blueprint_params_metadata, param_type_defs
)
else:
# No blueprint name, fallback to JSON
param_str = json.dumps(parameters, indent=2)
table.append(("parameters", param_str))
@ -220,4 +283,4 @@ def main():
print("Exception:", e, flush=True)
if __name__ == "__main__":
main()
main()

View file

@ -7,9 +7,10 @@ valid enums, and validation rules.
"""
import argparse
import asyncio
import os
import tabulate
from trustgraph.api import Api, ConfigKey
from trustgraph.api import AsyncSocketClient
import json
default_url = os.getenv("TRUSTGRAPH_URL", 'http://localhost:8088/')
@ -17,13 +18,7 @@ default_token = os.getenv("TRUSTGRAPH_TOKEN", None)
def format_enum_values(enum_list):
"""
Format enum values for display, handling both old and new formats
Args:
enum_list: List of enum values (strings or objects with id/description)
Returns:
Formatted string describing enum options
Format enum values for display, handling both old and new formats.
"""
if not enum_list:
return "Any value"
@ -31,7 +26,6 @@ def format_enum_values(enum_list):
enum_items = []
for item in enum_list:
if isinstance(item, dict):
# New format: objects with id and description
enum_id = item.get("id", "")
description = item.get("description", "")
if description:
@ -39,99 +33,146 @@ def format_enum_values(enum_list):
else:
enum_items.append(enum_id)
else:
# Old format: simple strings
enum_items.append(str(item))
return "\n".join(f"{item}" for item in enum_items)
def format_constraints(param_type_def):
    """Format validation constraints for display.

    Emits numeric bounds, string length/pattern limits, and the
    required flag, comma-joined; "None" when no constraints apply.
    """
    parts = []
    for key, label in (
        ("minimum", "min"),
        ("maximum", "max"),
        ("minLength", "min length"),
        ("maxLength", "max length"),
        ("pattern", "pattern"),
    ):
        if key in param_type_def:
            parts.append(f"{label}: {param_type_def[key]}")

    if param_type_def.get("required", False):
        parts.append("required")

    return ", ".join(parts) if parts else "None"
def format_param_type(param_type_name, param_type_def):
    """Render one parameter type definition as (label, value) table rows."""
    rows = [
        ("name", param_type_name),
        ("description", param_type_def.get("description", "")),
        ("type", param_type_def.get("type", "unknown")),
    ]

    default = param_type_def.get("default")
    if default is not None:
        rows.append(("default", str(default)))

    enum_list = param_type_def.get("enum")
    if enum_list:
        rows.append(("valid values", format_enum_values(enum_list)))

    constraints = format_constraints(param_type_def)
    if constraints != "None":
        rows.append(("constraints", constraints))

    return rows
async def fetch_all_param_types(client):
    """Fetch all parameter types concurrently.

    Returns (names, defs): every listed name, plus a dict mapping each
    name whose definition fetched and parsed cleanly to its parsed value.
    """
    # Round 1: list the parameter type names.
    listing = await client._send_request("config", None, {
        "operation": "list",
        "type": "parameter-type",
    })
    names = listing.get("directory", [])
    if not names:
        return [], {}

    # Round 2: fetch every definition in one parallel batch.
    replies = await asyncio.gather(*(
        client._send_request("config", None, {
            "operation": "get",
            "keys": [{"type": "parameter-type", "key": name}],
        })
        for name in names
    ))

    defs = {}
    for name, reply in zip(names, replies):
        values = reply.get("values", [])
        if not values:
            continue
        try:
            defs[name] = json.loads(values[0].get("value", "{}"))
        except (json.JSONDecodeError, AttributeError):
            pass

    return names, defs
async def fetch_single_param_type(client, param_type_name):
    """Fetch and parse one parameter type; None if it doesn't exist."""
    reply = await client._send_request("config", None, {
        "operation": "get",
        "keys": [{"type": "parameter-type", "key": param_type_name}],
    })
    values = reply.get("values", [])
    if not values:
        return None
    return json.loads(values[0].get("value", "{}"))
def show_parameter_types(url, token=None):
    """Show all parameter type definitions.

    Fetches all parameter types concurrently over a persistent
    websocket, then prints one table per type. Types that were listed
    but failed to fetch/parse get an error line instead of a table.

    (Stale lines of the old sequential HTTP implementation that were
    left interleaved here are removed.)
    """
    async def _fetch():
        async with AsyncSocketClient(url, timeout=60, token=token) as client:
            return await fetch_all_param_types(client)

    param_type_names, param_type_defs = asyncio.run(_fetch())

    if not param_type_names:
        print("No parameter types defined.")
        return

    for name in param_type_names:
        if name not in param_type_defs:
            print(f"Error retrieving parameter type '{name}'")
            print()
            continue

        table = format_param_type(name, param_type_defs[name])
        print(tabulate.tabulate(
            table,
            tablefmt="pretty",
            stralign="left",
        ))
        print()
def show_specific_parameter_type(url, param_type_name, token=None):
    """Show a specific parameter type definition."""
    async def _fetch():
        async with AsyncSocketClient(url, timeout=60, token=token) as client:
            return await fetch_single_param_type(client, param_type_name)

    definition = asyncio.run(_fetch())

    if definition is None:
        print(f"Error retrieving parameter type '{param_type_name}'")
        return

    rows = format_param_type(param_type_name, definition)
    print(tabulate.tabulate(
        rows,
        tablefmt="pretty",
        stralign="left",
    ))
def main():
parser = argparse.ArgumentParser(
@ -161,57 +202,12 @@ def main():
try:
if args.type:
# Show specific parameter type
show_specific_parameter_type(args.api_url, args.type, args.token)
else:
# Show all parameter types
show_parameter_types(args.api_url, args.token)
except Exception as e:
print("Exception:", e, flush=True)
def show_specific_parameter_type(url, param_type_name, token=None):
"""
Show a specific parameter type definition
"""
api = Api(url, token=token)
config_api = api.config()
try:
# Get the parameter type definition
key = ConfigKey("parameter-type", param_type_name)
type_def_value = config_api.get([key])[0].value
param_type_def = json.loads(type_def_value)
table = []
table.append(("name", param_type_name))
table.append(("description", param_type_def.get("description", "")))
table.append(("type", param_type_def.get("type", "unknown")))
# Show default value if present
default = param_type_def.get("default")
if default is not None:
table.append(("default", str(default)))
# Show enum values if present
enum_list = param_type_def.get("enum")
if enum_list:
enum_str = format_enum_values(enum_list)
table.append(("valid values", enum_str))
# Show constraints
constraints = format_constraints(param_type_def)
if constraints != "None":
table.append(("constraints", constraints))
print(tabulate.tabulate(
table,
tablefmt="pretty",
stralign="left",
))
except Exception as e:
print(f"Error retrieving parameter type '{param_type_name}': {e}")
if __name__ == "__main__":
main()
main()