2025-04-22 20:21:38 +01:00
|
|
|
|
2025-07-30 23:18:38 +01:00
|
|
|
import logging
|
|
|
|
|
|
2025-04-22 20:21:38 +01:00
|
|
|
from trustgraph.schema import ConfigResponse
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
from trustgraph.schema import ConfigValue, WorkspaceChanges, Error
|
2025-04-22 20:21:38 +01:00
|
|
|
|
2025-05-07 12:58:32 +01:00
|
|
|
from ... tables.config import ConfigTableStore
|
2025-04-22 20:21:38 +01:00
|
|
|
|
2025-07-30 23:18:38 +01:00
|
|
|
# Module logger, named after this module so log output can be filtered
# per-component
logger = logging.getLogger(__name__)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
# Reserved namespace whose config entries enumerate the system's
# workspaces; deletes against it drive workspace-lifecycle events
# (see handle_delete)
WORKSPACES_NAMESPACE = "__workspaces__"

# Config type under which workspace records are stored in that namespace
WORKSPACE_TYPE = "workspace"

# Reserved workspace used as the provisioning template for newly created
# workspaces — presumably its config is copied on creation; confirm
# against the workspace-creation path
TEMPLATE_WORKSPACE = "__template__"
|
|
|
|
|
|
2025-05-07 12:58:32 +01:00
|
|
|
class Configuration:
|
2025-04-22 20:21:38 +01:00
|
|
|
|
2026-05-08 19:48:12 +01:00
|
|
|
def __init__(self, push, host, username, password, keyspace,
|
|
|
|
|
replication_factor=1):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
# External function to respond to update
|
|
|
|
|
self.push = push
|
|
|
|
|
|
2025-05-07 12:58:32 +01:00
|
|
|
self.table_store = ConfigTableStore(
|
2026-05-08 19:48:12 +01:00
|
|
|
host, username, password, keyspace, replication_factor
|
2025-05-07 12:58:32 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
async def inc_version(self):
|
|
|
|
|
await self.table_store.inc_version()
|
|
|
|
|
|
|
|
|
|
async def get_version(self):
|
|
|
|
|
return await self.table_store.get_version()
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_get(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
values = [
|
|
|
|
|
ConfigValue(
|
|
|
|
|
type = k.type,
|
|
|
|
|
key = k.key,
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
value = await self.table_store.get_value(
|
|
|
|
|
workspace, k.type, k.key
|
|
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
)
|
|
|
|
|
for k in v.keys
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
return ConfigResponse(
|
2025-05-07 12:58:32 +01:00
|
|
|
version = await self.get_version(),
|
2025-04-22 20:21:38 +01:00
|
|
|
values = values,
|
|
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_list(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
return ConfigResponse(
|
2025-05-07 12:58:32 +01:00
|
|
|
version = await self.get_version(),
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
directory = await self.table_store.get_keys(
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
workspace, v.type
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
),
|
2025-04-22 20:21:38 +01:00
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_getvalues(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
vals = await self.table_store.get_values(workspace, v.type)
|
2025-05-07 12:58:32 +01:00
|
|
|
|
|
|
|
|
values = map(
|
|
|
|
|
lambda x: ConfigValue(
|
|
|
|
|
type = v.type, key = x[0], value = x[1]
|
|
|
|
|
),
|
2025-05-08 00:41:45 +01:00
|
|
|
vals
|
2025-05-07 12:58:32 +01:00
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
return ConfigResponse(
|
2025-05-07 12:58:32 +01:00
|
|
|
version = await self.get_version(),
|
2025-05-08 00:41:45 +01:00
|
|
|
values = list(values),
|
2025-04-22 20:21:38 +01:00
|
|
|
)
|
|
|
|
|
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
async def handle_getvalues_all_ws(self, v):
|
|
|
|
|
"""Fetch all values of a given type across all workspaces.
|
|
|
|
|
Used by shared processors to load type-scoped config at
|
|
|
|
|
startup without enumerating workspaces separately."""
|
|
|
|
|
|
|
|
|
|
vals = await self.table_store.get_values_all_ws(v.type)
|
|
|
|
|
|
|
|
|
|
values = [
|
|
|
|
|
ConfigValue(
|
|
|
|
|
workspace = row[0],
|
|
|
|
|
type = v.type,
|
|
|
|
|
key = row[1],
|
|
|
|
|
value = row[2],
|
|
|
|
|
)
|
|
|
|
|
for row in vals
|
|
|
|
|
]
|
|
|
|
|
|
|
|
|
|
return ConfigResponse(
|
|
|
|
|
version = await self.get_version(),
|
|
|
|
|
values = values,
|
|
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_delete(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Config push notify pattern: replace stateful pub/sub with signal+ fetch (#760)
Replace the config push mechanism that broadcast the full config
blob on a 'state' class pub/sub queue with a lightweight notify
signal containing only the version number and affected config
types. Processors fetch the full config via request/response from
the config service when notified.
This eliminates the need for the pub/sub 'state' queue class and
stateful pub/sub services entirely. The config push queue moves
from 'state' to 'flow' class — a simple transient signal rather
than a retained message. This solves the RabbitMQ
late-subscriber problem where restarting processes never received
the current config because their fresh queue had no historical
messages.
Key changes:
- ConfigPush schema: config dict replaced with types list
- Subscribe-then-fetch startup with retry: processors subscribe
to notify queue, fetch config via request/response, then
process buffered notifies with version comparison to avoid race
conditions
- register_config_handler() accepts optional types parameter so
handlers only fire when their config types change
- Short-lived config request/response clients to avoid subscriber
contention on non-persistent response topics
- Config service passes affected types through put/delete/flow
operations
- Gateway ConfigReceiver rewritten with same notify pattern and
retry loop
Tests updated
New tests:
- register_config_handler: without types, with types, multiple
types, multiple handlers
- on_config_notify: old/same version skipped, irrelevant types
skipped (version still updated), relevant type triggers fetch,
handler without types always called, mixed handler filtering,
empty types invokes all, fetch failure handled gracefully
- fetch_config: returns config+version, raises on error response,
stops client even on exception
- fetch_and_apply_config: applies to all handlers on startup,
retries on failure
2026-04-06 16:57:27 +01:00
|
|
|
types = list(set(k.type for k in v.keys))
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
for k in v.keys:
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
await self.table_store.delete_key(workspace, k.type, k.key)
|
2025-05-07 12:58:32 +01:00
|
|
|
|
|
|
|
|
await self.inc_version()
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
workspace_changes = None
|
|
|
|
|
if workspace == WORKSPACES_NAMESPACE and WORKSPACE_TYPE in types:
|
|
|
|
|
deleted = [k.key for k in v.keys if k.type == WORKSPACE_TYPE]
|
|
|
|
|
if deleted:
|
|
|
|
|
workspace_changes = WorkspaceChanges(deleted=deleted)
|
|
|
|
|
|
|
|
|
|
await self.push(
|
|
|
|
|
changes={t: [workspace] for t in types},
|
|
|
|
|
workspace_changes=workspace_changes,
|
|
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
return ConfigResponse(
|
|
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_put(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Config push notify pattern: replace stateful pub/sub with signal+ fetch (#760)
Replace the config push mechanism that broadcast the full config
blob on a 'state' class pub/sub queue with a lightweight notify
signal containing only the version number and affected config
types. Processors fetch the full config via request/response from
the config service when notified.
This eliminates the need for the pub/sub 'state' queue class and
stateful pub/sub services entirely. The config push queue moves
from 'state' to 'flow' class — a simple transient signal rather
than a retained message. This solves the RabbitMQ
late-subscriber problem where restarting processes never received
the current config because their fresh queue had no historical
messages.
Key changes:
- ConfigPush schema: config dict replaced with types list
- Subscribe-then-fetch startup with retry: processors subscribe
to notify queue, fetch config via request/response, then
process buffered notifies with version comparison to avoid race
conditions
- register_config_handler() accepts optional types parameter so
handlers only fire when their config types change
- Short-lived config request/response clients to avoid subscriber
contention on non-persistent response topics
- Config service passes affected types through put/delete/flow
operations
- Gateway ConfigReceiver rewritten with same notify pattern and
retry loop
Tests updated
New tests:
- register_config_handler: without types, with types, multiple
types, multiple handlers
- on_config_notify: old/same version skipped, irrelevant types
skipped (version still updated), relevant type triggers fetch,
handler without types always called, mixed handler filtering,
empty types invokes all, fetch failure handled gracefully
- fetch_config: returns config+version, raises on error response,
stops client even on exception
- fetch_and_apply_config: applies to all handlers on startup,
retries on failure
2026-04-06 16:57:27 +01:00
|
|
|
types = list(set(k.type for k in v.values))
|
|
|
|
|
|
2025-04-22 20:21:38 +01:00
|
|
|
for k in v.values:
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
await self.table_store.put_config(
|
|
|
|
|
workspace, k.type, k.key, k.value
|
|
|
|
|
)
|
2025-05-07 12:58:32 +01:00
|
|
|
|
|
|
|
|
await self.inc_version()
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
workspace_changes = None
|
|
|
|
|
if workspace == WORKSPACES_NAMESPACE and WORKSPACE_TYPE in types:
|
|
|
|
|
created = [k.key for k in v.values if k.type == WORKSPACE_TYPE]
|
|
|
|
|
if created:
|
|
|
|
|
workspace_changes = WorkspaceChanges(created=created)
|
|
|
|
|
|
|
|
|
|
await self.push(
|
|
|
|
|
changes={t: [workspace] for t in types},
|
|
|
|
|
workspace_changes=workspace_changes,
|
|
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
return ConfigResponse(
|
|
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def provision_from_template(self, workspace):
|
|
|
|
|
"""Copy all config from __template__ into a new workspace,
|
|
|
|
|
skipping keys that already exist (upsert-missing)."""
|
|
|
|
|
|
|
|
|
|
template = await self.get_config(TEMPLATE_WORKSPACE)
|
|
|
|
|
|
|
|
|
|
if not template:
|
|
|
|
|
logger.info(
|
|
|
|
|
f"No template config to provision for {workspace}"
|
|
|
|
|
)
|
|
|
|
|
return 0
|
|
|
|
|
|
|
|
|
|
existing_types = await self.get_config(workspace)
|
|
|
|
|
|
|
|
|
|
written = 0
|
|
|
|
|
for type_name, entries in template.items():
|
|
|
|
|
existing_keys = set(existing_types.get(type_name, {}).keys())
|
|
|
|
|
for key, value in entries.items():
|
|
|
|
|
if key not in existing_keys:
|
|
|
|
|
await self.table_store.put_config(
|
|
|
|
|
workspace, type_name, key, value
|
|
|
|
|
)
|
|
|
|
|
written += 1
|
|
|
|
|
|
|
|
|
|
if written > 0:
|
|
|
|
|
await self.inc_version()
|
|
|
|
|
|
|
|
|
|
return written
|
|
|
|
|
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
async def get_config(self, workspace):
|
2025-05-07 12:58:32 +01:00
|
|
|
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
table = await self.table_store.get_all_for_workspace(workspace)
|
2025-05-07 12:58:32 +01:00
|
|
|
|
|
|
|
|
config = {}
|
|
|
|
|
|
|
|
|
|
for row in table:
|
|
|
|
|
if row[0] not in config:
|
|
|
|
|
config[row[0]] = {}
|
|
|
|
|
config[row[0]][row[1]] = row[2]
|
|
|
|
|
|
|
|
|
|
return config
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_config(self, v, workspace):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
config = await self.get_config(workspace)
|
2025-05-07 12:58:32 +01:00
|
|
|
|
2025-04-22 20:21:38 +01:00
|
|
|
return ConfigResponse(
|
2025-05-07 12:58:32 +01:00
|
|
|
version = await self.get_version(),
|
|
|
|
|
config = config,
|
2025-04-22 20:21:38 +01:00
|
|
|
)
|
|
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_workspace(self, msg, workspace):
|
|
|
|
|
"""Handle workspace-scoped config operations.
|
|
|
|
|
Workspace is provided by queue infrastructure."""
|
2025-04-22 20:21:38 +01:00
|
|
|
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
logger.debug(
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
f"Handling workspace config message: {msg.operation} "
|
|
|
|
|
f"workspace={workspace}"
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
)
|
|
|
|
|
|
2025-04-22 20:21:38 +01:00
|
|
|
if msg.operation == "get":
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
resp = await self.handle_get(msg, workspace)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
elif msg.operation == "list":
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
resp = await self.handle_list(msg, workspace)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
|
|
|
|
elif msg.operation == "getvalues":
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
resp = await self.handle_getvalues(msg, workspace)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
elif msg.operation == "delete":
|
|
|
|
|
resp = await self.handle_delete(msg, workspace)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
elif msg.operation == "put":
|
|
|
|
|
resp = await self.handle_put(msg, workspace)
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
elif msg.operation == "config":
|
|
|
|
|
resp = await self.handle_config(msg, workspace)
|
feat: workspace-based multi-tenancy, replacing user as tenancy axis (#840)
Introduces `workspace` as the isolation boundary for config, flows,
library, and knowledge data. Removes `user` as a schema-level field
throughout the code, API specs, and tests; workspace provides the
same separation more cleanly at the trusted flow.workspace layer
rather than through client-supplied message fields.
Design
------
- IAM tech spec (docs/tech-specs/iam.md) documents current state,
proposed auth/access model, and migration direction.
- Data ownership model (docs/tech-specs/data-ownership-model.md)
captures the workspace/collection/flow hierarchy.
Schema + messaging
------------------
- Drop `user` field from AgentRequest/Step, GraphRagQuery,
DocumentRagQuery, Triples/Graph/Document/Row EmbeddingsRequest,
Sparql/Rows/Structured QueryRequest, ToolServiceRequest.
- Keep collection/workspace routing via flow.workspace at the
service layer.
- Translators updated to not serialise/deserialise user.
API specs
---------
- OpenAPI schemas and path examples cleaned of user fields.
- Websocket async-api messages updated.
- Removed the unused parameters/User.yaml.
Services + base
---------------
- Librarian, collection manager, knowledge, config: all operations
scoped by workspace. Config client API takes workspace as first
positional arg.
- `flow.workspace` set at flow start time by the infrastructure;
no longer pass-through from clients.
- Tool service drops user-personalisation passthrough.
CLI + SDK
---------
- tg-init-workspace and workspace-aware import/export.
- All tg-* commands drop user args; accept --workspace.
- Python API/SDK (flow, socket_client, async_*, explainability,
library) drop user kwargs from every method signature.
MCP server
----------
- All tool endpoints drop user parameters; socket_manager no longer
keyed per user.
Flow service
------------
- Closure-based topic cleanup on flow stop: only delete topics
whose blueprint template was parameterised AND no remaining
live flow (across all workspaces) still resolves to that topic.
Three scopes fall out naturally from template analysis:
* {id} -> per-flow, deleted on stop
* {blueprint} -> per-blueprint, kept while any flow of the
same blueprint exists
* {workspace} -> per-workspace, kept while any flow in the
workspace exists
* literal -> global, never deleted (e.g. tg.request.librarian)
Fixes a bug where stopping a flow silently destroyed the global
librarian exchange, wedging all library operations until manual
restart.
RabbitMQ backend
----------------
- heartbeat=60, blocked_connection_timeout=300. Catches silently
dead connections (broker restart, orphaned channels, network
partitions) within ~2 heartbeat windows, so the consumer
reconnects and re-binds its queue rather than sitting forever
on a zombie connection.
Tests
-----
- Full test refresh: unit, integration, contract, provenance.
- Dropped user-field assertions and constructor kwargs across
~100 test files.
- Renamed user-collection isolation tests to workspace-collection.
2026-04-21 23:23:01 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
else:
|
|
|
|
|
resp = ConfigResponse(
|
|
|
|
|
error=Error(
|
|
|
|
|
type = "bad-operation",
|
|
|
|
|
message = "Bad operation"
|
|
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
return resp
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
async def handle_system(self, msg):
|
|
|
|
|
"""Handle system-level config operations.
|
|
|
|
|
Workspace, when needed, comes from message body."""
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
logger.debug(
|
|
|
|
|
f"Handling system config message: {msg.operation} "
|
|
|
|
|
f"workspace={msg.workspace}"
|
|
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
if msg.operation == "getvalues-all-ws":
|
|
|
|
|
resp = await self.handle_getvalues_all_ws(msg)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
elif msg.operation in ("get", "list", "getvalues", "delete",
|
|
|
|
|
"put", "config"):
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
if not msg.workspace:
|
|
|
|
|
return ConfigResponse(
|
|
|
|
|
error=Error(
|
|
|
|
|
type = "bad-request",
|
|
|
|
|
message = "Workspace is required"
|
|
|
|
|
)
|
|
|
|
|
)
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
handler = {
|
|
|
|
|
"get": self.handle_get,
|
|
|
|
|
"list": self.handle_list,
|
|
|
|
|
"getvalues": self.handle_getvalues,
|
|
|
|
|
"delete": self.handle_delete,
|
|
|
|
|
"put": self.handle_put,
|
|
|
|
|
"config": self.handle_config,
|
|
|
|
|
}[msg.operation]
|
2025-04-22 20:21:38 +01:00
|
|
|
|
Per-workspace queue routing for workspace-scoped services (#862)
Workspace identity is now determined by queue infrastructure instead of
message body fields, closing a privilege-escalation vector where a caller
could spoof workspace in the request payload.
- Add WorkspaceProcessor base class: discovers workspaces from config at
startup, creates per-workspace consumers (queue:workspace), and manages
consumer lifecycle on workspace create/delete events
- Roll out to librarian, flow-svc, knowledge cores, and config-svc
- Config service gets a dual-queue regime: a system queue for
cross-workspace ops (getvalues-all-ws, bootstrapper writes to
__workspaces__) and per-workspace queues for tenant-scoped ops, with
workspace discovery from its own Cassandra store
- Remove workspace field from request schemas (FlowRequest,
LibrarianRequest, KnowledgeRequest, CollectionManagementRequest) and
from DocumentMetadata / ProcessingMetadata — table stores now accept
workspace as an explicit parameter
- Strip workspace encode/decode from all message translators and gateway
serializers
- Gateway enforces workspace existence: reject requests targeting
non-existent workspaces instead of routing to queues with no consumer
- Config service provisions new workspaces from __template__ on creation
- Add workspace lifecycle hooks to AsyncProcessor so any processor can
react to workspace create/delete without subclassing WorkspaceProcessor
2026-05-04 10:30:03 +01:00
|
|
|
resp = await handler(msg, msg.workspace)
|
|
|
|
|
|
|
|
|
|
else:
|
2025-04-22 20:21:38 +01:00
|
|
|
resp = ConfigResponse(
|
|
|
|
|
error=Error(
|
|
|
|
|
type = "bad-operation",
|
|
|
|
|
message = "Bad operation"
|
|
|
|
|
)
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
return resp
|