Add multi-pattern orchestrator with plan-then-execute and supervisor (#739)

Introduce an agent orchestrator service that supports three
execution patterns (ReAct, plan-then-execute, supervisor) with
LLM-based meta-routing to select the appropriate pattern and task
type per request. Update the agent schema to support
orchestration fields (correlation, sub-agents, plan steps) and
remove legacy response fields (answer, thought, observation).
This commit is contained in:
cybermaggedon 2026-03-31 00:32:49 +01:00 committed by GitHub
parent 7af1d60db8
commit 849987f0e6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 3006 additions and 172 deletions

View file

@@ -402,23 +402,6 @@ class SocketClient:
content=resp.get("content", ""),
end_of_message=resp.get("end_of_message", False)
)
# Non-streaming agent format: chunk_type is empty but has thought/observation/answer fields
elif resp.get("thought"):
return AgentThought(
content=resp.get("thought", ""),
end_of_message=resp.get("end_of_message", False)
)
elif resp.get("observation"):
return AgentObservation(
content=resp.get("observation", ""),
end_of_message=resp.get("end_of_message", False)
)
elif resp.get("answer"):
return AgentAnswer(
content=resp.get("answer", ""),
end_of_message=resp.get("end_of_message", False),
end_of_dialog=resp.get("end_of_dialog", False)
)
else:
content = resp.get("response", resp.get("chunk", resp.get("text", "")))
return RAGChunk(

View file

@@ -57,8 +57,7 @@ class AgentClient(RequestResponse):
await self.request(
AgentRequest(
question = question,
plan = plan,
state = state,
state = state or "",
history = history,
),
recipient=recipient,

View file

@@ -90,9 +90,6 @@ class AgentService(FlowProcessor):
type = "agent-error",
message = str(e),
),
thought = None,
observation = None,
answer = None,
end_of_message = True,
end_of_dialog = True,
),

View file

@@ -16,6 +16,14 @@ class AgentRequestTranslator(MessageTranslator):
collection=data.get("collection", "default"),
streaming=data.get("streaming", False),
session_id=data.get("session_id", ""),
conversation_id=data.get("conversation_id", ""),
pattern=data.get("pattern", ""),
task_type=data.get("task_type", ""),
framing=data.get("framing", ""),
correlation_id=data.get("correlation_id", ""),
parent_session_id=data.get("parent_session_id", ""),
subagent_goal=data.get("subagent_goal", ""),
expected_siblings=data.get("expected_siblings", 0),
)
def from_pulsar(self, obj: AgentRequest) -> Dict[str, Any]:
@@ -28,6 +36,14 @@ class AgentRequestTranslator(MessageTranslator):
"collection": getattr(obj, "collection", "default"),
"streaming": getattr(obj, "streaming", False),
"session_id": getattr(obj, "session_id", ""),
"conversation_id": getattr(obj, "conversation_id", ""),
"pattern": getattr(obj, "pattern", ""),
"task_type": getattr(obj, "task_type", ""),
"framing": getattr(obj, "framing", ""),
"correlation_id": getattr(obj, "correlation_id", ""),
"parent_session_id": getattr(obj, "parent_session_id", ""),
"subagent_goal": getattr(obj, "subagent_goal", ""),
"expected_siblings": getattr(obj, "expected_siblings", 0),
}
@@ -40,24 +56,15 @@ class AgentResponseTranslator(MessageTranslator):
def from_pulsar(self, obj: AgentResponse) -> Dict[str, Any]:
result = {}
# Check if this is a streaming response (has chunk_type)
if hasattr(obj, 'chunk_type') and obj.chunk_type:
if obj.chunk_type:
result["chunk_type"] = obj.chunk_type
if obj.content:
result["content"] = obj.content
result["end_of_message"] = getattr(obj, "end_of_message", False)
result["end_of_dialog"] = getattr(obj, "end_of_dialog", False)
else:
# Legacy format (non-streaming)
if obj.answer:
result["answer"] = obj.answer
if obj.thought:
result["thought"] = obj.thought
if obj.observation:
result["observation"] = obj.observation
# Include completion flags for legacy format too
result["end_of_message"] = getattr(obj, "end_of_message", False)
result["end_of_dialog"] = getattr(obj, "end_of_dialog", False)
if obj.content:
result["content"] = obj.content
result["end_of_message"] = getattr(obj, "end_of_message", False)
result["end_of_dialog"] = getattr(obj, "end_of_dialog", False)
if getattr(obj, "message_id", ""):
result["message_id"] = obj.message_id
# Include explainability fields if present
explain_id = getattr(obj, "explain_id", None)
@@ -76,11 +83,5 @@ class AgentResponseTranslator(MessageTranslator):
def from_response_with_completion(self, obj: AgentResponse) -> Tuple[Dict[str, Any], bool]:
"""Returns (response_dict, is_final)"""
# For streaming responses, check end_of_dialog
if hasattr(obj, 'chunk_type') and obj.chunk_type:
is_final = getattr(obj, 'end_of_dialog', False)
else:
# For legacy responses, check if answer is present
is_final = (obj.answer is not None)
is_final = getattr(obj, 'end_of_dialog', False)
return self.from_pulsar(obj), is_final

View file

@@ -1,5 +1,6 @@
from dataclasses import dataclass, field
from typing import Optional
from ..core.topic import topic
from ..core.primitives import Error
@@ -8,6 +9,14 @@ from ..core.primitives import Error
# Prompt services, abstract the prompt generation
@dataclass
class PlanStep:
    """One step of a plan produced by the plan-then-execute pattern.

    Steps form a small dependency DAG: ``depends_on`` holds the indices
    (into the enclosing plan list) of steps that must complete first.
    The executor advances ``status`` through its lifecycle and records
    the step's outcome in ``result``.
    """
    goal: str = ""  # What this step is meant to accomplish
    tool_hint: str = ""  # Suggested tool for this step
    depends_on: list[int] = field(default_factory=list)  # Indices of prerequisite steps
    status: str = "pending"  # Lifecycle: pending, running, completed, failed
    result: str = ""  # Result of step execution (empty until the step has run)
@dataclass
class AgentStep:
thought: str = ""
@@ -15,6 +24,9 @@ class AgentStep:
arguments: dict[str, str] = field(default_factory=dict)
observation: str = ""
user: str = "" # User context for the step
step_type: str = "" # "react", "plan", "execute", "decompose", "synthesise"
plan: list[PlanStep] = field(default_factory=list) # Plan steps (for plan-then-execute)
subagent_results: dict[str, str] = field(default_factory=dict) # Subagent results keyed by goal
@dataclass
class AgentRequest:
@@ -27,6 +39,16 @@ class AgentRequest:
streaming: bool = False # Enable streaming response delivery (default false)
session_id: str = "" # For provenance tracking across iterations
# Orchestration fields
conversation_id: str = "" # Groups related requests into a conversation
pattern: str = "" # Selected pattern: "react", "plan-then-execute", "supervisor"
task_type: str = "" # Task type from config: "general", "research", etc.
framing: str = "" # Domain framing text injected into prompts
correlation_id: str = "" # Links fan-out subagents to parent for fan-in
parent_session_id: str = "" # Session ID of the supervisor that spawned this subagent
subagent_goal: str = "" # Specific goal for a subagent (set by supervisor)
expected_siblings: int = 0 # Number of sibling subagents in this fan-out
@dataclass
class AgentResponse:
# Streaming-first design
@@ -39,11 +61,10 @@ class AgentResponse:
explain_id: str | None = None # Provenance URI (announced as created)
explain_graph: str | None = None # Named graph where explain was stored
# Legacy fields (deprecated but kept for backward compatibility)
answer: str = ""
# Orchestration fields
message_id: str = "" # Unique ID for this response message
error: Error | None = None
thought: str = ""
observation: str = ""
############################################################################