Revert "feat(chat): add multi-agent mode routing scaffold and telemetry."

This reverts commit 7b9a218d62.
This commit is contained in:
CREDO23 2026-04-29 16:21:57 +02:00
parent 2eed81d059
commit dcae196eea
13 changed files with 58 additions and 742 deletions

View file

@@ -1,17 +0,0 @@
"""Multi-agent v1 architecture package."""
from app.agents.multi_agent_v1.contracts import (
GroundingEvidence,
SubagentResult,
SubagentTaskPlan,
WorkerBudget,
)
from app.agents.multi_agent_v1.entrypoint import MultiAgentEntrypoint
__all__ = [
"GroundingEvidence",
"MultiAgentEntrypoint",
"SubagentResult",
"SubagentTaskPlan",
"WorkerBudget",
]

View file

@@ -1,36 +0,0 @@
"""Contracts for multi_agent_v1 orchestrator and subagent communication."""
from __future__ import annotations
from typing import Literal
from pydantic import BaseModel, Field
class WorkerBudget(BaseModel):
max_steps: int = Field(default=1, ge=1)
max_duration_ms: int = Field(default=15_000, ge=100)
class SubagentTaskPlan(BaseModel):
domain: str = Field(..., min_length=1)
goal: str = Field(..., min_length=1)
constraints: list[str] = Field(default_factory=list)
budget: WorkerBudget = Field(default_factory=WorkerBudget)
class GroundingEvidence(BaseModel):
claim: str = Field(..., min_length=1)
source_type: str = Field(..., min_length=1)
source_ref: str = Field(..., min_length=1)
confidence: float = Field(default=0.0, ge=0.0, le=1.0)
snippet: str = ""
class SubagentResult(BaseModel):
status: Literal["success", "partial", "error"]
summary: str = ""
evidence: list[GroundingEvidence] = Field(default_factory=list)
artifacts: list[str] = Field(default_factory=list)
needs_human: bool = False
error_class: str | None = None

View file

@@ -1,24 +0,0 @@
"""Multi-agent v1 entrypoint scaffold with safe fallback behavior."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Callable
from typing import Any
class MultiAgentEntrypoint:
def stream_new_chat(
self,
*,
fallback_streamer: Callable[..., AsyncGenerator[str, None]],
fallback_kwargs: dict[str, Any],
) -> AsyncGenerator[str, None]:
return fallback_streamer(**fallback_kwargs)
def stream_resume_chat(
self,
*,
fallback_streamer: Callable[..., AsyncGenerator[str, None]],
fallback_kwargs: dict[str, Any],
) -> AsyncGenerator[str, None]:
return fallback_streamer(**fallback_kwargs)

View file

@@ -1,40 +0,0 @@
"""Architecture mode contracts and resolution helpers for chat sessions."""
from __future__ import annotations
from enum import StrEnum
from app.config import config
class ArchitectureMode(StrEnum):
SINGLE_AGENT = "single_agent"
SHADOW_MULTI_AGENT_V1 = "shadow_multi_agent_v1"
MULTI_AGENT_V1 = "multi_agent_v1"
def parse_architecture_mode(value: str | None) -> ArchitectureMode | None:
if not value:
return None
normalized = value.strip().lower()
if not normalized:
return None
try:
return ArchitectureMode(normalized)
except ValueError:
return None
def resolve_architecture_mode(request_override: str | None = None) -> ArchitectureMode:
if config.FORCE_SINGLE_AGENT:
return ArchitectureMode.SINGLE_AGENT
override_mode = parse_architecture_mode(request_override)
if override_mode is not None:
return override_mode
default_mode = parse_architecture_mode(config.AGENT_ARCHITECTURE_MODE)
if default_mode is not None:
return default_mode
return ArchitectureMode.SINGLE_AGENT

View file

@@ -1,43 +0,0 @@
"""Architecture telemetry logging for chat execution modes."""
from __future__ import annotations
import json
from typing import Any
from app.utils.perf import get_perf_logger
_perf_log = get_perf_logger()
def log_architecture_telemetry(
*,
phase: str,
architecture_mode: str,
orchestrator_used: bool,
worker_count: int,
retry_count: int,
latency_ms: float,
token_total: int,
request_id: str | None = None,
turn_id: str | None = None,
status: str = "ok",
source: str = "new_chat",
extra: dict[str, Any] | None = None,
) -> None:
payload: dict[str, Any] = {
"phase": phase,
"source": source,
"status": status,
"architecture_mode": architecture_mode,
"orchestrator_used": orchestrator_used,
"worker_count": worker_count,
"retry_count": retry_count,
"latency_ms": round(latency_ms, 2),
"token_total": token_total,
"request_id": request_id or "unknown",
"turn_id": turn_id or "unknown",
}
if extra:
payload.update(extra)
_perf_log.info("[architecture_telemetry] %s", json.dumps(payload, ensure_ascii=False))