Add a route-level kill switch for streaming orchestrator cutover.

This commit is contained in:
CREDO23 2026-05-07 14:44:36 +02:00
parent 2ec2e82d9d
commit c0706364d1
3 changed files with 146 additions and 36 deletions

View file

@@ -1,48 +1,127 @@
"""Top-level chat streaming entrypoints (stubs until wired)."""
"""Top-level chat streaming entrypoints.
For now these orchestrator functions are thin compatibility wrappers around the
current ``stream_new_chat`` / ``stream_resume_chat`` implementations. Routing
calls through this module lets us cut over to the fully modular event relay in
one place later without touching API routes again.
"""
from __future__ import annotations
from collections.abc import AsyncGenerator
from typing import Any
from typing import Any, Literal
from app.agents.new_chat.filesystem_selection import FilesystemSelection
from app.db import ChatVisibility
from app.tasks.chat.stream_new_chat import stream_new_chat, stream_resume_chat
async def stream_chat(
    *,
    user_query: str,
    search_space_id: int,
    chat_id: int,
    user_id: str | None = None,
    llm_config_id: int = -1,
    mentioned_document_ids: list[int] | None = None,
    mentioned_surfsense_doc_ids: list[int] | None = None,
    mentioned_documents: list[dict[str, Any]] | None = None,
    checkpoint_id: str | None = None,
    needs_history_bootstrap: bool = False,
    thread_visibility: ChatVisibility | None = None,
    current_user_display_name: str | None = None,
    disabled_tools: list[str] | None = None,
    filesystem_selection: FilesystemSelection | None = None,
    request_id: str | None = None,
    user_image_data_urls: list[str] | None = None,
) -> AsyncGenerator[str, None]:
    """Stream a new chat turn through the current production pipeline.

    Thin compatibility wrapper: forwards every argument unchanged to
    ``stream_new_chat`` and re-yields its chunks. Keeping this indirection
    lets the API routes switch to the modular event relay later by changing
    only this module.

    Args:
        user_query: The user's message for this turn.
        search_space_id: Search space the chat belongs to.
        chat_id: Identifier of the chat thread being streamed.
        user_id: Acting user's id, if known.
        llm_config_id: LLM configuration to use; ``-1`` presumably selects
            a default — confirm against ``stream_new_chat``.
        mentioned_document_ids: Ids of documents mentioned in the query.
        mentioned_surfsense_doc_ids: Ids of mentioned SurfSense documents.
        mentioned_documents: Raw mentioned-document payloads.
        checkpoint_id: Checkpoint to continue from, if resuming state.
        needs_history_bootstrap: Whether history must be loaded first.
        thread_visibility: Visibility to apply to the thread, if any.
        current_user_display_name: Display name for attribution.
        disabled_tools: Tool names the user has disabled.
        filesystem_selection: Filesystem scope selected for this turn.
        request_id: Correlation id for this streaming request.
        user_image_data_urls: Inline image data URLs attached by the user.

    Yields:
        Serialized event chunks produced by ``stream_new_chat``.
    """
    # Pure pass-through: no argument is inspected or transformed here.
    async for chunk in stream_new_chat(
        user_query=user_query,
        search_space_id=search_space_id,
        chat_id=chat_id,
        user_id=user_id,
        llm_config_id=llm_config_id,
        mentioned_document_ids=mentioned_document_ids,
        mentioned_surfsense_doc_ids=mentioned_surfsense_doc_ids,
        mentioned_documents=mentioned_documents,
        checkpoint_id=checkpoint_id,
        needs_history_bootstrap=needs_history_bootstrap,
        thread_visibility=thread_visibility,
        current_user_display_name=current_user_display_name,
        disabled_tools=disabled_tools,
        filesystem_selection=filesystem_selection,
        request_id=request_id,
        user_image_data_urls=user_image_data_urls,
    ):
        yield chunk
async def stream_resume(
    *,
    chat_id: int,
    search_space_id: int,
    decisions: list[dict],
    user_id: str | None = None,
    llm_config_id: int = -1,
    thread_visibility: ChatVisibility | None = None,
    filesystem_selection: FilesystemSelection | None = None,
    request_id: str | None = None,
    disabled_tools: list[str] | None = None,
) -> AsyncGenerator[str, None]:
    """Resume an interrupted chat turn through the current production pipeline.

    Thin compatibility wrapper: forwards every argument unchanged to
    ``stream_resume_chat`` and re-yields its chunks, so API routes can cut
    over to the modular event relay in this one module later.

    Args:
        chat_id: Identifier of the interrupted chat thread.
        search_space_id: Search space the chat belongs to.
        decisions: User decisions answering the interrupt(s); the exact
            dict schema is defined by ``stream_resume_chat``.
        user_id: Acting user's id, if known.
        llm_config_id: LLM configuration to use; ``-1`` presumably selects
            a default — confirm against ``stream_resume_chat``.
        thread_visibility: Visibility to apply to the thread, if any.
        filesystem_selection: Filesystem scope selected for this turn.
        request_id: Correlation id for this streaming request.
        disabled_tools: Tool names the user has disabled.

    Yields:
        Serialized event chunks produced by ``stream_resume_chat``.
    """
    # Pure pass-through: no argument is inspected or transformed here.
    async for chunk in stream_resume_chat(
        chat_id=chat_id,
        search_space_id=search_space_id,
        decisions=decisions,
        user_id=user_id,
        llm_config_id=llm_config_id,
        thread_visibility=thread_visibility,
        filesystem_selection=filesystem_selection,
        request_id=request_id,
        disabled_tools=disabled_tools,
    ):
        yield chunk
async def stream_regenerate(
    *,
    user_query: str,
    search_space_id: int,
    chat_id: int,
    user_id: str | None = None,
    llm_config_id: int = -1,
    mentioned_document_ids: list[int] | None = None,
    mentioned_surfsense_doc_ids: list[int] | None = None,
    mentioned_documents: list[dict[str, Any]] | None = None,
    checkpoint_id: str | None = None,
    needs_history_bootstrap: bool = False,
    thread_visibility: ChatVisibility | None = None,
    current_user_display_name: str | None = None,
    disabled_tools: list[str] | None = None,
    filesystem_selection: FilesystemSelection | None = None,
    request_id: str | None = None,
    user_image_data_urls: list[str] | None = None,
    flow: Literal["new", "regenerate"] = "regenerate",
) -> AsyncGenerator[str, None]:
    """Regenerate an assistant turn through the current production pipeline.

    Thin compatibility wrapper: identical to :func:`stream_chat` except that
    it also forwards ``flow`` (defaulting to ``"regenerate"``) so the
    underlying ``stream_new_chat`` can distinguish a regeneration from a
    brand-new turn.

    Args:
        user_query: The user's message whose answer is being regenerated.
        search_space_id: Search space the chat belongs to.
        chat_id: Identifier of the chat thread being streamed.
        user_id: Acting user's id, if known.
        llm_config_id: LLM configuration to use; ``-1`` presumably selects
            a default — confirm against ``stream_new_chat``.
        mentioned_document_ids: Ids of documents mentioned in the query.
        mentioned_surfsense_doc_ids: Ids of mentioned SurfSense documents.
        mentioned_documents: Raw mentioned-document payloads.
        checkpoint_id: Checkpoint to continue from, if resuming state.
        needs_history_bootstrap: Whether history must be loaded first.
        thread_visibility: Visibility to apply to the thread, if any.
        current_user_display_name: Display name for attribution.
        disabled_tools: Tool names the user has disabled.
        filesystem_selection: Filesystem scope selected for this turn.
        request_id: Correlation id for this streaming request.
        user_image_data_urls: Inline image data URLs attached by the user.
        flow: Which pipeline flow to run; ``"regenerate"`` by default.

    Yields:
        Serialized event chunks produced by ``stream_new_chat``.
    """
    # Pure pass-through: no argument is inspected or transformed here.
    async for chunk in stream_new_chat(
        user_query=user_query,
        search_space_id=search_space_id,
        chat_id=chat_id,
        user_id=user_id,
        llm_config_id=llm_config_id,
        mentioned_document_ids=mentioned_document_ids,
        mentioned_surfsense_doc_ids=mentioned_surfsense_doc_ids,
        mentioned_documents=mentioned_documents,
        checkpoint_id=checkpoint_id,
        needs_history_bootstrap=needs_history_bootstrap,
        thread_visibility=thread_visibility,
        current_user_display_name=current_user_display_name,
        disabled_tools=disabled_tools,
        filesystem_selection=filesystem_selection,
        request_id=request_id,
        user_image_data_urls=user_image_data_urls,
        flow=flow,
    ):
        yield chunk