Merge commit '61f4d05cd1' into dev_mod

DESKTOP-RTLN3BA\$punk 2026-04-28 09:25:41 -07:00
commit e6433f78c4
62 changed files with 1747 additions and 1523 deletions


@@ -64,6 +64,7 @@ from app.services.connector_service import ConnectorService
 from app.services.new_streaming_service import VercelStreamingService
 from app.utils.content_utils import bootstrap_history_from_db
 from app.utils.perf import get_perf_logger, log_system_snapshot, trim_native_heap
+from app.utils.user_message_multimodal import build_human_message_content

 _background_tasks: set[asyncio.Task] = set()
 _perf_log = get_perf_logger()
@@ -1394,6 +1395,7 @@ async def stream_new_chat(
     disabled_tools: list[str] | None = None,
     filesystem_selection: FilesystemSelection | None = None,
     request_id: str | None = None,
+    user_image_data_urls: list[str] | None = None,
 ) -> AsyncGenerator[str, None]:
     """
     Stream chat responses from the new SurfSense deep agent.
@@ -1669,8 +1671,10 @@
     #     elif msg.role == "assistant":
     #         langchain_messages.append(AIMessage(content=msg.content))
     #     else:
     # Fallback: just use the current user query with attachment context
-    langchain_messages.append(HumanMessage(content=final_query))
+    human_content = build_human_message_content(
+        final_query, list(user_image_data_urls or ())
+    )
+    langchain_messages.append(HumanMessage(content=human_content))

     input_state = {
         # Lets not pass this message atm because we are using the checkpointer to manage the conversation history
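Note: the helper imported and called above, `build_human_message_content`, is not itself shown in this diff. A minimal sketch of what such a helper could look like, assuming it follows LangChain's usual content-part convention for multimodal human messages (plain string when there are no images, otherwise a list of text and image_url parts); the module contents below are an assumption, not the repo's actual implementation:

# Hypothetical sketch of app/utils/user_message_multimodal.py; the real helper
# may differ. Assumes LangChain's list-of-content-parts multimodal format.
def build_human_message_content(
    text: str, image_data_urls: list[str] | None = None
) -> str | list[dict]:
    """Return HumanMessage content: plain text, or text plus image parts."""
    if not image_data_urls:
        return text
    parts: list[dict] = []
    if text.strip():
        parts.append({"type": "text", "text": text})
    for url in image_data_urls:
        # Data URLs ("data:image/png;base64,...") are passed through unchanged.
        parts.append({"type": "image_url", "image_url": {"url": url}})
    return parts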
@@ -1731,8 +1735,13 @@
     action_verb = "Processing"
     processing_parts = []
-    query_text = user_query[:80] + ("..." if len(user_query) > 80 else "")
-    processing_parts.append(query_text)
+    if user_query.strip():
+        query_text = user_query[:80] + ("..." if len(user_query) > 80 else "")
+        processing_parts.append(query_text)
+    elif user_image_data_urls:
+        processing_parts.append(f"[{len(user_image_data_urls)} image(s)]")
+    else:
+        processing_parts.append("(message)")

     if mentioned_surfsense_docs:
         doc_names = []
@@ -1794,8 +1803,13 @@
     _turn_accumulator.set(None)

+    title_seed = user_query.strip() or (
+        f"[{len(user_image_data_urls or [])} image(s)]"
+        if user_image_data_urls
+        else ""
+    )
     prompt = TITLE_GENERATION_PROMPT.replace(
-        "{user_query}", user_query[:500]
+        "{user_query}", title_seed[:500] or "(message)"
     )

     messages = [{"role": "user", "content": prompt}]
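The title seed introduced above and the memory seed in the next hunk share the same fallback pattern: use the stripped query text, else an image-count placeholder, else a generic label. A small standalone illustration of how that expression behaves (the sample inputs are made up for demonstration):

def seed_for(user_query: str, user_image_data_urls: list[str] | None) -> str:
    # Mirrors the fallback used for the title and memory seeds in this commit.
    return user_query.strip() or (
        f"[{len(user_image_data_urls or [])} image(s)]"
        if user_image_data_urls
        else "(message)"
    )


print(seed_for("Summarize my notes", None))             # -> "Summarize my notes"
print(seed_for("   ", ["data:image/png;base64,..."]))   # -> "[1 image(s)]"
print(seed_for("", None))                                # -> "(message)"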
@@ -1998,10 +2012,15 @@
     # Fire background memory extraction if the agent didn't handle it.
     # Shared threads write to team memory; private threads write to user memory.
     if not stream_result.agent_called_update_memory:
+        memory_seed = user_query.strip() or (
+            f"[{len(user_image_data_urls or [])} image(s)]"
+            if user_image_data_urls
+            else "(message)"
+        )
         if visibility == ChatVisibility.SEARCH_SPACE:
             task = asyncio.create_task(
                 extract_and_save_team_memory(
-                    user_message=user_query,
+                    user_message=memory_seed,
                     search_space_id=search_space_id,
                     llm=llm,
                     author_display_name=current_user_display_name,
@@ -2012,7 +2031,7 @@
         elif user_id:
             task = asyncio.create_task(
                 extract_and_save_memory(
-                    user_message=user_query,
+                    user_message=memory_seed,
                     user_id=user_id,
                     llm=llm,
                 )
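For context on the caller side, `user_image_data_urls` is expected to carry image data URLs. A rough, self-contained sketch of turning uploaded image bytes into that list before it is passed to `stream_new_chat`; the helper name and placeholder bytes below are illustrative assumptions, not code from this commit:

import base64


def to_data_url(raw: bytes, mime: str = "image/png") -> str:
    """Encode raw image bytes as a data URL suitable for user_image_data_urls."""
    return f"data:{mime};base64,{base64.b64encode(raw).decode('ascii')}"


# Example: turn two uploaded images into the list the endpoint now accepts.
uploads = [b"\x89PNG...", b"\xff\xd8\xff..."]  # placeholder bytes, not real images
user_image_data_urls = [to_data_url(blob) for blob in uploads]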