Author labels in shared chats: bootstrap, stream prefix, route display name

This commit is contained in:
CREDO23 2026-02-06 18:09:32 +02:00
parent d732bb7334
commit 48d442a387
4 changed files with 27 additions and 4 deletions

View file

@@ -138,7 +138,7 @@ You have access to the following tools:
* Prioritize showing: diagrams, charts, infographics, key illustrations, or images that help explain the content.
* Don't show every image - just the most relevant 1-3 images that enhance understanding.
7. save_memory: Save facts, preferences, or context about the user for personalized responses.
7. save_memory: Save facts, preferences, or context for personalized responses.
- Use this when the user explicitly or implicitly shares information worth remembering.
- Trigger scenarios:
* User says "remember this", "keep this in mind", "note that", or similar

View file

@@ -1052,6 +1052,7 @@ async def handle_new_chat(
mentioned_surfsense_doc_ids=request.mentioned_surfsense_doc_ids,
needs_history_bootstrap=thread.needs_history_bootstrap,
thread_visibility=thread.visibility,
current_user_display_name=user.display_name or "A team member",
),
media_type="text/event-stream",
headers={
@@ -1283,6 +1284,7 @@ async def regenerate_response(
checkpoint_id=target_checkpoint_id,
needs_history_bootstrap=thread.needs_history_bootstrap,
thread_visibility=thread.visibility,
current_user_display_name=user.display_name or "A team member",
):
yield chunk
# If we get here, streaming completed successfully

View file

@@ -209,6 +209,7 @@ async def stream_new_chat(
checkpoint_id: str | None = None,
needs_history_bootstrap: bool = False,
thread_visibility: ChatVisibility | None = None,
current_user_display_name: str | None = None,
) -> AsyncGenerator[str, None]:
"""
Stream chat responses from the new SurfSense deep agent.
@@ -315,7 +316,9 @@ async def stream_new_chat(
# Bootstrap history for cloned chats (no LangGraph checkpoint exists yet)
if needs_history_bootstrap:
langchain_messages = await bootstrap_history_from_db(session, chat_id)
langchain_messages = await bootstrap_history_from_db(
session, chat_id, thread_visibility=visibility
)
# Clear the flag so we don't bootstrap again on next message
from app.db import NewChatThread
@@ -378,6 +381,9 @@ async def stream_new_chat(
context = "\n\n".join(context_parts)
final_query = f"{context}\n\n<user_query>{user_query}</user_query>"
if visibility == ChatVisibility.SEARCH_SPACE and current_user_display_name:
final_query = f"**[{current_user_display_name}]:** {final_query}"
# if messages:
# # Convert frontend messages to LangChain format
# for msg in messages:

View file

@@ -12,6 +12,7 @@ These utilities help extract and transform content for different use cases.
from langchain_core.messages import AIMessage, HumanMessage
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
def extract_text_content(content: str | dict | list) -> str:
@@ -38,6 +39,7 @@ def extract_text_content(content: str | dict | list) -> str:
async def bootstrap_history_from_db(
session: AsyncSession,
thread_id: int,
thread_visibility: "ChatVisibility | None" = None,
) -> list[HumanMessage | AIMessage]:
"""
Load message history from database and convert to LangChain format.
@@ -45,20 +47,28 @@ async def bootstrap_history_from_db(
Used for cloned chats where the LangGraph checkpointer has no state,
but we have messages in the database that should be used as context.
When thread_visibility is SEARCH_SPACE, user messages are prefixed with
the author's display name so the LLM sees who said what.
Args:
session: Database session
thread_id: The chat thread ID
thread_visibility: When SEARCH_SPACE, user messages get author prefix
Returns:
List of LangChain messages (HumanMessage/AIMessage)
"""
from app.db import NewChatMessage
from app.db import ChatVisibility, NewChatMessage
result = await session.execute(
is_shared = thread_visibility == ChatVisibility.SEARCH_SPACE
stmt = (
select(NewChatMessage)
.filter(NewChatMessage.thread_id == thread_id)
.order_by(NewChatMessage.created_at)
)
if is_shared:
stmt = stmt.options(selectinload(NewChatMessage.author))
result = await session.execute(stmt)
db_messages = result.scalars().all()
langchain_messages: list[HumanMessage | AIMessage] = []
@@ -68,6 +78,11 @@ async def bootstrap_history_from_db(
if not text_content:
continue
if msg.role == "user":
if is_shared:
author_name = (
(msg.author.display_name if msg.author else None) or "A team member"
)
text_content = f"**[{author_name}]:** {text_content}"
langchain_messages.append(HumanMessage(content=text_content))
elif msg.role == "assistant":
langchain_messages.append(AIMessage(content=text_content))