diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/agent.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/agent.py
new file mode 100644
index 000000000..3b27d39e8
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/agent.py
@@ -0,0 +1,54 @@
+"""`confluence` route: ``SubAgent`` spec for deepagents."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Any
+
+from deepagents import SubAgent
+from langchain_core.language_models import BaseChatModel
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.md_file_reader import (
+ read_md_file,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+ merge_tools_permissions,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.subagent_builder import (
+ pack_subagent,
+)
+
+from .tools.index import load_tools
+
+NAME = "confluence"
+
+
+def build_subagent(  # Assemble the `confluence` SubAgent spec from md prompts + permissioned tools.
+    *,
+    dependencies: dict[str, Any],
+    model: BaseChatModel | None = None,
+    extra_middleware: Sequence[Any] | None = None,
+    extra_tools_bucket: ToolsPermissions | None = None,
+) -> SubAgent:
+    buckets = load_tools(dependencies=dependencies)  # connector tools grouped into allow/ask permission buckets
+    merged_tools_bucket = merge_tools_permissions(buckets, extra_tools_bucket)  # caller-supplied bucket is merged in
+    tools = [  # flatten both buckets into the tool list handed to the subagent
+        row["tool"]
+        for row in (*merged_tools_bucket["allow"], *merged_tools_bucket["ask"])
+        if row.get("tool") is not None
+    ]
+    interrupt_on = {r["name"]: True for r in merged_tools_bucket["ask"] if r.get("name")}  # "ask" tools require HITL approval
+    description = read_md_file(__package__, "description").strip()  # description.md sitting next to this module
+    if not description:
+        description = "Handles confluence tasks for this workspace."  # fallback if the md file is empty/missing
+    system_prompt = read_md_file(__package__, "system_prompt").strip()
+    return pack_subagent(
+        name=NAME,
+        description=description,
+        system_prompt=system_prompt,
+        tools=tools,
+        interrupt_on=interrupt_on,
+        model=model,
+        extra_middleware=extra_middleware,
+    )
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/description.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/description.md
new file mode 100644
index 000000000..b6f1353d0
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/description.md
@@ -0,0 +1 @@
+Use for Confluence knowledge pages: search/read existing pages, create new pages, and update page content.
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/system_prompt.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/system_prompt.md
new file mode 100644
index 000000000..4d3b7462c
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/system_prompt.md
@@ -0,0 +1,55 @@
+You are the Confluence operations sub-agent.
+You receive delegated instructions from a supervisor agent and return structured results for supervisor synthesis.
+
+
+Execute Confluence page operations accurately in the connected space.
+
+
+
+- `create_confluence_page`
+- `update_confluence_page`
+- `delete_confluence_page`
+
+
+
+- Use only the tools listed in the tool list above.
+- Verify target page and intended mutation before update/delete.
+- If target page is ambiguous, return `status=blocked` with candidate options for supervisor disambiguation.
+- Never invent page IDs, titles, or mutation outcomes.
+
+
+
+- Do not perform non-Confluence tasks.
+
+
+
+- Never claim page mutation success without tool confirmation.
+- If destructive action appears already completed in this session, do not repeat; return prior evidence with an `assumptions` note.
+
+
+
+- On tool failure, return `status=error` with concise retry/recovery `next_step`.
+- On unresolved page ambiguity, return `status=blocked` with candidates.
+
+
+
+Return **only** one JSON object (no markdown/prose):
+{
+ "status": "success" | "partial" | "blocked" | "error",
+ "action_summary": string,
+ "evidence": {
+ "page_id": string | null,
+ "page_title": string | null,
+ "matched_candidates": [
+ { "page_id": string, "page_title": string | null }
+ ] | null
+ },
+ "next_step": string | null,
+ "missing_fields": string[] | null,
+ "assumptions": string[] | null
+}
+Rules:
+- `status=success` -> `next_step=null`, `missing_fields=null`.
+- `status=partial|blocked|error` -> `next_step` must be non-null.
+- `status=blocked` due to missing required inputs -> `missing_fields` must be non-null.
+
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/__init__.py
new file mode 100644
index 000000000..3bf80b61b
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/__init__.py
@@ -0,0 +1,11 @@
+"""Confluence tools for creating, updating, and deleting pages."""
+
+from .create_page import create_create_confluence_page_tool
+from .delete_page import create_delete_confluence_page_tool
+from .update_page import create_update_confluence_page_tool
+
+__all__ = [
+ "create_create_confluence_page_tool",
+ "create_delete_confluence_page_tool",
+ "create_update_confluence_page_tool",
+]
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/create_page.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/create_page.py
new file mode 100644
index 000000000..095413bdb
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/create_page.py
@@ -0,0 +1,211 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.agents.new_chat.tools.hitl import request_approval
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_create_confluence_page_tool(  # Factory: binds DB session + scope into a create-page tool closure.
+    db_session: AsyncSession | None = None,
+    search_space_id: int | None = None,
+    user_id: str | None = None,
+    connector_id: int | None = None,
+):
+    @tool
+    async def create_confluence_page(
+        title: str,
+        content: str | None = None,
+        space_id: str | None = None,
+    ) -> dict[str, Any]:
+        """Create a new page in Confluence.
+
+        Use this tool when the user explicitly asks to create a new Confluence page.
+
+        Args:
+            title: Title of the page.
+            content: Optional HTML/storage format content for the page body.
+            space_id: Optional Confluence space ID to create the page in.
+
+        Returns:
+            Dictionary with status, page_id, and message.
+
+        IMPORTANT:
+        - If status is "rejected", do NOT retry.
+        - If status is "insufficient_permissions", inform user to re-authenticate.
+        """
+        logger.info(f"create_confluence_page called: title='{title}'")
+
+        if db_session is None or search_space_id is None or user_id is None:  # factory was built without required context
+            return {
+                "status": "error",
+                "message": "Confluence tool not properly configured.",
+            }
+
+        try:
+            metadata_service = ConfluenceToolMetadataService(db_session)
+            context = await metadata_service.get_creation_context(  # spaces/accounts shown in the approval UI
+                search_space_id, user_id
+            )
+
+            if "error" in context:
+                return {"status": "error", "message": context["error"]}
+
+            accounts = context.get("accounts", [])
+            if accounts and all(a.get("auth_expired") for a in accounts):  # every connected account has a stale token
+                return {
+                    "status": "auth_error",
+                    "message": "All connected Confluence accounts need re-authentication.",
+                    "connector_type": "confluence",
+                }
+
+            result = request_approval(  # human-in-the-loop gate; may raise GraphInterrupt to pause the graph
+                action_type="confluence_page_creation",
+                tool_name="create_confluence_page",
+                params={
+                    "title": title,
+                    "content": content,
+                    "space_id": space_id,
+                    "connector_id": connector_id,
+                },
+                context=context,
+            )
+
+            if result.rejected:
+                return {
+                    "status": "rejected",
+                    "message": "User declined. Do not retry or suggest alternatives.",
+                }
+
+            final_title = result.params.get("title", title)  # user may edit params in the approval UI
+            final_content = result.params.get("content", content) or ""
+            final_space_id = result.params.get("space_id", space_id)
+            final_connector_id = result.params.get("connector_id", connector_id)
+
+            if not final_title or not final_title.strip():  # validate post-approval values, not the raw inputs
+                return {"status": "error", "message": "Page title cannot be empty."}
+            if not final_space_id:
+                return {"status": "error", "message": "A space must be selected."}
+
+            from sqlalchemy.future import select  # NOTE(review): local import — presumably to avoid an import cycle; confirm
+
+            from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+            actual_connector_id = final_connector_id  # NOTE(review): `result` is shadowed below by the DB result; params already extracted
+            if actual_connector_id is None:  # no explicit connector: fall back to the space's Confluence connector
+                result = await db_session.execute(
+                    select(SearchSourceConnector).filter(
+                        SearchSourceConnector.search_space_id == search_space_id,
+                        SearchSourceConnector.user_id == user_id,
+                        SearchSourceConnector.connector_type
+                        == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+                    )
+                )
+                connector = result.scalars().first()
+                if not connector:
+                    return {
+                        "status": "error",
+                        "message": "No Confluence connector found.",
+                    }
+                actual_connector_id = connector.id
+            else:  # explicit connector id: verify it belongs to this user/space and is a Confluence connector
+                result = await db_session.execute(
+                    select(SearchSourceConnector).filter(
+                        SearchSourceConnector.id == actual_connector_id,
+                        SearchSourceConnector.search_space_id == search_space_id,
+                        SearchSourceConnector.user_id == user_id,
+                        SearchSourceConnector.connector_type
+                        == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+                    )
+                )
+                connector = result.scalars().first()
+                if not connector:
+                    return {
+                        "status": "error",
+                        "message": "Selected Confluence connector is invalid.",
+                    }
+
+            try:  # NOTE(review): client.close() is skipped if create_page raises — consider try/finally
+                client = ConfluenceHistoryConnector(
+                    session=db_session, connector_id=actual_connector_id
+                )
+                api_result = await client.create_page(
+                    space_id=final_space_id,
+                    title=final_title,
+                    body=final_content,
+                )
+                await client.close()
+            except Exception as api_err:
+                if (  # 403 from Confluence -> flag connector as needing re-authentication
+                    "http 403" in str(api_err).lower()
+                    or "status code 403" in str(api_err).lower()
+                ):
+                    try:
+                        _conn = connector
+                        _conn.config = {**_conn.config, "auth_expired": True}  # persist auth-expired flag for the UI
+                        flag_modified(_conn, "config")  # JSON column mutation must be flagged for SQLAlchemy
+                        await db_session.commit()
+                    except Exception:
+                        pass  # best-effort; failing to persist the flag should not mask the 403
+                    return {
+                        "status": "insufficient_permissions",
+                        "connector_id": actual_connector_id,
+                        "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+                    }
+                raise
+
+            page_id = str(api_result.get("id", ""))
+            page_links = (
+                api_result.get("_links", {}) if isinstance(api_result, dict) else {}
+            )
+            page_url = ""
+            if page_links.get("base") and page_links.get("webui"):  # both parts needed to build a full URL
+                page_url = f"{page_links['base']}{page_links['webui']}"
+
+            kb_message_suffix = ""
+            try:  # best-effort immediate KB sync; the scheduled sync is the fallback path
+                from app.services.confluence import ConfluenceKBSyncService
+
+                kb_service = ConfluenceKBSyncService(db_session)
+                kb_result = await kb_service.sync_after_create(
+                    page_id=page_id,
+                    page_title=final_title,
+                    space_id=final_space_id,
+                    body_content=final_content,
+                    connector_id=actual_connector_id,
+                    search_space_id=search_space_id,
+                    user_id=user_id,
+                )
+                if kb_result["status"] == "success":
+                    kb_message_suffix = " Your knowledge base has also been updated."
+                else:
+                    kb_message_suffix = " This page will be added to your knowledge base in the next scheduled sync."
+            except Exception as kb_err:
+                logger.warning(f"KB sync after create failed: {kb_err}")  # sync failure never fails the tool call
+                kb_message_suffix = " This page will be added to your knowledge base in the next scheduled sync."
+
+            return {
+                "status": "success",
+                "page_id": page_id,
+                "page_url": page_url,
+                "message": f"Confluence page '{final_title}' created successfully.{kb_message_suffix}",
+            }
+
+        except Exception as e:
+            from langgraph.errors import GraphInterrupt
+
+            if isinstance(e, GraphInterrupt):  # HITL interrupts must propagate so the graph can pause
+                raise
+            logger.error(f"Error creating Confluence page: {e}", exc_info=True)
+            return {
+                "status": "error",
+                "message": "Something went wrong while creating the page.",
+            }
+
+    return create_confluence_page
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/delete_page.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/delete_page.py
new file mode 100644
index 000000000..7c03c2760
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/delete_page.py
@@ -0,0 +1,189 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.agents.new_chat.tools.hitl import request_approval
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_delete_confluence_page_tool(  # Factory: binds DB session + scope into a delete-page tool closure.
+    db_session: AsyncSession | None = None,
+    search_space_id: int | None = None,
+    user_id: str | None = None,
+    connector_id: int | None = None,
+):
+    @tool
+    async def delete_confluence_page(
+        page_title_or_id: str,
+        delete_from_kb: bool = False,
+    ) -> dict[str, Any]:
+        """Delete a Confluence page.
+
+        Use this tool when the user asks to delete or remove a Confluence page.
+
+        Args:
+            page_title_or_id: The page title or ID to identify the page.
+            delete_from_kb: Whether to also remove from the knowledge base.
+
+        Returns:
+            Dictionary with status, message, and deleted_from_kb.
+
+        IMPORTANT:
+        - If status is "rejected", do NOT retry.
+        - If status is "not_found", relay the message to the user.
+        - If status is "insufficient_permissions", inform user to re-authenticate.
+        """
+        logger.info(
+            f"delete_confluence_page called: page_title_or_id='{page_title_or_id}'"
+        )
+
+        if db_session is None or search_space_id is None or user_id is None:  # factory was built without required context
+            return {
+                "status": "error",
+                "message": "Confluence tool not properly configured.",
+            }
+
+        try:
+            metadata_service = ConfluenceToolMetadataService(db_session)
+            context = await metadata_service.get_deletion_context(  # resolves title-or-id to a concrete page
+                search_space_id, user_id, page_title_or_id
+            )
+
+            if "error" in context:  # service reports errors in-band; map them to tool statuses
+                error_msg = context["error"]
+                if context.get("auth_expired"):
+                    return {
+                        "status": "auth_error",
+                        "message": error_msg,
+                        "connector_id": context.get("connector_id"),
+                        "connector_type": "confluence",
+                    }
+                if "not found" in error_msg.lower():  # NOTE(review): string-matching the message is fragile — confirm service contract
+                    return {"status": "not_found", "message": error_msg}
+                return {"status": "error", "message": error_msg}
+
+            page_data = context["page"]
+            page_id = page_data["page_id"]
+            page_title = page_data.get("page_title", "")
+            document_id = page_data["document_id"]  # KB document backing this page, if any
+            connector_id_from_context = context.get("account", {}).get("id")
+
+            result = request_approval(  # human-in-the-loop gate; may raise GraphInterrupt to pause the graph
+                action_type="confluence_page_deletion",
+                tool_name="delete_confluence_page",
+                params={
+                    "page_id": page_id,
+                    "connector_id": connector_id_from_context,
+                    "delete_from_kb": delete_from_kb,
+                },
+                context=context,
+            )
+
+            if result.rejected:
+                return {
+                    "status": "rejected",
+                    "message": "User declined. Do not retry or suggest alternatives.",
+                }
+
+            final_page_id = result.params.get("page_id", page_id)  # user may edit params in the approval UI
+            final_connector_id = result.params.get(
+                "connector_id", connector_id_from_context
+            )
+            final_delete_from_kb = result.params.get("delete_from_kb", delete_from_kb)
+
+            from sqlalchemy.future import select  # NOTE(review): local import — presumably to avoid an import cycle; confirm
+
+            from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+            if not final_connector_id:
+                return {
+                    "status": "error",
+                    "message": "No connector found for this page.",
+                }
+
+            result = await db_session.execute(  # verify the connector belongs to this user/space (shadows approval result)
+                select(SearchSourceConnector).filter(
+                    SearchSourceConnector.id == final_connector_id,
+                    SearchSourceConnector.search_space_id == search_space_id,
+                    SearchSourceConnector.user_id == user_id,
+                    SearchSourceConnector.connector_type
+                    == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+                )
+            )
+            connector = result.scalars().first()
+            if not connector:
+                return {
+                    "status": "error",
+                    "message": "Selected Confluence connector is invalid.",
+                }
+
+            try:  # NOTE(review): client.close() is skipped if delete_page raises — consider try/finally
+                client = ConfluenceHistoryConnector(
+                    session=db_session, connector_id=final_connector_id
+                )
+                await client.delete_page(final_page_id)
+                await client.close()
+            except Exception as api_err:
+                if (  # 403 from Confluence -> flag connector as needing re-authentication
+                    "http 403" in str(api_err).lower()
+                    or "status code 403" in str(api_err).lower()
+                ):
+                    try:
+                        connector.config = {**connector.config, "auth_expired": True}  # persist auth-expired flag for the UI
+                        flag_modified(connector, "config")  # JSON column mutation must be flagged for SQLAlchemy
+                        await db_session.commit()
+                    except Exception:
+                        pass  # best-effort; failing to persist the flag should not mask the 403
+                    return {
+                        "status": "insufficient_permissions",
+                        "connector_id": final_connector_id,
+                        "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+                    }
+                raise
+
+            deleted_from_kb = False
+            if final_delete_from_kb and document_id:  # optionally remove the mirrored KB document too
+                try:
+                    from app.db import Document
+
+                    doc_result = await db_session.execute(
+                        select(Document).filter(Document.id == document_id)
+                    )
+                    document = doc_result.scalars().first()
+                    if document:
+                        await db_session.delete(document)
+                        await db_session.commit()
+                        deleted_from_kb = True
+                except Exception as e:
+                    logger.error(f"Failed to delete document from KB: {e}")  # page is already gone; KB cleanup is best-effort
+                    await db_session.rollback()
+
+            message = f"Confluence page '{page_title}' deleted successfully."
+            if deleted_from_kb:
+                message += " Also removed from the knowledge base."
+
+            return {
+                "status": "success",
+                "page_id": final_page_id,
+                "deleted_from_kb": deleted_from_kb,
+                "message": message,
+            }
+
+        except Exception as e:
+            from langgraph.errors import GraphInterrupt
+
+            if isinstance(e, GraphInterrupt):  # HITL interrupts must propagate so the graph can pause
+                raise
+            logger.error(f"Error deleting Confluence page: {e}", exc_info=True)
+            return {
+                "status": "error",
+                "message": "Something went wrong while deleting the page.",
+            }
+
+    return delete_confluence_page
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/index.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/index.py
new file mode 100644
index 000000000..eb76eb17d
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/index.py
@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+from typing import Any
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+)
+
+from .create_page import create_create_confluence_page_tool
+from .delete_page import create_delete_confluence_page_tool
+from .update_page import create_update_confluence_page_tool
+
+
+def load_tools(*, dependencies: dict[str, Any] | None = None, **kwargs: Any) -> ToolsPermissions:  # Build the confluence tool permission buckets.
+    resolved_dependencies = {**(dependencies or {}), **kwargs}  # explicit kwargs override the dependencies dict
+    session_dependencies = {
+        "db_session": resolved_dependencies["db_session"],  # required — raises KeyError if absent
+        "search_space_id": resolved_dependencies["search_space_id"],  # required
+        "user_id": resolved_dependencies["user_id"],  # required
+        "connector_id": resolved_dependencies.get("connector_id"),  # optional; tools fall back to connector lookup
+    }
+    create = create_create_confluence_page_tool(**session_dependencies)
+    update = create_update_confluence_page_tool(**session_dependencies)
+    delete = create_delete_confluence_page_tool(**session_dependencies)
+    return {
+        "allow": [],  # no auto-approved tools: every Confluence mutation requires HITL approval
+        "ask": [
+            {"name": getattr(create, "name", "") or "", "tool": create},
+            {"name": getattr(update, "name", "") or "", "tool": update},
+            {"name": getattr(delete, "name", "") or "", "tool": delete},
+        ],
+    }
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/update_page.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/update_page.py
new file mode 100644
index 000000000..791d0d8c5
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/confluence/tools/update_page.py
@@ -0,0 +1,218 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.agents.new_chat.tools.hitl import request_approval
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_update_confluence_page_tool(  # Factory: binds DB session + scope into an update-page tool closure.
+    db_session: AsyncSession | None = None,
+    search_space_id: int | None = None,
+    user_id: str | None = None,
+    connector_id: int | None = None,
+):
+    @tool
+    async def update_confluence_page(
+        page_title_or_id: str,
+        new_title: str | None = None,
+        new_content: str | None = None,
+    ) -> dict[str, Any]:
+        """Update an existing Confluence page.
+
+        Use this tool when the user asks to modify or edit a Confluence page.
+
+        Args:
+            page_title_or_id: The page title or ID to identify the page.
+            new_title: Optional new title for the page.
+            new_content: Optional new HTML/storage format content.
+
+        Returns:
+            Dictionary with status and message.
+
+        IMPORTANT:
+        - If status is "rejected", do NOT retry.
+        - If status is "not_found", relay the message to the user.
+        - If status is "insufficient_permissions", inform user to re-authenticate.
+        """
+        logger.info(
+            f"update_confluence_page called: page_title_or_id='{page_title_or_id}'"
+        )
+
+        if db_session is None or search_space_id is None or user_id is None:  # factory was built without required context
+            return {
+                "status": "error",
+                "message": "Confluence tool not properly configured.",
+            }
+
+        try:
+            metadata_service = ConfluenceToolMetadataService(db_session)
+            context = await metadata_service.get_update_context(  # resolves title-or-id and loads current page state
+                search_space_id, user_id, page_title_or_id
+            )
+
+            if "error" in context:  # service reports errors in-band; map them to tool statuses
+                error_msg = context["error"]
+                if context.get("auth_expired"):
+                    return {
+                        "status": "auth_error",
+                        "message": error_msg,
+                        "connector_id": context.get("connector_id"),
+                        "connector_type": "confluence",
+                    }
+                if "not found" in error_msg.lower():  # NOTE(review): string-matching the message is fragile — confirm service contract
+                    return {"status": "not_found", "message": error_msg}
+                return {"status": "error", "message": error_msg}
+
+            page_data = context["page"]
+            page_id = page_data["page_id"]
+            current_title = page_data["page_title"]
+            current_body = page_data.get("body", "")
+            current_version = page_data.get("version", 1)  # Confluence page version, needed for the update call
+            document_id = page_data.get("document_id")  # KB document backing this page, if any
+            connector_id_from_context = context.get("account", {}).get("id")
+
+            result = request_approval(  # human-in-the-loop gate; may raise GraphInterrupt to pause the graph
+                action_type="confluence_page_update",
+                tool_name="update_confluence_page",
+                params={
+                    "page_id": page_id,
+                    "document_id": document_id,
+                    "new_title": new_title,
+                    "new_content": new_content,
+                    "version": current_version,
+                    "connector_id": connector_id_from_context,
+                },
+                context=context,
+            )
+
+            if result.rejected:
+                return {
+                    "status": "rejected",
+                    "message": "User declined. Do not retry or suggest alternatives.",
+                }
+
+            final_page_id = result.params.get("page_id", page_id)  # user may edit params in the approval UI
+            final_title = result.params.get("new_title", new_title) or current_title  # unchanged fields keep current values
+            final_content = result.params.get("new_content", new_content)
+            if final_content is None:
+                final_content = current_body  # no new content supplied: resend the existing body
+            final_version = result.params.get("version", current_version)
+            final_connector_id = result.params.get(
+                "connector_id", connector_id_from_context
+            )
+            final_document_id = result.params.get("document_id", document_id)
+
+            from sqlalchemy.future import select  # NOTE(review): local import — presumably to avoid an import cycle; confirm
+
+            from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+            if not final_connector_id:
+                return {
+                    "status": "error",
+                    "message": "No connector found for this page.",
+                }
+
+            result = await db_session.execute(  # verify the connector belongs to this user/space (shadows approval result)
+                select(SearchSourceConnector).filter(
+                    SearchSourceConnector.id == final_connector_id,
+                    SearchSourceConnector.search_space_id == search_space_id,
+                    SearchSourceConnector.user_id == user_id,
+                    SearchSourceConnector.connector_type
+                    == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+                )
+            )
+            connector = result.scalars().first()
+            if not connector:
+                return {
+                    "status": "error",
+                    "message": "Selected Confluence connector is invalid.",
+                }
+
+            try:  # NOTE(review): client.close() is skipped if update_page raises — consider try/finally
+                client = ConfluenceHistoryConnector(
+                    session=db_session, connector_id=final_connector_id
+                )
+                api_result = await client.update_page(
+                    page_id=final_page_id,
+                    title=final_title,
+                    body=final_content,
+                    version_number=final_version + 1,  # Confluence requires the next version number on update
+                )
+                await client.close()
+            except Exception as api_err:
+                if (  # 403 from Confluence -> flag connector as needing re-authentication
+                    "http 403" in str(api_err).lower()
+                    or "status code 403" in str(api_err).lower()
+                ):
+                    try:
+                        connector.config = {**connector.config, "auth_expired": True}  # persist auth-expired flag for the UI
+                        flag_modified(connector, "config")  # JSON column mutation must be flagged for SQLAlchemy
+                        await db_session.commit()
+                    except Exception:
+                        pass  # best-effort; failing to persist the flag should not mask the 403
+                    return {
+                        "status": "insufficient_permissions",
+                        "connector_id": final_connector_id,
+                        "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+                    }
+                raise
+
+            page_links = (
+                api_result.get("_links", {}) if isinstance(api_result, dict) else {}
+            )
+            page_url = ""
+            if page_links.get("base") and page_links.get("webui"):  # both parts needed to build a full URL
+                page_url = f"{page_links['base']}{page_links['webui']}"
+
+            kb_message_suffix = ""
+            if final_document_id:  # best-effort immediate KB sync; the scheduled sync is the fallback path
+                try:
+                    from app.services.confluence import ConfluenceKBSyncService
+
+                    kb_service = ConfluenceKBSyncService(db_session)
+                    kb_result = await kb_service.sync_after_update(
+                        document_id=final_document_id,
+                        page_id=final_page_id,
+                        user_id=user_id,
+                        search_space_id=search_space_id,
+                    )
+                    if kb_result["status"] == "success":
+                        kb_message_suffix = (
+                            " Your knowledge base has also been updated."
+                        )
+                    else:
+                        kb_message_suffix = (
+                            " The knowledge base will be updated in the next sync."
+                        )
+                except Exception as kb_err:
+                    logger.warning(f"KB sync after update failed: {kb_err}")  # sync failure never fails the tool call
+                    kb_message_suffix = (
+                        " The knowledge base will be updated in the next sync."
+                    )
+
+            return {
+                "status": "success",
+                "page_id": final_page_id,
+                "page_url": page_url,
+                "message": f"Confluence page '{final_title}' updated successfully.{kb_message_suffix}",
+            }
+
+        except Exception as e:
+            from langgraph.errors import GraphInterrupt
+
+            if isinstance(e, GraphInterrupt):  # HITL interrupts must propagate so the graph can pause
+                raise
+            logger.error(f"Error updating Confluence page: {e}", exc_info=True)
+            return {
+                "status": "error",
+                "message": "Something went wrong while updating the page.",
+            }
+
+    return update_confluence_page
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/agent.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/agent.py
new file mode 100644
index 000000000..793de429f
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/agent.py
@@ -0,0 +1,54 @@
+"""`discord` route: ``SubAgent`` spec for deepagents."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Any
+
+from deepagents import SubAgent
+from langchain_core.language_models import BaseChatModel
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.md_file_reader import (
+ read_md_file,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+ merge_tools_permissions,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.subagent_builder import (
+ pack_subagent,
+)
+
+from .tools.index import load_tools
+
+NAME = "discord"
+
+
+def build_subagent(  # Assemble the `discord` SubAgent spec from md prompts + permissioned tools.
+    *,
+    dependencies: dict[str, Any],
+    model: BaseChatModel | None = None,
+    extra_middleware: Sequence[Any] | None = None,
+    extra_tools_bucket: ToolsPermissions | None = None,
+) -> SubAgent:
+    buckets = load_tools(dependencies=dependencies)  # connector tools grouped into allow/ask permission buckets
+    merged_tools_bucket = merge_tools_permissions(buckets, extra_tools_bucket)  # caller-supplied bucket is merged in
+    tools = [  # flatten both buckets into the tool list handed to the subagent
+        row["tool"]
+        for row in (*merged_tools_bucket["allow"], *merged_tools_bucket["ask"])
+        if row.get("tool") is not None
+    ]
+    interrupt_on = {r["name"]: True for r in merged_tools_bucket["ask"] if r.get("name")}  # "ask" tools require HITL approval
+    description = read_md_file(__package__, "description").strip()  # description.md sitting next to this module
+    if not description:
+        description = "Handles discord tasks for this workspace."  # fallback if the md file is empty/missing
+    system_prompt = read_md_file(__package__, "system_prompt").strip()
+    return pack_subagent(
+        name=NAME,
+        description=description,
+        system_prompt=system_prompt,
+        tools=tools,
+        interrupt_on=interrupt_on,
+        model=model,
+        extra_middleware=extra_middleware,
+    )
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/description.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/description.md
new file mode 100644
index 000000000..44065c10b
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/description.md
@@ -0,0 +1 @@
+Use for Discord communication: read channel/thread messages, gather context, and send replies.
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/system_prompt.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/system_prompt.md
new file mode 100644
index 000000000..40e9eb314
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/system_prompt.md
@@ -0,0 +1,56 @@
+You are the Discord operations sub-agent.
+You receive delegated instructions from a supervisor agent and return structured results for supervisor synthesis.
+
+
+Execute Discord reads and sends accurately in the connected server/workspace.
+
+
+
+- `list_discord_channels`
+- `read_discord_messages`
+- `send_discord_message`
+
+
+
+- Use only the tools listed in the tool list above.
+- Resolve channel/thread targets before reads/sends.
+- If target is ambiguous, return `status=blocked` with candidate channels/threads.
+- Never invent message content, sender identity, timestamps, or delivery results.
+
+
+
+- Do not perform non-Discord tasks.
+
+
+
+- Before send, verify destination and message intent match delegated instructions.
+- Never claim send success without tool confirmation.
+
+
+
+- On tool failure, return `status=error` with concise recovery `next_step`.
+- On unresolved destination ambiguity, return `status=blocked` with candidate options.
+
+
+
+Return **only** one JSON object (no markdown/prose):
+{
+ "status": "success" | "partial" | "blocked" | "error",
+ "action_summary": string,
+ "evidence": {
+ "channel_id": string | null,
+ "thread_id": string | null,
+ "message_id": string | null,
+ "matched_candidates": [
+ { "channel_id": string, "thread_id": string | null, "label": string | null }
+ ] | null
+ },
+ "next_step": string | null,
+ "missing_fields": string[] | null,
+ "assumptions": string[] | null
+}
+Rules:
+- `status=success` -> `next_step=null`, `missing_fields=null`.
+- `status=partial|blocked|error` -> `next_step` must be non-null.
+- `status=blocked` due to missing required inputs -> `missing_fields` must be non-null.
+
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/__init__.py
new file mode 100644
index 000000000..b4eaec1f0
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/__init__.py
@@ -0,0 +1,15 @@
+from app.agents.new_chat.tools.discord.list_channels import (
+ create_list_discord_channels_tool,
+)
+from app.agents.new_chat.tools.discord.read_messages import (
+ create_read_discord_messages_tool,
+)
+from app.agents.new_chat.tools.discord.send_message import (
+ create_send_discord_message_tool,
+)
+
+__all__ = [
+ "create_list_discord_channels_tool",
+ "create_read_discord_messages_tool",
+ "create_send_discord_message_tool",
+]
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/_auth.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/_auth.py
new file mode 100644
index 000000000..7636aff71
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/_auth.py
@@ -0,0 +1,43 @@
+"""Builds Discord REST API auth headers for connector-backed tools."""
+
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+
+from app.config import config
+from app.db import SearchSourceConnector, SearchSourceConnectorType
+from app.utils.oauth_security import TokenEncryption
+
+DISCORD_API = "https://discord.com/api/v10"
+
+
+async def get_discord_connector(
+ db_session: AsyncSession,
+ search_space_id: int,
+ user_id: str,
+) -> SearchSourceConnector | None:
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type
+ == SearchSourceConnectorType.DISCORD_CONNECTOR,
+ )
+ )
+ return result.scalars().first()
+
+
+def get_bot_token(connector: SearchSourceConnector) -> str:
+    """Extract and decrypt the bot token from connector config.
+
+    Raises:
+        ValueError: If no bot token is present in the connector config.
+    """
+    # Copy so decryption never mutates the ORM-backed config dict in place.
+    cfg = dict(connector.config)
+    # Tokens are decrypted only when flagged encrypted AND a SECRET_KEY exists;
+    # otherwise the stored value is used as-is.
+    if cfg.get("_token_encrypted") and config.SECRET_KEY:
+        enc = TokenEncryption(config.SECRET_KEY)
+        if cfg.get("bot_token"):
+            cfg["bot_token"] = enc.decrypt_token(cfg["bot_token"])
+    token = cfg.get("bot_token")
+    if not token:
+        raise ValueError("Discord bot token not found in connector config.")
+    return token
+
+
+def get_guild_id(connector: SearchSourceConnector) -> str | None:
+    """Return the configured Discord guild (server) ID, or ``None`` if unset."""
+    return connector.config.get("guild_id")
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/index.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/index.py
new file mode 100644
index 000000000..66d13e7a6
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/index.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from typing import Any
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+)
+
+from .list_channels import create_list_discord_channels_tool
+from .read_messages import create_read_discord_messages_tool
+from .send_message import create_send_discord_message_tool
+
+
+def load_tools(*, dependencies: dict[str, Any] | None = None, **kwargs: Any) -> ToolsPermissions:
+ d = {**(dependencies or {}), **kwargs}
+ common = {
+ "db_session": d["db_session"],
+ "search_space_id": d["search_space_id"],
+ "user_id": d["user_id"],
+ }
+ list_ch = create_list_discord_channels_tool(**common)
+ read_msg = create_read_discord_messages_tool(**common)
+ send = create_send_discord_message_tool(**common)
+ return {
+ "allow": [
+ {"name": getattr(list_ch, "name", "") or "", "tool": list_ch},
+ {"name": getattr(read_msg, "name", "") or "", "tool": read_msg},
+ ],
+ "ask": [{"name": getattr(send, "name", "") or "", "tool": send}],
+ }
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/list_channels.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/list_channels.py
new file mode 100644
index 000000000..3cc99ac17
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/list_channels.py
@@ -0,0 +1,87 @@
+import logging
+from typing import Any
+
+import httpx
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from ._auth import DISCORD_API, get_bot_token, get_discord_connector, get_guild_id
+
+logger = logging.getLogger(__name__)
+
+
+def create_list_discord_channels_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+):
+ @tool
+ async def list_discord_channels() -> dict[str, Any]:
+ """List text channels in the connected Discord server.
+
+ Returns:
+ Dictionary with status and a list of channels (id, name).
+ """
+ if db_session is None or search_space_id is None or user_id is None:
+ return {
+ "status": "error",
+ "message": "Discord tool not properly configured.",
+ }
+
+ try:
+ connector = await get_discord_connector(
+ db_session, search_space_id, user_id
+ )
+ if not connector:
+ return {"status": "error", "message": "No Discord connector found."}
+
+ guild_id = get_guild_id(connector)
+ if not guild_id:
+ return {
+ "status": "error",
+ "message": "No guild ID in Discord connector config.",
+ }
+
+ token = get_bot_token(connector)
+
+ async with httpx.AsyncClient() as client:
+ resp = await client.get(
+ f"{DISCORD_API}/guilds/{guild_id}/channels",
+ headers={"Authorization": f"Bot {token}"},
+ timeout=15.0,
+ )
+
+ if resp.status_code == 401:
+ return {
+ "status": "auth_error",
+ "message": "Discord bot token is invalid.",
+ "connector_type": "discord",
+ }
+ if resp.status_code != 200:
+ return {
+ "status": "error",
+ "message": f"Discord API error: {resp.status_code}",
+ }
+
+ # Type 0 = text channel
+ channels = [
+ {"id": ch["id"], "name": ch["name"]}
+ for ch in resp.json()
+ if ch.get("type") == 0
+ ]
+ return {
+ "status": "success",
+ "guild_id": guild_id,
+ "channels": channels,
+ "total": len(channels),
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error("Error listing Discord channels: %s", e, exc_info=True)
+ return {"status": "error", "message": "Failed to list Discord channels."}
+
+ return list_discord_channels
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/read_messages.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/read_messages.py
new file mode 100644
index 000000000..d8bf989a1
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/read_messages.py
@@ -0,0 +1,100 @@
+import logging
+from typing import Any
+
+import httpx
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from ._auth import DISCORD_API, get_bot_token, get_discord_connector
+
+logger = logging.getLogger(__name__)
+
+
+def create_read_discord_messages_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+):
+ @tool
+ async def read_discord_messages(
+ channel_id: str,
+ limit: int = 25,
+ ) -> dict[str, Any]:
+ """Read recent messages from a Discord text channel.
+
+ Args:
+ channel_id: The Discord channel ID (from list_discord_channels).
+ limit: Number of messages to fetch (default 25, max 50).
+
+ Returns:
+ Dictionary with status and a list of messages including
+ id, author, content, timestamp.
+ """
+ if db_session is None or search_space_id is None or user_id is None:
+ return {
+ "status": "error",
+ "message": "Discord tool not properly configured.",
+ }
+
+ limit = min(limit, 50)
+
+ try:
+ connector = await get_discord_connector(
+ db_session, search_space_id, user_id
+ )
+ if not connector:
+ return {"status": "error", "message": "No Discord connector found."}
+
+ token = get_bot_token(connector)
+
+ async with httpx.AsyncClient() as client:
+ resp = await client.get(
+ f"{DISCORD_API}/channels/{channel_id}/messages",
+ headers={"Authorization": f"Bot {token}"},
+ params={"limit": limit},
+ timeout=15.0,
+ )
+
+ if resp.status_code == 401:
+ return {
+ "status": "auth_error",
+ "message": "Discord bot token is invalid.",
+ "connector_type": "discord",
+ }
+ if resp.status_code == 403:
+ return {
+ "status": "error",
+ "message": "Bot lacks permission to read this channel.",
+ }
+ if resp.status_code != 200:
+ return {
+ "status": "error",
+ "message": f"Discord API error: {resp.status_code}",
+ }
+
+ messages = [
+ {
+ "id": m["id"],
+ "author": m.get("author", {}).get("username", "Unknown"),
+ "content": m.get("content", ""),
+ "timestamp": m.get("timestamp", ""),
+ }
+ for m in resp.json()
+ ]
+
+ return {
+ "status": "success",
+ "channel_id": channel_id,
+ "messages": messages,
+ "total": len(messages),
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error("Error reading Discord messages: %s", e, exc_info=True)
+ return {"status": "error", "message": "Failed to read Discord messages."}
+
+ return read_discord_messages
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/send_message.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/send_message.py
new file mode 100644
index 000000000..236cd017a
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/discord/tools/send_message.py
@@ -0,0 +1,117 @@
+import logging
+from typing import Any
+
+import httpx
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.agents.new_chat.tools.hitl import request_approval
+
+from ._auth import DISCORD_API, get_bot_token, get_discord_connector
+
+logger = logging.getLogger(__name__)
+
+
+def create_send_discord_message_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+):
+ @tool
+ async def send_discord_message(
+ channel_id: str,
+ content: str,
+ ) -> dict[str, Any]:
+ """Send a message to a Discord text channel.
+
+ Args:
+ channel_id: The Discord channel ID (from list_discord_channels).
+ content: The message text (max 2000 characters).
+
+ Returns:
+ Dictionary with status, message_id on success.
+
+ IMPORTANT:
+ - If status is "rejected", the user explicitly declined. Do NOT retry.
+ """
+ if db_session is None or search_space_id is None or user_id is None:
+ return {
+ "status": "error",
+ "message": "Discord tool not properly configured.",
+ }
+
+ if len(content) > 2000:
+ return {
+ "status": "error",
+ "message": "Message exceeds Discord's 2000-character limit.",
+ }
+
+ try:
+ connector = await get_discord_connector(
+ db_session, search_space_id, user_id
+ )
+ if not connector:
+ return {"status": "error", "message": "No Discord connector found."}
+
+ result = request_approval(
+ action_type="discord_send_message",
+ tool_name="send_discord_message",
+ params={"channel_id": channel_id, "content": content},
+ context={"connector_id": connector.id},
+ )
+
+ if result.rejected:
+ return {
+ "status": "rejected",
+ "message": "User declined. Message was not sent.",
+ }
+
+ final_content = result.params.get("content", content)
+ final_channel = result.params.get("channel_id", channel_id)
+
+ token = get_bot_token(connector)
+
+ async with httpx.AsyncClient() as client:
+ resp = await client.post(
+ f"{DISCORD_API}/channels/{final_channel}/messages",
+ headers={
+ "Authorization": f"Bot {token}",
+ "Content-Type": "application/json",
+ },
+ json={"content": final_content},
+ timeout=15.0,
+ )
+
+ if resp.status_code == 401:
+ return {
+ "status": "auth_error",
+ "message": "Discord bot token is invalid.",
+ "connector_type": "discord",
+ }
+ if resp.status_code == 403:
+ return {
+ "status": "error",
+ "message": "Bot lacks permission to send messages in this channel.",
+ }
+ if resp.status_code not in (200, 201):
+ return {
+ "status": "error",
+ "message": f"Discord API error: {resp.status_code}",
+ }
+
+ msg_data = resp.json()
+ return {
+ "status": "success",
+ "message_id": msg_data.get("id"),
+ "message": f"Message sent to channel {final_channel}.",
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error("Error sending Discord message: %s", e, exc_info=True)
+ return {"status": "error", "message": "Failed to send Discord message."}
+
+ return send_discord_message
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/agent.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/agent.py
new file mode 100644
index 000000000..dc26c181b
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/agent.py
@@ -0,0 +1,54 @@
+"""`dropbox` route: ``SubAgent`` spec for deepagents."""
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import Any
+
+from deepagents import SubAgent
+from langchain_core.language_models import BaseChatModel
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.md_file_reader import (
+ read_md_file,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+ merge_tools_permissions,
+)
+from app.agents.multi_agent_with_deepagents.subagents.shared.subagent_builder import (
+ pack_subagent,
+)
+
+from .tools.index import load_tools
+
+NAME = "dropbox"
+
+
+def build_subagent(
+ *,
+ dependencies: dict[str, Any],
+ model: BaseChatModel | None = None,
+ extra_middleware: Sequence[Any] | None = None,
+ extra_tools_bucket: ToolsPermissions | None = None,
+) -> SubAgent:
+ buckets = load_tools(dependencies=dependencies)
+ merged_tools_bucket = merge_tools_permissions(buckets, extra_tools_bucket)
+ tools = [
+ row["tool"]
+ for row in (*merged_tools_bucket["allow"], *merged_tools_bucket["ask"])
+ if row.get("tool") is not None
+ ]
+ interrupt_on = {r["name"]: True for r in merged_tools_bucket["ask"] if r.get("name")}
+ description = read_md_file(__package__, "description").strip()
+ if not description:
+ description = "Handles dropbox tasks for this workspace."
+ system_prompt = read_md_file(__package__, "system_prompt").strip()
+ return pack_subagent(
+ name=NAME,
+ description=description,
+ system_prompt=system_prompt,
+ tools=tools,
+ interrupt_on=interrupt_on,
+ model=model,
+ extra_middleware=extra_middleware,
+ )
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/description.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/description.md
new file mode 100644
index 000000000..9c2575dd2
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/description.md
@@ -0,0 +1 @@
+Use for Dropbox file storage tasks: browse folders, read files, and manage Dropbox file content.
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/system_prompt.md b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/system_prompt.md
new file mode 100644
index 000000000..4b19be794
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/system_prompt.md
@@ -0,0 +1,52 @@
+You are the Dropbox operations sub-agent.
+You receive delegated instructions from a supervisor agent and return structured results for supervisor synthesis.
+
+
+Execute Dropbox file create/delete actions accurately in the connected account.
+
+
+
+- `create_dropbox_file`
+- `delete_dropbox_file`
+
+
+
+- Use only the tools listed above.
+- Ensure target path/file identity is explicit before mutate actions.
+- If target is ambiguous, return `status=blocked` with candidate paths.
+- Never invent file IDs/paths or mutation outcomes.
+
+
+
+- Do not perform non-Dropbox tasks.
+
+
+
+- Never claim file mutation success without tool confirmation.
+
+
+
+- On tool failure, return `status=error` with concise recovery `next_step`.
+- On target ambiguity, return `status=blocked` with candidate paths.
+
+
+
+Return **only** one JSON object (no markdown/prose):
+{
+ "status": "success" | "partial" | "blocked" | "error",
+ "action_summary": string,
+ "evidence": {
+ "file_path": string | null,
+ "file_id": string | null,
+ "operation": "create" | "delete" | null,
+ "matched_candidates": string[] | null
+ },
+ "next_step": string | null,
+ "missing_fields": string[] | null,
+ "assumptions": string[] | null
+}
+Rules:
+- `status=success` -> `next_step=null`, `missing_fields=null`.
+- `status=partial|blocked|error` -> `next_step` must be non-null.
+- `status=blocked` due to missing required inputs -> `missing_fields` must be non-null.
+
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/__init__.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/__init__.py
new file mode 100644
index 000000000..836b9ee41
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/__init__.py
@@ -0,0 +1,11 @@
+from app.agents.new_chat.tools.dropbox.create_file import (
+ create_create_dropbox_file_tool,
+)
+from app.agents.new_chat.tools.dropbox.trash_file import (
+ create_delete_dropbox_file_tool,
+)
+
+__all__ = [
+ "create_create_dropbox_file_tool",
+ "create_delete_dropbox_file_tool",
+]
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/create_file.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/create_file.py
new file mode 100644
index 000000000..22d8a8a27
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/create_file.py
@@ -0,0 +1,275 @@
+import logging
+import os
+import tempfile
+from pathlib import Path
+from typing import Any, Literal
+
+from langchain_core.tools import tool
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+
+from app.agents.new_chat.tools.hitl import request_approval
+from app.connectors.dropbox.client import DropboxClient
+from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+logger = logging.getLogger(__name__)
+
+DOCX_MIME = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+
+_FILE_TYPE_LABELS = {
+ "paper": "Dropbox Paper (.paper)",
+ "docx": "Word Document (.docx)",
+}
+
+_SUPPORTED_TYPES = [
+ {"value": "paper", "label": "Dropbox Paper (.paper)"},
+ {"value": "docx", "label": "Word Document (.docx)"},
+]
+
+
+def _ensure_extension(name: str, file_type: str) -> str:
+ """Strip any existing extension and append the correct one."""
+ stem = Path(name).stem
+ ext = ".paper" if file_type == "paper" else ".docx"
+ return f"{stem}{ext}"
+
+
+def _markdown_to_docx(markdown_text: str) -> bytes:
+ """Convert a markdown string to DOCX bytes using pypandoc."""
+ import pypandoc
+
+ fd, tmp_path = tempfile.mkstemp(suffix=".docx")
+ os.close(fd)
+ try:
+ pypandoc.convert_text(
+ markdown_text,
+ "docx",
+ format="gfm",
+ extra_args=["--standalone"],
+ outputfile=tmp_path,
+ )
+ with open(tmp_path, "rb") as f:
+ return f.read()
+ finally:
+ os.unlink(tmp_path)
+
+
+def create_create_dropbox_file_tool(
+    db_session: AsyncSession | None = None,
+    search_space_id: int | None = None,
+    user_id: str | None = None,
+):
+    # Factory: binds session/workspace/user into the closure of the tool below.
+    @tool
+    async def create_dropbox_file(
+        name: str,
+        file_type: Literal["paper", "docx"] = "paper",
+        content: str | None = None,
+    ) -> dict[str, Any]:
+        """Create a new document in Dropbox.
+
+        Use this tool when the user explicitly asks to create a new document
+        in Dropbox. The user MUST specify a topic before you call this tool.
+
+        Args:
+            name: The document title (without extension).
+            file_type: Either "paper" (Dropbox Paper, default) or "docx" (Word document).
+            content: Optional initial content as markdown.
+
+        Returns:
+            Dictionary with status, file_id, name, web_url, and message.
+        """
+        logger.info(
+            f"create_dropbox_file called: name='{name}', file_type='{file_type}'"
+        )
+
+        if db_session is None or search_space_id is None or user_id is None:
+            return {
+                "status": "error",
+                "message": "Dropbox tool not properly configured.",
+            }
+
+        try:
+            # A workspace may have several Dropbox connectors (accounts);
+            # gather them all so the user can pick one during approval.
+            result = await db_session.execute(
+                select(SearchSourceConnector).filter(
+                    SearchSourceConnector.search_space_id == search_space_id,
+                    SearchSourceConnector.user_id == user_id,
+                    SearchSourceConnector.connector_type
+                    == SearchSourceConnectorType.DROPBOX_CONNECTOR,
+                )
+            )
+            connectors = result.scalars().all()
+
+            if not connectors:
+                return {
+                    "status": "error",
+                    "message": "No Dropbox connector found. Please connect Dropbox in your workspace settings.",
+                }
+
+            accounts = []
+            for c in connectors:
+                cfg = c.config or {}
+                accounts.append(
+                    {
+                        "id": c.id,
+                        "name": c.name,
+                        "user_email": cfg.get("user_email"),
+                        "auth_expired": cfg.get("auth_expired", False),
+                    }
+                )
+
+            # Only hard-fail when every account needs re-auth; a single
+            # expired account just yields an empty folder list below.
+            if all(a.get("auth_expired") for a in accounts):
+                return {
+                    "status": "auth_error",
+                    "message": "All connected Dropbox accounts need re-authentication.",
+                    "connector_type": "dropbox",
+                }
+
+            # Best-effort: list root folders per account so the approval UI
+            # can offer a destination picker. Failures degrade to [].
+            parent_folders: dict[int, list[dict[str, str]]] = {}
+            for acc in accounts:
+                cid = acc["id"]
+                if acc.get("auth_expired"):
+                    parent_folders[cid] = []
+                    continue
+                try:
+                    client = DropboxClient(session=db_session, connector_id=cid)
+                    items, err = await client.list_folder("")
+                    if err:
+                        logger.warning(
+                            "Failed to list folders for connector %s: %s", cid, err
+                        )
+                        parent_folders[cid] = []
+                    else:
+                        parent_folders[cid] = [
+                            {
+                                "folder_path": item.get("path_lower", ""),
+                                "name": item["name"],
+                            }
+                            for item in items
+                            if item.get(".tag") == "folder" and item.get("name")
+                        ]
+                except Exception:
+                    logger.warning(
+                        "Error fetching folders for connector %s", cid, exc_info=True
+                    )
+                    parent_folders[cid] = []
+
+            context: dict[str, Any] = {
+                "accounts": accounts,
+                "parent_folders": parent_folders,
+                "supported_types": _SUPPORTED_TYPES,
+            }
+
+            # Human-in-the-loop gate: the user can edit name/type/content and
+            # choose the account/folder; connector_id/parent_folder_path are
+            # filled in by the approval UI.
+            result = request_approval(
+                action_type="dropbox_file_creation",
+                tool_name="create_dropbox_file",
+                params={
+                    "name": name,
+                    "file_type": file_type,
+                    "content": content,
+                    "connector_id": None,
+                    "parent_folder_path": None,
+                },
+                context=context,
+            )
+
+            if result.rejected:
+                return {
+                    "status": "rejected",
+                    "message": "User declined. Do not retry or suggest alternatives.",
+                }
+
+            # Prefer user-edited values from the approval; fall back to the
+            # original tool arguments.
+            final_name = result.params.get("name", name)
+            final_file_type = result.params.get("file_type", file_type)
+            final_content = result.params.get("content", content)
+            final_connector_id = result.params.get("connector_id")
+            final_parent_folder_path = result.params.get("parent_folder_path")
+
+            if not final_name or not final_name.strip():
+                return {"status": "error", "message": "File name cannot be empty."}
+
+            final_name = _ensure_extension(final_name, final_file_type)
+
+            # Resolve the target connector. NOTE: `result` is intentionally
+            # reused here; the approval payload was fully consumed above.
+            if final_connector_id is not None:
+                result = await db_session.execute(
+                    select(SearchSourceConnector).filter(
+                        SearchSourceConnector.id == final_connector_id,
+                        SearchSourceConnector.search_space_id == search_space_id,
+                        SearchSourceConnector.user_id == user_id,
+                        SearchSourceConnector.connector_type
+                        == SearchSourceConnectorType.DROPBOX_CONNECTOR,
+                    )
+                )
+                connector = result.scalars().first()
+            else:
+                connector = connectors[0]
+
+            if not connector:
+                return {
+                    "status": "error",
+                    "message": "Selected Dropbox connector is invalid.",
+                }
+
+            client = DropboxClient(session=db_session, connector_id=connector.id)
+
+            # Dropbox paths are absolute; "" parent means the account root.
+            parent_path = final_parent_folder_path or ""
+            file_path = (
+                f"{parent_path}/{final_name}" if parent_path else f"/{final_name}"
+            )
+
+            if final_file_type == "paper":
+                created = await client.create_paper_doc(file_path, final_content or "")
+                file_id = created.get("file_id", "")
+                web_url = created.get("url", "")
+            else:
+                # docx path: convert markdown locally, then upload the bytes.
+                docx_bytes = _markdown_to_docx(final_content or "")
+                created = await client.upload_file(
+                    file_path, docx_bytes, mode="add", autorename=True
+                )
+                file_id = created.get("id", "")
+                web_url = ""
+
+            logger.info(f"Dropbox file created: id={file_id}, name={final_name}")
+
+            # Best-effort knowledge-base sync; failure only changes the
+            # success message, never the overall status.
+            kb_message_suffix = ""
+            try:
+                from app.services.dropbox import DropboxKBSyncService
+
+                kb_service = DropboxKBSyncService(db_session)
+                kb_result = await kb_service.sync_after_create(
+                    file_id=file_id,
+                    file_name=final_name,
+                    file_path=file_path,
+                    web_url=web_url,
+                    content=final_content,
+                    connector_id=connector.id,
+                    search_space_id=search_space_id,
+                    user_id=user_id,
+                )
+                if kb_result["status"] == "success":
+                    kb_message_suffix = " Your knowledge base has also been updated."
+                else:
+                    kb_message_suffix = " This file will be added to your knowledge base in the next scheduled sync."
+            except Exception as kb_err:
+                logger.warning(f"KB sync after create failed: {kb_err}")
+                kb_message_suffix = " This file will be added to your knowledge base in the next scheduled sync."
+
+            return {
+                "status": "success",
+                "file_id": file_id,
+                "name": final_name,
+                "web_url": web_url,
+                "message": f"Successfully created '{final_name}' in Dropbox.{kb_message_suffix}",
+            }
+
+        except Exception as e:
+            from langgraph.errors import GraphInterrupt
+
+            # HITL interrupts (raised by request_approval) must propagate.
+            if isinstance(e, GraphInterrupt):
+                raise
+            logger.error(f"Error creating Dropbox file: {e}", exc_info=True)
+            return {
+                "status": "error",
+                "message": "Something went wrong while creating the file. Please try again.",
+            }
+
+    return create_dropbox_file
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/index.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/index.py
new file mode 100644
index 000000000..ba2c31f9a
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/index.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from typing import Any
+
+from app.agents.multi_agent_with_deepagents.subagents.shared.permissions import (
+ ToolsPermissions,
+)
+
+from .create_file import create_create_dropbox_file_tool
+from .trash_file import create_delete_dropbox_file_tool
+
+
+def load_tools(*, dependencies: dict[str, Any] | None = None, **kwargs: Any) -> ToolsPermissions:
+ d = {**(dependencies or {}), **kwargs}
+ common = {
+ "db_session": d["db_session"],
+ "search_space_id": d["search_space_id"],
+ "user_id": d["user_id"],
+ }
+ create = create_create_dropbox_file_tool(**common)
+ delete = create_delete_dropbox_file_tool(**common)
+ return {
+ "allow": [],
+ "ask": [
+ {"name": getattr(create, "name", "") or "", "tool": create},
+ {"name": getattr(delete, "name", "") or "", "tool": delete},
+ ],
+ }
diff --git a/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/trash_file.py b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/trash_file.py
new file mode 100644
index 000000000..12559b57a
--- /dev/null
+++ b/surfsense_backend/app/agents/multi_agent_with_deepagents/subagents/connectors/dropbox/tools/trash_file.py
@@ -0,0 +1,277 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from sqlalchemy import String, and_, cast, func
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+
+from app.agents.new_chat.tools.hitl import request_approval
+from app.connectors.dropbox.client import DropboxClient
+from app.db import (
+ Document,
+ DocumentType,
+ SearchSourceConnector,
+ SearchSourceConnectorType,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def create_delete_dropbox_file_tool(
+    db_session: AsyncSession | None = None,
+    search_space_id: int | None = None,
+    user_id: str | None = None,
+):
+    """Build the ``delete_dropbox_file`` tool bound to a DB session, search space, and user."""
+
+    @tool
+    async def delete_dropbox_file(
+        file_name: str,
+        delete_from_kb: bool = False,
+    ) -> dict[str, Any]:
+        """Delete a file from Dropbox.
+
+        Use this tool when the user explicitly asks to delete, remove, or trash
+        a file in Dropbox.
+
+        Args:
+            file_name: The exact name of the file to delete.
+            delete_from_kb: Whether to also remove the file from the knowledge base.
+                Default is False.
+
+        Returns:
+            Dictionary with:
+            - status: "success", "rejected", "not_found", or "error"
+            - file_id: Dropbox file ID (if success)
+            - deleted_from_kb: whether the document was removed from the knowledge base
+            - message: Result message
+
+        IMPORTANT:
+        - If status is "rejected", the user explicitly declined. Respond with a brief
+          acknowledgment and do NOT retry or suggest alternatives.
+        - If status is "not_found", relay the exact message to the user and ask them
+          to verify the file name or check if it has been indexed.
+        """
+        logger.info(
+            f"delete_dropbox_file called: file_name='{file_name}', delete_from_kb={delete_from_kb}"
+        )
+
+        # Guard: the factory can be built without its dependencies; fail soft.
+        if db_session is None or search_space_id is None or user_id is None:
+            return {
+                "status": "error",
+                "message": "Dropbox tool not properly configured.",
+            }
+
+        try:
+            # Primary lookup: case-insensitive match on the document title among
+            # this user's indexed Dropbox files (most recently updated wins).
+            doc_result = await db_session.execute(
+                select(Document)
+                .join(
+                    SearchSourceConnector,
+                    Document.connector_id == SearchSourceConnector.id,
+                )
+                .filter(
+                    and_(
+                        Document.search_space_id == search_space_id,
+                        Document.document_type == DocumentType.DROPBOX_FILE,
+                        func.lower(Document.title) == func.lower(file_name),
+                        SearchSourceConnector.user_id == user_id,
+                    )
+                )
+                .order_by(Document.updated_at.desc().nullslast())
+                .limit(1)
+            )
+            document = doc_result.scalars().first()
+
+            # Fallback lookup: match the original Dropbox file name stored in
+            # the document's metadata (title and Dropbox name can differ).
+            if not document:
+                doc_result = await db_session.execute(
+                    select(Document)
+                    .join(
+                        SearchSourceConnector,
+                        Document.connector_id == SearchSourceConnector.id,
+                    )
+                    .filter(
+                        and_(
+                            Document.search_space_id == search_space_id,
+                            Document.document_type == DocumentType.DROPBOX_FILE,
+                            func.lower(
+                                cast(
+                                    Document.document_metadata["dropbox_file_name"],
+                                    String,
+                                )
+                            )
+                            == func.lower(file_name),
+                            SearchSourceConnector.user_id == user_id,
+                        )
+                    )
+                    .order_by(Document.updated_at.desc().nullslast())
+                    .limit(1)
+                )
+                document = doc_result.scalars().first()
+
+            if not document:
+                return {
+                    "status": "not_found",
+                    "message": (
+                        f"File '{file_name}' not found in your indexed Dropbox files. "
+                        "This could mean: (1) the file doesn't exist, (2) it hasn't been indexed yet, "
+                        "or (3) the file name is different."
+                    ),
+                }
+
+            if not document.connector_id:
+                return {
+                    "status": "error",
+                    "message": "Document has no associated connector.",
+                }
+
+            # Dropbox identifiers captured at indexing time.
+            meta = document.document_metadata or {}
+            file_path = meta.get("dropbox_path")
+            file_id = meta.get("dropbox_file_id")
+            document_id = document.id
+
+            if not file_path:
+                return {
+                    "status": "error",
+                    "message": "File path is missing. Please re-index the file.",
+                }
+
+            # Confirm the connector exists, belongs to this user's search space,
+            # and is actually a Dropbox connector.
+            conn_result = await db_session.execute(
+                select(SearchSourceConnector).filter(
+                    and_(
+                        SearchSourceConnector.id == document.connector_id,
+                        SearchSourceConnector.search_space_id == search_space_id,
+                        SearchSourceConnector.user_id == user_id,
+                        SearchSourceConnector.connector_type
+                        == SearchSourceConnectorType.DROPBOX_CONNECTOR,
+                    )
+                )
+            )
+            connector = conn_result.scalars().first()
+            if not connector:
+                return {
+                    "status": "error",
+                    "message": "Dropbox connector not found or access denied.",
+                }
+
+            # Stale OAuth state: ask for re-authentication instead of attempting the call.
+            cfg = connector.config or {}
+            if cfg.get("auth_expired"):
+                return {
+                    "status": "auth_error",
+                    "message": "Dropbox account needs re-authentication. Please re-authenticate in your connector settings.",
+                    "connector_type": "dropbox",
+                }
+
+            # File/account details forwarded alongside the approval request.
+            context = {
+                "file": {
+                    "file_id": file_id,
+                    "file_path": file_path,
+                    "name": file_name,
+                    "document_id": document_id,
+                },
+                "account": {
+                    "id": connector.id,
+                    "name": connector.name,
+                    "user_email": cfg.get("user_email"),
+                },
+            }
+
+            # Human-in-the-loop gate: the user can approve, edit the params, or reject.
+            result = request_approval(
+                action_type="dropbox_file_trash",
+                tool_name="delete_dropbox_file",
+                params={
+                    "file_path": file_path,
+                    "connector_id": connector.id,
+                    "delete_from_kb": delete_from_kb,
+                },
+                context=context,
+            )
+
+            if result.rejected:
+                return {
+                    "status": "rejected",
+                    "message": "User declined. Do not retry or suggest alternatives.",
+                }
+
+            # Prefer params the user edited during approval; fall back to the originals.
+            final_file_path = result.params.get("file_path", file_path)
+            final_connector_id = result.params.get("connector_id", connector.id)
+            final_delete_from_kb = result.params.get("delete_from_kb", delete_from_kb)
+
+            # If the user switched connectors during approval, re-validate the new one.
+            # NOTE: this rebinds ``result``; the approval payload is not used past here.
+            if final_connector_id != connector.id:
+                result = await db_session.execute(
+                    select(SearchSourceConnector).filter(
+                        and_(
+                            SearchSourceConnector.id == final_connector_id,
+                            SearchSourceConnector.search_space_id == search_space_id,
+                            SearchSourceConnector.user_id == user_id,
+                            SearchSourceConnector.connector_type
+                            == SearchSourceConnectorType.DROPBOX_CONNECTOR,
+                        )
+                    )
+                )
+                validated_connector = result.scalars().first()
+                if not validated_connector:
+                    return {
+                        "status": "error",
+                        "message": "Selected Dropbox connector is invalid or has been disconnected.",
+                    }
+                actual_connector_id = validated_connector.id
+            else:
+                actual_connector_id = connector.id
+
+            logger.info(
+                f"Deleting Dropbox file: path='{final_file_path}', connector={actual_connector_id}"
+            )
+
+            # Perform the actual deletion via the Dropbox client.
+            client = DropboxClient(session=db_session, connector_id=actual_connector_id)
+            await client.delete_file(final_file_path)
+
+            logger.info(f"Dropbox file deleted: path={final_file_path}")
+
+            trash_result: dict[str, Any] = {
+                "status": "success",
+                "file_id": file_id,
+                "message": f"Successfully deleted '{file_name}' from Dropbox.",
+            }
+
+            # Best-effort KB cleanup: a failure here downgrades to a warning
+            # because the Dropbox delete has already succeeded.
+            deleted_from_kb = False
+            if final_delete_from_kb and document_id:
+                try:
+                    doc_result = await db_session.execute(
+                        select(Document).filter(Document.id == document_id)
+                    )
+                    doc = doc_result.scalars().first()
+                    if doc:
+                        await db_session.delete(doc)
+                        await db_session.commit()
+                        deleted_from_kb = True
+                        logger.info(
+                            f"Deleted document {document_id} from knowledge base"
+                        )
+                    else:
+                        logger.warning(f"Document {document_id} not found in KB")
+                except Exception as e:
+                    logger.error(f"Failed to delete document from KB: {e}")
+                    # Keep the session usable after the failed delete/commit.
+                    await db_session.rollback()
+                    trash_result["warning"] = (
+                        f"File deleted, but failed to remove from knowledge base: {e!s}"
+                    )
+
+            trash_result["deleted_from_kb"] = deleted_from_kb
+            if deleted_from_kb:
+                trash_result["message"] = (
+                    f"{trash_result.get('message', '')} (also removed from knowledge base)"
+                )
+
+            return trash_result
+
+        except Exception as e:
+            from langgraph.errors import GraphInterrupt
+
+            # GraphInterrupt is LangGraph's HITL pause signal -- it must propagate,
+            # never be swallowed and reported as a tool error.
+            if isinstance(e, GraphInterrupt):
+                raise
+            logger.error(f"Error deleting Dropbox file: {e}", exc_info=True)
+            return {
+                "status": "error",
+                "message": "Something went wrong while deleting the file. Please try again.",
+            }
+
+    return delete_dropbox_file