diff --git a/surfsense_backend/app/agents/new_chat/chat_deepagent.py b/surfsense_backend/app/agents/new_chat/chat_deepagent.py
index 971472876..c1ad36252 100644
--- a/surfsense_backend/app/agents/new_chat/chat_deepagent.py
+++ b/surfsense_backend/app/agents/new_chat/chat_deepagent.py
@@ -335,6 +335,32 @@ async def create_surfsense_deep_agent(
]
modified_disabled_tools.extend(gmail_tools)
+ # Disable Jira action tools if no Jira connector is configured
+ has_jira_connector = (
+ available_connectors is not None
+ and "JIRA_CONNECTOR" in available_connectors
+ )
+ if not has_jira_connector:
+ jira_tools = [
+ "create_jira_issue",
+ "update_jira_issue",
+ "delete_jira_issue",
+ ]
+ modified_disabled_tools.extend(jira_tools)
+
+ # Disable Confluence action tools if no Confluence connector is configured
+ has_confluence_connector = (
+ available_connectors is not None
+ and "CONFLUENCE_CONNECTOR" in available_connectors
+ )
+ if not has_confluence_connector:
+ confluence_tools = [
+ "create_confluence_page",
+ "update_confluence_page",
+ "delete_confluence_page",
+ ]
+ modified_disabled_tools.extend(confluence_tools)
+
# Build tools using the async registry (includes MCP tools)
_t0 = time.perf_counter()
tools = await build_tools_async(
diff --git a/surfsense_backend/app/agents/new_chat/middleware/dedup_tool_calls.py b/surfsense_backend/app/agents/new_chat/middleware/dedup_tool_calls.py
index 085301570..5f1f864a0 100644
--- a/surfsense_backend/app/agents/new_chat/middleware/dedup_tool_calls.py
+++ b/surfsense_backend/app/agents/new_chat/middleware/dedup_tool_calls.py
@@ -30,6 +30,10 @@ _HITL_TOOL_DEDUP_KEYS: dict[str, str] = {
"update_notion_page": "page_title",
"delete_linear_issue": "issue_ref",
"update_linear_issue": "issue_ref",
+ "update_jira_issue": "issue_title_or_key",
+ "delete_jira_issue": "issue_title_or_key",
+ "update_confluence_page": "page_title_or_id",
+ "delete_confluence_page": "page_title_or_id",
}
diff --git a/surfsense_backend/app/agents/new_chat/tools/confluence/__init__.py b/surfsense_backend/app/agents/new_chat/tools/confluence/__init__.py
new file mode 100644
index 000000000..3bf80b61b
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/confluence/__init__.py
@@ -0,0 +1,11 @@
+"""Confluence tools for creating, updating, and deleting pages."""
+
+from .create_page import create_create_confluence_page_tool
+from .delete_page import create_delete_confluence_page_tool
+from .update_page import create_update_confluence_page_tool
+
+__all__ = [
+ "create_create_confluence_page_tool",
+ "create_delete_confluence_page_tool",
+ "create_update_confluence_page_tool",
+]
diff --git a/surfsense_backend/app/agents/new_chat/tools/confluence/create_page.py b/surfsense_backend/app/agents/new_chat/tools/confluence/create_page.py
new file mode 100644
index 000000000..6e69ca591
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/confluence/create_page.py
@@ -0,0 +1,197 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_create_confluence_page_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def create_confluence_page(
+ title: str,
+ content: str | None = None,
+ space_id: str | None = None,
+ ) -> dict[str, Any]:
+ """Create a new page in Confluence.
+
+ Use this tool when the user explicitly asks to create a new Confluence page.
+
+ Args:
+ title: Title of the page.
+ content: Optional HTML/storage format content for the page body.
+ space_id: Optional Confluence space ID to create the page in.
+
+ Returns:
+ Dictionary with status, page_id, and message.
+
+ IMPORTANT:
+ - If status is "rejected", do NOT retry.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"create_confluence_page called: title='{title}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Confluence tool not properly configured."}
+
+ try:
+ metadata_service = ConfluenceToolMetadataService(db_session)
+ context = await metadata_service.get_creation_context(search_space_id, user_id)
+
+ if "error" in context:
+ return {"status": "error", "message": context["error"]}
+
+ accounts = context.get("accounts", [])
+ if accounts and all(a.get("auth_expired") for a in accounts):
+ return {
+ "status": "auth_error",
+ "message": "All connected Confluence accounts need re-authentication.",
+ "connector_type": "confluence",
+ }
+
+ approval = interrupt({
+ "type": "confluence_page_creation",
+ "action": {
+ "tool": "create_confluence_page",
+ "params": {
+ "title": title,
+ "content": content,
+ "space_id": space_id,
+ "connector_id": connector_id,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The page was not created."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_title = final_params.get("title", title)
+ final_content = final_params.get("content", content) or ""
+ final_space_id = final_params.get("space_id", space_id)
+ final_connector_id = final_params.get("connector_id", connector_id)
+
+ if not final_title or not final_title.strip():
+ return {"status": "error", "message": "Page title cannot be empty."}
+ if not final_space_id:
+ return {"status": "error", "message": "A space must be selected."}
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ actual_connector_id = final_connector_id
+ if actual_connector_id is None:
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "No Confluence connector found."}
+ actual_connector_id = connector.id
+ else:
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == actual_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Confluence connector is invalid."}
+
+ try:
+ client = ConfluenceHistoryConnector(session=db_session, connector_id=actual_connector_id)
+ api_result = await client.create_page(
+ space_id=final_space_id,
+ title=final_title,
+ body=final_content,
+ )
+ await client.close()
+ except Exception as api_err:
+ if "http 403" in str(api_err).lower() or "status code 403" in str(api_err).lower():
+ try:
+ _conn = connector
+ _conn.config = {**_conn.config, "auth_expired": True}
+ flag_modified(_conn, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": actual_connector_id,
+ "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ page_id = str(api_result.get("id", ""))
+
+ kb_message_suffix = ""
+ try:
+ from app.services.confluence import ConfluenceKBSyncService
+ kb_service = ConfluenceKBSyncService(db_session)
+ kb_result = await kb_service.sync_after_create(
+ page_id=page_id,
+ page_title=final_title,
+ space_id=final_space_id,
+ body_content=final_content,
+ connector_id=actual_connector_id,
+ search_space_id=search_space_id,
+ user_id=user_id,
+ )
+ if kb_result["status"] == "success":
+ kb_message_suffix = " Your knowledge base has also been updated."
+ else:
+ kb_message_suffix = " This page will be added to your knowledge base in the next scheduled sync."
+ except Exception as kb_err:
+ logger.warning(f"KB sync after create failed: {kb_err}")
+ kb_message_suffix = " This page will be added to your knowledge base in the next scheduled sync."
+
+ return {
+ "status": "success",
+ "page_id": page_id,
+ "message": f"Confluence page '{final_title}' created successfully.{kb_message_suffix}",
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error creating Confluence page: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while creating the page."}
+
+ return create_confluence_page
diff --git a/surfsense_backend/app/agents/new_chat/tools/confluence/delete_page.py b/surfsense_backend/app/agents/new_chat/tools/confluence/delete_page.py
new file mode 100644
index 000000000..e398aed32
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/confluence/delete_page.py
@@ -0,0 +1,179 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_delete_confluence_page_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def delete_confluence_page(
+ page_title_or_id: str,
+ delete_from_kb: bool = False,
+ ) -> dict[str, Any]:
+ """Delete a Confluence page.
+
+ Use this tool when the user asks to delete or remove a Confluence page.
+
+ Args:
+ page_title_or_id: The page title or ID to identify the page.
+ delete_from_kb: Whether to also remove from the knowledge base.
+
+ Returns:
+ Dictionary with status, message, and deleted_from_kb.
+
+ IMPORTANT:
+ - If status is "rejected", do NOT retry.
+ - If status is "not_found", relay the message to the user.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"delete_confluence_page called: page_title_or_id='{page_title_or_id}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Confluence tool not properly configured."}
+
+ try:
+ metadata_service = ConfluenceToolMetadataService(db_session)
+ context = await metadata_service.get_deletion_context(search_space_id, user_id, page_title_or_id)
+
+ if "error" in context:
+ error_msg = context["error"]
+ if context.get("auth_expired"):
+ return {
+ "status": "auth_error",
+ "message": error_msg,
+ "connector_id": context.get("connector_id"),
+ "connector_type": "confluence",
+ }
+ if "not found" in error_msg.lower():
+ return {"status": "not_found", "message": error_msg}
+ return {"status": "error", "message": error_msg}
+
+ page_data = context["page"]
+ page_id = page_data["page_id"]
+ page_title = page_data.get("page_title", "")
+            document_id = page_data.get("document_id")
+ connector_id_from_context = context.get("account", {}).get("id")
+
+ approval = interrupt({
+ "type": "confluence_page_deletion",
+ "action": {
+ "tool": "delete_confluence_page",
+ "params": {
+ "page_id": page_id,
+ "connector_id": connector_id_from_context,
+ "delete_from_kb": delete_from_kb,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The page was not deleted."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_page_id = final_params.get("page_id", page_id)
+ final_connector_id = final_params.get("connector_id", connector_id_from_context)
+ final_delete_from_kb = final_params.get("delete_from_kb", delete_from_kb)
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ if not final_connector_id:
+ return {"status": "error", "message": "No connector found for this page."}
+
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == final_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Confluence connector is invalid."}
+
+ try:
+ client = ConfluenceHistoryConnector(session=db_session, connector_id=final_connector_id)
+ await client.delete_page(final_page_id)
+ await client.close()
+ except Exception as api_err:
+ if "http 403" in str(api_err).lower() or "status code 403" in str(api_err).lower():
+ try:
+ connector.config = {**connector.config, "auth_expired": True}
+ flag_modified(connector, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": final_connector_id,
+ "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ deleted_from_kb = False
+ if final_delete_from_kb and document_id:
+ try:
+ from app.db import Document
+ doc_result = await db_session.execute(
+ select(Document).filter(Document.id == document_id)
+ )
+ document = doc_result.scalars().first()
+ if document:
+ await db_session.delete(document)
+ await db_session.commit()
+ deleted_from_kb = True
+ except Exception as e:
+ logger.error(f"Failed to delete document from KB: {e}")
+ await db_session.rollback()
+
+ message = f"Confluence page '{page_title}' deleted successfully."
+ if deleted_from_kb:
+ message += " Also removed from the knowledge base."
+
+ return {
+ "status": "success",
+ "page_id": final_page_id,
+ "deleted_from_kb": deleted_from_kb,
+ "message": message,
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error deleting Confluence page: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while deleting the page."}
+
+ return delete_confluence_page
diff --git a/surfsense_backend/app/agents/new_chat/tools/confluence/update_page.py b/surfsense_backend/app/agents/new_chat/tools/confluence/update_page.py
new file mode 100644
index 000000000..7c7be1c66
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/confluence/update_page.py
@@ -0,0 +1,194 @@
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.services.confluence import ConfluenceToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_update_confluence_page_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def update_confluence_page(
+ page_title_or_id: str,
+ new_title: str | None = None,
+ new_content: str | None = None,
+ ) -> dict[str, Any]:
+ """Update an existing Confluence page.
+
+ Use this tool when the user asks to modify or edit a Confluence page.
+
+ Args:
+ page_title_or_id: The page title or ID to identify the page.
+ new_title: Optional new title for the page.
+ new_content: Optional new HTML/storage format content.
+
+ Returns:
+ Dictionary with status and message.
+
+ IMPORTANT:
+ - If status is "rejected", do NOT retry.
+ - If status is "not_found", relay the message to the user.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"update_confluence_page called: page_title_or_id='{page_title_or_id}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Confluence tool not properly configured."}
+
+ try:
+ metadata_service = ConfluenceToolMetadataService(db_session)
+ context = await metadata_service.get_update_context(search_space_id, user_id, page_title_or_id)
+
+ if "error" in context:
+ error_msg = context["error"]
+ if context.get("auth_expired"):
+ return {
+ "status": "auth_error",
+ "message": error_msg,
+ "connector_id": context.get("connector_id"),
+ "connector_type": "confluence",
+ }
+ if "not found" in error_msg.lower():
+ return {"status": "not_found", "message": error_msg}
+ return {"status": "error", "message": error_msg}
+
+ page_data = context["page"]
+ page_id = page_data["page_id"]
+ current_title = page_data["page_title"]
+ current_body = page_data.get("body", "")
+ current_version = page_data.get("version", 1)
+ document_id = page_data.get("document_id")
+ connector_id_from_context = context.get("account", {}).get("id")
+
+ approval = interrupt({
+ "type": "confluence_page_update",
+ "action": {
+ "tool": "update_confluence_page",
+ "params": {
+ "page_id": page_id,
+ "document_id": document_id,
+ "new_title": new_title,
+ "new_content": new_content,
+ "version": current_version,
+ "connector_id": connector_id_from_context,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The page was not updated."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_page_id = final_params.get("page_id", page_id)
+ final_title = final_params.get("new_title", new_title) or current_title
+ final_content = final_params.get("new_content", new_content)
+ if final_content is None:
+ final_content = current_body
+ final_version = final_params.get("version", current_version)
+ final_connector_id = final_params.get("connector_id", connector_id_from_context)
+ final_document_id = final_params.get("document_id", document_id)
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ if not final_connector_id:
+ return {"status": "error", "message": "No connector found for this page."}
+
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == final_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Confluence connector is invalid."}
+
+ try:
+ client = ConfluenceHistoryConnector(session=db_session, connector_id=final_connector_id)
+ await client.update_page(
+ page_id=final_page_id,
+ title=final_title,
+ body=final_content,
+ version_number=final_version + 1,
+ )
+ await client.close()
+ except Exception as api_err:
+ if "http 403" in str(api_err).lower() or "status code 403" in str(api_err).lower():
+ try:
+ connector.config = {**connector.config, "auth_expired": True}
+ flag_modified(connector, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": final_connector_id,
+ "message": "This Confluence account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ kb_message_suffix = ""
+ if final_document_id:
+ try:
+ from app.services.confluence import ConfluenceKBSyncService
+ kb_service = ConfluenceKBSyncService(db_session)
+ kb_result = await kb_service.sync_after_update(
+ document_id=final_document_id,
+ page_id=final_page_id,
+ user_id=user_id,
+ search_space_id=search_space_id,
+ )
+ if kb_result["status"] == "success":
+ kb_message_suffix = " Your knowledge base has also been updated."
+ else:
+ kb_message_suffix = " The knowledge base will be updated in the next sync."
+ except Exception as kb_err:
+ logger.warning(f"KB sync after update failed: {kb_err}")
+ kb_message_suffix = " The knowledge base will be updated in the next sync."
+
+ return {
+ "status": "success",
+ "page_id": final_page_id,
+ "message": f"Confluence page '{final_title}' updated successfully.{kb_message_suffix}",
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error updating Confluence page: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while updating the page."}
+
+ return update_confluence_page
diff --git a/surfsense_backend/app/agents/new_chat/tools/jira/__init__.py b/surfsense_backend/app/agents/new_chat/tools/jira/__init__.py
new file mode 100644
index 000000000..768738118
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/jira/__init__.py
@@ -0,0 +1,11 @@
+"""Jira tools for creating, updating, and deleting issues."""
+
+from .create_issue import create_create_jira_issue_tool
+from .delete_issue import create_delete_jira_issue_tool
+from .update_issue import create_update_jira_issue_tool
+
+__all__ = [
+ "create_create_jira_issue_tool",
+ "create_delete_jira_issue_tool",
+ "create_update_jira_issue_tool",
+]
diff --git a/surfsense_backend/app/agents/new_chat/tools/jira/create_issue.py b/surfsense_backend/app/agents/new_chat/tools/jira/create_issue.py
new file mode 100644
index 000000000..d072254d4
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/jira/create_issue.py
@@ -0,0 +1,210 @@
+import asyncio
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.jira_history import JiraHistoryConnector
+from app.services.jira import JiraToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_create_jira_issue_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def create_jira_issue(
+ project_key: str,
+ summary: str,
+ issue_type: str = "Task",
+ description: str | None = None,
+ priority: str | None = None,
+ ) -> dict[str, Any]:
+ """Create a new issue in Jira.
+
+ Use this tool when the user explicitly asks to create a new Jira issue/ticket.
+
+ Args:
+ project_key: The Jira project key (e.g. "PROJ", "ENG").
+ summary: Short, descriptive issue title.
+ issue_type: Issue type (default "Task"). Others: "Bug", "Story", "Epic".
+ description: Optional description body for the issue.
+ priority: Optional priority name (e.g. "High", "Medium", "Low").
+
+ Returns:
+ Dictionary with status, issue_key, and message.
+
+ IMPORTANT:
+ - If status is "rejected", the user declined. Do NOT retry.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"create_jira_issue called: project_key='{project_key}', summary='{summary}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Jira tool not properly configured."}
+
+ try:
+ metadata_service = JiraToolMetadataService(db_session)
+ context = await metadata_service.get_creation_context(search_space_id, user_id)
+
+ if "error" in context:
+ return {"status": "error", "message": context["error"]}
+
+ accounts = context.get("accounts", [])
+ if accounts and all(a.get("auth_expired") for a in accounts):
+ return {
+ "status": "auth_error",
+ "message": "All connected Jira accounts need re-authentication.",
+ "connector_type": "jira",
+ }
+
+ approval = interrupt({
+ "type": "jira_issue_creation",
+ "action": {
+ "tool": "create_jira_issue",
+ "params": {
+ "project_key": project_key,
+ "summary": summary,
+ "issue_type": issue_type,
+ "description": description,
+ "priority": priority,
+ "connector_id": connector_id,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The issue was not created."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_project_key = final_params.get("project_key", project_key)
+ final_summary = final_params.get("summary", summary)
+ final_issue_type = final_params.get("issue_type", issue_type)
+ final_description = final_params.get("description", description)
+ final_priority = final_params.get("priority", priority)
+ final_connector_id = final_params.get("connector_id", connector_id)
+
+ if not final_summary or not final_summary.strip():
+ return {"status": "error", "message": "Issue summary cannot be empty."}
+ if not final_project_key:
+ return {"status": "error", "message": "A project must be selected."}
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ actual_connector_id = final_connector_id
+ if actual_connector_id is None:
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "No Jira connector found."}
+ actual_connector_id = connector.id
+ else:
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == actual_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Jira connector is invalid."}
+
+ try:
+ jira_history = JiraHistoryConnector(session=db_session, connector_id=actual_connector_id)
+ jira_client = await jira_history._get_jira_client()
+ api_result = await asyncio.to_thread(
+ jira_client.create_issue,
+ project_key=final_project_key,
+ summary=final_summary,
+ issue_type=final_issue_type,
+ description=final_description,
+ priority=final_priority,
+ )
+ except Exception as api_err:
+                if "http 403" in str(api_err).lower() or "status code 403" in str(api_err).lower():
+ try:
+ _conn = connector
+ _conn.config = {**_conn.config, "auth_expired": True}
+ flag_modified(_conn, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": actual_connector_id,
+ "message": "This Jira account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ issue_key = api_result.get("key", "")
+
+ kb_message_suffix = ""
+ try:
+ from app.services.jira import JiraKBSyncService
+ kb_service = JiraKBSyncService(db_session)
+ kb_result = await kb_service.sync_after_create(
+ issue_id=issue_key,
+ issue_identifier=issue_key,
+ issue_title=final_summary,
+ description=final_description,
+ state="To Do",
+ connector_id=actual_connector_id,
+ search_space_id=search_space_id,
+ user_id=user_id,
+ )
+ if kb_result["status"] == "success":
+ kb_message_suffix = " Your knowledge base has also been updated."
+ else:
+ kb_message_suffix = " This issue will be added to your knowledge base in the next scheduled sync."
+ except Exception as kb_err:
+ logger.warning(f"KB sync after create failed: {kb_err}")
+ kb_message_suffix = " This issue will be added to your knowledge base in the next scheduled sync."
+
+ return {
+ "status": "success",
+ "issue_key": issue_key,
+ "message": f"Jira issue {issue_key} created successfully.{kb_message_suffix}",
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error creating Jira issue: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while creating the issue."}
+
+ return create_jira_issue
diff --git a/surfsense_backend/app/agents/new_chat/tools/jira/delete_issue.py b/surfsense_backend/app/agents/new_chat/tools/jira/delete_issue.py
new file mode 100644
index 000000000..46e97d3d5
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/jira/delete_issue.py
@@ -0,0 +1,179 @@
+import asyncio
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.jira_history import JiraHistoryConnector
+from app.services.jira import JiraToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_delete_jira_issue_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def delete_jira_issue(
+ issue_title_or_key: str,
+ delete_from_kb: bool = False,
+ ) -> dict[str, Any]:
+ """Delete a Jira issue.
+
+ Use this tool when the user asks to delete or remove a Jira issue.
+
+ Args:
+ issue_title_or_key: The issue key (e.g. "PROJ-42") or title.
+ delete_from_kb: Whether to also remove from the knowledge base.
+
+ Returns:
+ Dictionary with status, message, and deleted_from_kb.
+
+ IMPORTANT:
+ - If status is "rejected", do NOT retry.
+ - If status is "not_found", relay the message to the user.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"delete_jira_issue called: issue_title_or_key='{issue_title_or_key}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Jira tool not properly configured."}
+
+ try:
+ metadata_service = JiraToolMetadataService(db_session)
+ context = await metadata_service.get_deletion_context(search_space_id, user_id, issue_title_or_key)
+
+ if "error" in context:
+ error_msg = context["error"]
+ if context.get("auth_expired"):
+ return {
+ "status": "auth_error",
+ "message": error_msg,
+ "connector_id": context.get("connector_id"),
+ "connector_type": "jira",
+ }
+ if "not found" in error_msg.lower():
+ return {"status": "not_found", "message": error_msg}
+ return {"status": "error", "message": error_msg}
+
+ issue_data = context["issue"]
+ issue_key = issue_data["issue_id"]
+ document_id = issue_data["document_id"]
+ connector_id_from_context = context.get("account", {}).get("id")
+
+ approval = interrupt({
+ "type": "jira_issue_deletion",
+ "action": {
+ "tool": "delete_jira_issue",
+ "params": {
+ "issue_key": issue_key,
+ "connector_id": connector_id_from_context,
+ "delete_from_kb": delete_from_kb,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The issue was not deleted."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_issue_key = final_params.get("issue_key", issue_key)
+ final_connector_id = final_params.get("connector_id", connector_id_from_context)
+ final_delete_from_kb = final_params.get("delete_from_kb", delete_from_kb)
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ if not final_connector_id:
+ return {"status": "error", "message": "No connector found for this issue."}
+
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == final_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Jira connector is invalid."}
+
+ try:
+ jira_history = JiraHistoryConnector(session=db_session, connector_id=final_connector_id)
+ jira_client = await jira_history._get_jira_client()
+ await asyncio.to_thread(jira_client.delete_issue, final_issue_key)
+ except Exception as api_err:
+ if "status code 403" in str(api_err).lower():
+ try:
+ connector.config = {**connector.config, "auth_expired": True}
+ flag_modified(connector, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": final_connector_id,
+ "message": "This Jira account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ deleted_from_kb = False
+ if final_delete_from_kb and document_id:
+ try:
+ from app.db import Document
+ doc_result = await db_session.execute(
+ select(Document).filter(Document.id == document_id)
+ )
+ document = doc_result.scalars().first()
+ if document:
+ await db_session.delete(document)
+ await db_session.commit()
+ deleted_from_kb = True
+ except Exception as e:
+ logger.error(f"Failed to delete document from KB: {e}")
+ await db_session.rollback()
+
+ message = f"Jira issue {final_issue_key} deleted successfully."
+ if deleted_from_kb:
+ message += " Also removed from the knowledge base."
+
+ return {
+ "status": "success",
+ "issue_key": final_issue_key,
+ "deleted_from_kb": deleted_from_kb,
+ "message": message,
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error deleting Jira issue: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while deleting the issue."}
+
+ return delete_jira_issue
diff --git a/surfsense_backend/app/agents/new_chat/tools/jira/update_issue.py b/surfsense_backend/app/agents/new_chat/tools/jira/update_issue.py
new file mode 100644
index 000000000..a3ffa3020
--- /dev/null
+++ b/surfsense_backend/app/agents/new_chat/tools/jira/update_issue.py
@@ -0,0 +1,202 @@
+import asyncio
+import logging
+from typing import Any
+
+from langchain_core.tools import tool
+from langgraph.types import interrupt
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.jira_history import JiraHistoryConnector
+from app.services.jira import JiraToolMetadataService
+
+logger = logging.getLogger(__name__)
+
+
+def create_update_jira_issue_tool(
+ db_session: AsyncSession | None = None,
+ search_space_id: int | None = None,
+ user_id: str | None = None,
+ connector_id: int | None = None,
+):
+ @tool
+ async def update_jira_issue(
+ issue_title_or_key: str,
+ new_summary: str | None = None,
+ new_description: str | None = None,
+ new_priority: str | None = None,
+ ) -> dict[str, Any]:
+ """Update an existing Jira issue.
+
+ Use this tool when the user asks to modify, edit, or update a Jira issue.
+
+ Args:
+ issue_title_or_key: The issue key (e.g. "PROJ-42") or title to identify the issue.
+ new_summary: Optional new title/summary for the issue.
+ new_description: Optional new description.
+ new_priority: Optional new priority name.
+
+ Returns:
+ Dictionary with status and message.
+
+ IMPORTANT:
+ - If status is "rejected", do NOT retry.
+        - If status is "not_found", relay the message and ask the user to verify.
+ - If status is "insufficient_permissions", inform user to re-authenticate.
+ """
+ logger.info(f"update_jira_issue called: issue_title_or_key='{issue_title_or_key}'")
+
+ if db_session is None or search_space_id is None or user_id is None:
+ return {"status": "error", "message": "Jira tool not properly configured."}
+
+ try:
+ metadata_service = JiraToolMetadataService(db_session)
+ context = await metadata_service.get_update_context(search_space_id, user_id, issue_title_or_key)
+
+ if "error" in context:
+ error_msg = context["error"]
+ if context.get("auth_expired"):
+ return {
+ "status": "auth_error",
+ "message": error_msg,
+ "connector_id": context.get("connector_id"),
+ "connector_type": "jira",
+ }
+ if "not found" in error_msg.lower():
+ return {"status": "not_found", "message": error_msg}
+ return {"status": "error", "message": error_msg}
+
+ issue_data = context["issue"]
+ issue_key = issue_data["issue_id"]
+ document_id = issue_data.get("document_id")
+ connector_id_from_context = context.get("account", {}).get("id")
+
+ approval = interrupt({
+ "type": "jira_issue_update",
+ "action": {
+ "tool": "update_jira_issue",
+ "params": {
+ "issue_key": issue_key,
+ "document_id": document_id,
+ "new_summary": new_summary,
+ "new_description": new_description,
+ "new_priority": new_priority,
+ "connector_id": connector_id_from_context,
+ },
+ },
+ "context": context,
+ })
+
+ decisions_raw = approval.get("decisions", []) if isinstance(approval, dict) else []
+ decisions = decisions_raw if isinstance(decisions_raw, list) else [decisions_raw]
+ decisions = [d for d in decisions if isinstance(d, dict)]
+ if not decisions:
+ return {"status": "error", "message": "No approval decision received"}
+
+ decision = decisions[0]
+ decision_type = decision.get("type") or decision.get("decision_type")
+
+ if decision_type == "reject":
+ return {"status": "rejected", "message": "User declined. The issue was not updated."}
+
+ final_params: dict[str, Any] = {}
+ edited_action = decision.get("edited_action")
+ if isinstance(edited_action, dict):
+ edited_args = edited_action.get("args")
+ if isinstance(edited_args, dict):
+ final_params = edited_args
+ elif isinstance(decision.get("args"), dict):
+ final_params = decision["args"]
+
+ final_issue_key = final_params.get("issue_key", issue_key)
+ final_summary = final_params.get("new_summary", new_summary)
+ final_description = final_params.get("new_description", new_description)
+ final_priority = final_params.get("new_priority", new_priority)
+ final_connector_id = final_params.get("connector_id", connector_id_from_context)
+ final_document_id = final_params.get("document_id", document_id)
+
+ from sqlalchemy.future import select
+ from app.db import SearchSourceConnector, SearchSourceConnectorType
+
+ if not final_connector_id:
+ return {"status": "error", "message": "No connector found for this issue."}
+
+ result = await db_session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == final_connector_id,
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ return {"status": "error", "message": "Selected Jira connector is invalid."}
+
+ fields: dict[str, Any] = {}
+ if final_summary:
+ fields["summary"] = final_summary
+ if final_description is not None:
+ fields["description"] = {
+ "type": "doc",
+ "version": 1,
+ "content": [{"type": "paragraph", "content": [{"type": "text", "text": final_description}]}],
+ }
+ if final_priority:
+ fields["priority"] = {"name": final_priority}
+
+ if not fields:
+ return {"status": "error", "message": "No changes specified."}
+
+ try:
+ jira_history = JiraHistoryConnector(session=db_session, connector_id=final_connector_id)
+ jira_client = await jira_history._get_jira_client()
+ await asyncio.to_thread(jira_client.update_issue, final_issue_key, fields)
+ except Exception as api_err:
+ if "status code 403" in str(api_err).lower():
+ try:
+ connector.config = {**connector.config, "auth_expired": True}
+ flag_modified(connector, "config")
+ await db_session.commit()
+ except Exception:
+ pass
+ return {
+ "status": "insufficient_permissions",
+ "connector_id": final_connector_id,
+ "message": "This Jira account needs additional permissions. Please re-authenticate in connector settings.",
+ }
+ raise
+
+ kb_message_suffix = ""
+ if final_document_id:
+ try:
+ from app.services.jira import JiraKBSyncService
+ kb_service = JiraKBSyncService(db_session)
+ kb_result = await kb_service.sync_after_update(
+ document_id=final_document_id,
+ issue_id=final_issue_key,
+ user_id=user_id,
+ search_space_id=search_space_id,
+ )
+ if kb_result["status"] == "success":
+ kb_message_suffix = " Your knowledge base has also been updated."
+ else:
+ kb_message_suffix = " The knowledge base will be updated in the next sync."
+ except Exception as kb_err:
+ logger.warning(f"KB sync after update failed: {kb_err}")
+ kb_message_suffix = " The knowledge base will be updated in the next sync."
+
+ return {
+ "status": "success",
+ "issue_key": final_issue_key,
+ "message": f"Jira issue {final_issue_key} updated successfully.{kb_message_suffix}",
+ }
+
+ except Exception as e:
+ from langgraph.errors import GraphInterrupt
+ if isinstance(e, GraphInterrupt):
+ raise
+ logger.error(f"Error updating Jira issue: {e}", exc_info=True)
+ return {"status": "error", "message": "Something went wrong while updating the issue."}
+
+ return update_jira_issue
diff --git a/surfsense_backend/app/agents/new_chat/tools/registry.py b/surfsense_backend/app/agents/new_chat/tools/registry.py
index 13f396db9..9374fd9d0 100644
--- a/surfsense_backend/app/agents/new_chat/tools/registry.py
+++ b/surfsense_backend/app/agents/new_chat/tools/registry.py
@@ -70,6 +70,16 @@ from .linear import (
)
from .link_preview import create_link_preview_tool
from .mcp_tool import load_mcp_tools
+from .jira import (
+ create_create_jira_issue_tool,
+ create_delete_jira_issue_tool,
+ create_update_jira_issue_tool,
+)
+from .confluence import (
+ create_create_confluence_page_tool,
+ create_delete_confluence_page_tool,
+ create_update_confluence_page_tool,
+)
from .notion import (
create_create_notion_page_tool,
create_delete_notion_page_tool,
@@ -425,6 +435,74 @@ BUILTIN_TOOLS: list[ToolDefinition] = [
),
requires=["db_session", "search_space_id", "user_id"],
),
+ # =========================================================================
+ # JIRA TOOLS - create, update, delete issues
+ # Auto-disabled when no Jira connector is configured (see chat_deepagent.py)
+ # =========================================================================
+ ToolDefinition(
+ name="create_jira_issue",
+ description="Create a new issue in the user's Jira project",
+ factory=lambda deps: create_create_jira_issue_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
+ ToolDefinition(
+ name="update_jira_issue",
+ description="Update an existing indexed Jira issue",
+ factory=lambda deps: create_update_jira_issue_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
+ ToolDefinition(
+ name="delete_jira_issue",
+ description="Delete an existing indexed Jira issue",
+ factory=lambda deps: create_delete_jira_issue_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
+ # =========================================================================
+ # CONFLUENCE TOOLS - create, update, delete pages
+ # Auto-disabled when no Confluence connector is configured (see chat_deepagent.py)
+ # =========================================================================
+ ToolDefinition(
+ name="create_confluence_page",
+ description="Create a new page in the user's Confluence space",
+ factory=lambda deps: create_create_confluence_page_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
+ ToolDefinition(
+ name="update_confluence_page",
+ description="Update an existing indexed Confluence page",
+ factory=lambda deps: create_update_confluence_page_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
+ ToolDefinition(
+ name="delete_confluence_page",
+ description="Delete an existing indexed Confluence page",
+ factory=lambda deps: create_delete_confluence_page_tool(
+ db_session=deps["db_session"],
+ search_space_id=deps["search_space_id"],
+ user_id=deps["user_id"],
+ ),
+ requires=["db_session", "search_space_id", "user_id"],
+ ),
]
diff --git a/surfsense_backend/app/connectors/confluence_history.py b/surfsense_backend/app/connectors/confluence_history.py
index 5d19edc54..927ebffeb 100644
--- a/surfsense_backend/app/connectors/confluence_history.py
+++ b/surfsense_backend/app/connectors/confluence_history.py
@@ -341,6 +341,65 @@ class ConfluenceHistoryConnector:
logger.error(f"Confluence API request error: {e!s}", exc_info=True)
raise Exception(f"Confluence API request failed: {e!s}") from e
+ async def _make_api_request_with_method(
+ self,
+ endpoint: str,
+ method: str = "GET",
+ json_payload: dict[str, Any] | None = None,
+ params: dict[str, Any] | None = None,
+ ) -> dict[str, Any]:
+ """Make a request to the Confluence API with a specified HTTP method."""
+ if not self._use_oauth:
+ raise ValueError("Write operations require OAuth authentication")
+
+ token = await self._get_valid_token()
+ base_url = await self._get_base_url()
+ http_client = await self._get_client()
+
+ url = f"{base_url}/wiki/api/v2/{endpoint}"
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {token}",
+ "Accept": "application/json",
+ }
+
+ try:
+ method_upper = method.upper()
+ if method_upper == "POST":
+ response = await http_client.post(
+ url, headers=headers, json=json_payload, params=params
+ )
+ elif method_upper == "PUT":
+ response = await http_client.put(
+ url, headers=headers, json=json_payload, params=params
+ )
+ elif method_upper == "DELETE":
+ response = await http_client.delete(
+ url, headers=headers, params=params
+ )
+ else:
+ response = await http_client.get(
+ url, headers=headers, params=params
+ )
+
+ response.raise_for_status()
+ if response.status_code == 204 or not response.text:
+ return {"status": "success"}
+ return response.json()
+ except httpx.HTTPStatusError as e:
+ error_detail = {
+ "status_code": e.response.status_code,
+ "url": str(e.request.url),
+ "response_text": e.response.text,
+ }
+ logger.error(f"Confluence API HTTP error: {error_detail}")
+ raise Exception(
+ f"Confluence API request failed (HTTP {e.response.status_code}): {e.response.text}"
+ ) from e
+ except httpx.RequestError as e:
+ logger.error(f"Confluence API request error: {e!s}", exc_info=True)
+ raise Exception(f"Confluence API request failed: {e!s}") from e
+
async def get_all_spaces(self) -> list[dict[str, Any]]:
"""
Fetch all spaces from Confluence.
@@ -593,6 +652,65 @@ class ConfluenceHistoryConnector:
except Exception as e:
return [], f"Error fetching pages: {e!s}"
+ async def get_page(self, page_id: str) -> dict[str, Any]:
+ """Fetch a single page by ID with body content."""
+ return await self._make_api_request(
+ f"pages/{page_id}", params={"body-format": "storage"}
+ )
+
+ async def create_page(
+ self,
+ space_id: str,
+ title: str,
+ body: str,
+ parent_page_id: str | None = None,
+ ) -> dict[str, Any]:
+ """Create a new Confluence page."""
+ payload: dict[str, Any] = {
+ "spaceId": space_id,
+ "title": title,
+ "body": {
+ "representation": "storage",
+ "value": body,
+ },
+ "status": "current",
+ }
+ if parent_page_id:
+ payload["parentId"] = parent_page_id
+ return await self._make_api_request_with_method(
+ "pages", method="POST", json_payload=payload
+ )
+
+ async def update_page(
+ self,
+ page_id: str,
+ title: str,
+ body: str,
+ version_number: int,
+ ) -> dict[str, Any]:
+ """Update an existing Confluence page (requires version number)."""
+ payload: dict[str, Any] = {
+ "id": page_id,
+ "title": title,
+ "body": {
+ "representation": "storage",
+ "value": body,
+ },
+ "version": {
+ "number": version_number,
+ },
+ "status": "current",
+ }
+ return await self._make_api_request_with_method(
+ f"pages/{page_id}", method="PUT", json_payload=payload
+ )
+
+ async def delete_page(self, page_id: str) -> dict[str, Any]:
+ """Delete a Confluence page."""
+ return await self._make_api_request_with_method(
+ f"pages/{page_id}", method="DELETE"
+ )
+
async def close(self):
"""Close the HTTP client connection."""
if self._http_client:
diff --git a/surfsense_backend/app/connectors/jira_connector.py b/surfsense_backend/app/connectors/jira_connector.py
index 370460e04..772b9404c 100644
--- a/surfsense_backend/app/connectors/jira_connector.py
+++ b/surfsense_backend/app/connectors/jira_connector.py
@@ -167,14 +167,23 @@ class JiraConnector:
# Use direct base URL (works for both OAuth and legacy)
url = f"{self.base_url}/rest/api/{self.api_version}/{endpoint}"
- if method.upper() == "POST":
+ method_upper = method.upper()
+ if method_upper == "POST":
response = requests.post(
url, headers=headers, json=json_payload, timeout=500
)
+ elif method_upper == "PUT":
+ response = requests.put(
+ url, headers=headers, json=json_payload, timeout=500
+ )
+ elif method_upper == "DELETE":
+ response = requests.delete(url, headers=headers, params=params, timeout=500)
else:
response = requests.get(url, headers=headers, params=params, timeout=500)
- if response.status_code == 200:
+ if response.status_code in (200, 201, 204):
+ if response.status_code == 204 or not response.text:
+ return {"status": "success"}
return response.json()
else:
raise Exception(
@@ -352,6 +361,91 @@ class JiraConnector:
except Exception as e:
return [], f"Error fetching issues: {e!s}"
+ def get_myself(self) -> dict[str, Any]:
+ """Fetch the current user's profile (health check)."""
+ return self.make_api_request("myself")
+
+ def get_projects(self) -> list[dict[str, Any]]:
+ """Fetch all projects the user has access to."""
+ result = self.make_api_request("project/search")
+ return result.get("values", [])
+
+ def get_issue_types(self) -> list[dict[str, Any]]:
+ """Fetch all issue types."""
+ return self.make_api_request("issuetype")
+
+ def get_priorities(self) -> list[dict[str, Any]]:
+ """Fetch all priority levels."""
+ return self.make_api_request("priority")
+
+ def get_issue(self, issue_id_or_key: str) -> dict[str, Any]:
+ """Fetch a single issue by ID or key."""
+ return self.make_api_request(f"issue/{issue_id_or_key}")
+
+ def create_issue(
+ self,
+ project_key: str,
+ summary: str,
+ issue_type: str = "Task",
+ description: str | None = None,
+ priority: str | None = None,
+ assignee_id: str | None = None,
+ ) -> dict[str, Any]:
+ """Create a new Jira issue."""
+ fields: dict[str, Any] = {
+ "project": {"key": project_key},
+ "summary": summary,
+ "issuetype": {"name": issue_type},
+ }
+ if description:
+ fields["description"] = {
+ "type": "doc",
+ "version": 1,
+ "content": [
+ {
+ "type": "paragraph",
+ "content": [{"type": "text", "text": description}],
+ }
+ ],
+ }
+ if priority:
+ fields["priority"] = {"name": priority}
+ if assignee_id:
+ fields["assignee"] = {"accountId": assignee_id}
+
+ return self.make_api_request(
+ "issue", method="POST", json_payload={"fields": fields}
+ )
+
+ def update_issue(
+ self, issue_id_or_key: str, fields: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Update an existing Jira issue fields."""
+ return self.make_api_request(
+ f"issue/{issue_id_or_key}",
+ method="PUT",
+ json_payload={"fields": fields},
+ )
+
+ def delete_issue(self, issue_id_or_key: str) -> dict[str, Any]:
+ """Delete a Jira issue."""
+ return self.make_api_request(f"issue/{issue_id_or_key}", method="DELETE")
+
+ def get_transitions(self, issue_id_or_key: str) -> list[dict[str, Any]]:
+ """Get available transitions for an issue (for status changes)."""
+ result = self.make_api_request(f"issue/{issue_id_or_key}/transitions")
+ return result.get("transitions", [])
+
+ def transition_issue(
+ self, issue_id_or_key: str, transition_id: str
+ ) -> dict[str, Any]:
+ """Transition an issue to a new status."""
+ return self.make_api_request(
+ f"issue/{issue_id_or_key}/transitions",
+ method="POST",
+ json_payload={"transition": {"id": transition_id}},
+ )
+
def format_issue(self, issue: dict[str, Any]) -> dict[str, Any]:
"""
Format an issue for easier consumption.
diff --git a/surfsense_backend/app/routes/confluence_add_connector_route.py b/surfsense_backend/app/routes/confluence_add_connector_route.py
index f50383860..42235e240 100644
--- a/surfsense_backend/app/routes/confluence_add_connector_route.py
+++ b/surfsense_backend/app/routes/confluence_add_connector_route.py
@@ -46,6 +46,8 @@ SCOPES = [
"read:space:confluence",
"read:page:confluence",
"read:comment:confluence",
+ "write:page:confluence", # Required for creating/updating pages
+ "delete:page:confluence", # Required for deleting pages
"offline_access", # Required for refresh tokens
]
@@ -196,6 +198,8 @@ async def confluence_callback(
user_id = UUID(data["user_id"])
space_id = data["space_id"]
+ reauth_connector_id = data.get("connector_id")
+ reauth_return_url = data.get("return_url")
# Validate redirect URI (security: ensure it matches configured value)
if not config.CONFLUENCE_REDIRECT_URI:
@@ -292,6 +296,46 @@ async def confluence_callback(
"_token_encrypted": True,
}
+        # Handle re-authentication: update the existing connector instead of creating a new one
+ if reauth_connector_id:
+ from sqlalchemy.future import select as sa_select
+ from sqlalchemy.orm.attributes import flag_modified
+
+ result = await session.execute(
+ sa_select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == reauth_connector_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.search_space_id == space_id,
+ SearchSourceConnector.connector_type
+ == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ db_connector = result.scalars().first()
+ if not db_connector:
+ raise HTTPException(
+ status_code=404,
+ detail="Connector not found or access denied during re-auth",
+ )
+
+ db_connector.config = {
+ **connector_config,
+ "auth_expired": False,
+ }
+ flag_modified(db_connector, "config")
+ await session.commit()
+ await session.refresh(db_connector)
+
+ logger.info(
+ f"Re-authenticated Confluence connector {db_connector.id} for user {user_id}"
+ )
+ if reauth_return_url and reauth_return_url.startswith("/"):
+ return RedirectResponse(
+ url=f"{config.NEXT_FRONTEND_URL}{reauth_return_url}?reauth=success&connector=confluence-connector"
+ )
+ return RedirectResponse(
+ url=f"{config.NEXT_FRONTEND_URL}/dashboard/{space_id}/connectors/callback?reauth=success&connector=confluence-connector"
+ )
+
# Extract unique identifier from connector credentials
connector_identifier = extract_identifier_from_credentials(
SearchSourceConnectorType.CONFLUENCE_CONNECTOR, connector_config
@@ -372,6 +416,73 @@ async def confluence_callback(
) from e
+@router.get("/auth/confluence/connector/reauth")
+async def reauth_confluence(
+ space_id: int,
+ connector_id: int,
+ return_url: str | None = None,
+ user: User = Depends(current_active_user),
+ session: AsyncSession = Depends(get_async_session),
+):
+ """Initiate Confluence re-authentication to upgrade OAuth scopes."""
+ try:
+ from sqlalchemy.future import select
+
+ result = await session.execute(
+ select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == connector_id,
+ SearchSourceConnector.user_id == user.id,
+ SearchSourceConnector.search_space_id == space_id,
+ SearchSourceConnector.connector_type
+ == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ )
+ )
+ connector = result.scalars().first()
+ if not connector:
+ raise HTTPException(
+ status_code=404,
+ detail="Confluence connector not found or access denied",
+ )
+
+ if not config.SECRET_KEY:
+ raise HTTPException(
+ status_code=500, detail="SECRET_KEY not configured for OAuth security."
+ )
+
+ state_manager = get_state_manager()
+ extra: dict = {"connector_id": connector_id}
+ if return_url and return_url.startswith("/"):
+ extra["return_url"] = return_url
+ state_encoded = state_manager.generate_secure_state(space_id, user.id, **extra)
+
+ from urllib.parse import urlencode
+
+ auth_params = {
+ "audience": "api.atlassian.com",
+ "client_id": config.ATLASSIAN_CLIENT_ID,
+ "scope": " ".join(SCOPES),
+ "redirect_uri": config.CONFLUENCE_REDIRECT_URI,
+ "state": state_encoded,
+ "response_type": "code",
+ "prompt": "consent",
+ }
+
+ auth_url = f"{AUTHORIZATION_URL}?{urlencode(auth_params)}"
+
+ logger.info(
+ f"Initiating Confluence re-auth for user {user.id}, connector {connector_id}"
+ )
+ return {"auth_url": auth_url}
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Failed to initiate Confluence re-auth: {e!s}", exc_info=True)
+ raise HTTPException(
+ status_code=500, detail=f"Failed to initiate Confluence re-auth: {e!s}"
+ ) from e
+
+
async def refresh_confluence_token(
session: AsyncSession, connector: SearchSourceConnector
) -> SearchSourceConnector:
diff --git a/surfsense_backend/app/routes/jira_add_connector_route.py b/surfsense_backend/app/routes/jira_add_connector_route.py
index 90267bbab..6cd6283d7 100644
--- a/surfsense_backend/app/routes/jira_add_connector_route.py
+++ b/surfsense_backend/app/routes/jira_add_connector_route.py
@@ -45,6 +45,7 @@ ACCESSIBLE_RESOURCES_URL = "https://api.atlassian.com/oauth/token/accessible-res
SCOPES = [
"read:jira-work",
"read:jira-user",
+ "write:jira-work", # Required for creating/updating/deleting issues
"offline_access", # Required for refresh tokens
]
@@ -193,6 +194,8 @@ async def jira_callback(
user_id = UUID(data["user_id"])
space_id = data["space_id"]
+ reauth_connector_id = data.get("connector_id")
+ reauth_return_url = data.get("return_url")
# Validate redirect URI (security: ensure it matches configured value)
if not config.JIRA_REDIRECT_URI:
@@ -310,6 +313,46 @@ async def jira_callback(
"_token_encrypted": True,
}
+        # Handle re-authentication: update the existing connector instead of creating a new one
+ if reauth_connector_id:
+ from sqlalchemy.future import select as sa_select
+ from sqlalchemy.orm.attributes import flag_modified
+
+ result = await session.execute(
+ sa_select(SearchSourceConnector).filter(
+ SearchSourceConnector.id == reauth_connector_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.search_space_id == space_id,
+ SearchSourceConnector.connector_type
+ == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ db_connector = result.scalars().first()
+ if not db_connector:
+ raise HTTPException(
+ status_code=404,
+ detail="Connector not found or access denied during re-auth",
+ )
+
+ db_connector.config = {
+ **connector_config,
+ "auth_expired": False,
+ }
+ flag_modified(db_connector, "config")
+ await session.commit()
+ await session.refresh(db_connector)
+
+ logger.info(
+ f"Re-authenticated Jira connector {db_connector.id} for user {user_id}"
+ )
+ if reauth_return_url and reauth_return_url.startswith("/"):
+ return RedirectResponse(
+ url=f"{config.NEXT_FRONTEND_URL}{reauth_return_url}?reauth=success&connector=jira-connector"
+ )
+ return RedirectResponse(
+ url=f"{config.NEXT_FRONTEND_URL}/dashboard/{space_id}/connectors/callback?reauth=success&connector=jira-connector"
+ )
+
# Extract unique identifier from connector credentials
connector_identifier = extract_identifier_from_credentials(
SearchSourceConnectorType.JIRA_CONNECTOR, connector_config
@@ -390,6 +433,73 @@ async def jira_callback(
) from e
@router.get("/auth/jira/connector/reauth")
async def reauth_jira(
    space_id: int,
    connector_id: int,
    return_url: str | None = None,
    user: User = Depends(current_active_user),
    session: AsyncSession = Depends(get_async_session),
):
    """Initiate Jira re-authentication to upgrade OAuth scopes.

    Verifies that the connector exists and belongs to the requesting user and
    search space, then builds an Atlassian authorization URL. The signed
    ``state`` embeds the connector id (and an optional relative return URL) so
    the OAuth callback can update the existing connector in place instead of
    creating a new one.

    Returns:
        dict: ``{"auth_url": ...}`` for the frontend to redirect the user to.

    Raises:
        HTTPException: 404 if the connector is not found or not owned by the
            user; 500 if SECRET_KEY is missing or URL construction fails.
    """
    try:
        from sqlalchemy.future import select

        result = await session.execute(
            select(SearchSourceConnector).filter(
                SearchSourceConnector.id == connector_id,
                SearchSourceConnector.user_id == user.id,
                SearchSourceConnector.search_space_id == space_id,
                SearchSourceConnector.connector_type
                == SearchSourceConnectorType.JIRA_CONNECTOR,
            )
        )
        connector = result.scalars().first()
        if not connector:
            raise HTTPException(
                status_code=404,
                detail="Jira connector not found or access denied",
            )

        if not config.SECRET_KEY:
            # The state parameter is signed with SECRET_KEY; without it the
            # OAuth round-trip cannot be validated.
            raise HTTPException(
                status_code=500, detail="SECRET_KEY not configured for OAuth security."
            )

        state_manager = get_state_manager()
        extra: dict = {"connector_id": connector_id}
        # Only accept relative return URLs to avoid open-redirect abuse.
        if return_url and return_url.startswith("/"):
            extra["return_url"] = return_url
        state_encoded = state_manager.generate_secure_state(space_id, user.id, **extra)

        from urllib.parse import urlencode

        auth_params = {
            "audience": "api.atlassian.com",
            "client_id": config.ATLASSIAN_CLIENT_ID,
            "scope": " ".join(SCOPES),
            "redirect_uri": config.JIRA_REDIRECT_URI,
            "state": state_encoded,
            "response_type": "code",
            # Re-show the consent screen so newly added scopes are granted.
            "prompt": "consent",
        }

        auth_url = f"{AUTHORIZATION_URL}?{urlencode(auth_params)}"

        logger.info(
            f"Initiating Jira re-auth for user {user.id}, connector {connector_id}"
        )
        return {"auth_url": auth_url}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to initiate Jira re-auth: {e!s}", exc_info=True)
        raise HTTPException(
            status_code=500, detail=f"Failed to initiate Jira re-auth: {e!s}"
        ) from e
+
+
async def refresh_jira_token(
session: AsyncSession, connector: SearchSourceConnector
) -> SearchSourceConnector:
diff --git a/surfsense_backend/app/services/confluence/__init__.py b/surfsense_backend/app/services/confluence/__init__.py
new file mode 100644
index 000000000..aeedd384a
--- /dev/null
+++ b/surfsense_backend/app/services/confluence/__init__.py
@@ -0,0 +1,13 @@
+from app.services.confluence.kb_sync_service import ConfluenceKBSyncService
+from app.services.confluence.tool_metadata_service import (
+ ConfluencePage,
+ ConfluenceToolMetadataService,
+ ConfluenceWorkspace,
+)
+
+__all__ = [
+ "ConfluenceKBSyncService",
+ "ConfluencePage",
+ "ConfluenceToolMetadataService",
+ "ConfluenceWorkspace",
+]
diff --git a/surfsense_backend/app/services/confluence/kb_sync_service.py b/surfsense_backend/app/services/confluence/kb_sync_service.py
new file mode 100644
index 000000000..a63a22f3f
--- /dev/null
+++ b/surfsense_backend/app/services/confluence/kb_sync_service.py
@@ -0,0 +1,225 @@
+import logging
+from datetime import datetime
+
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.db import Document, DocumentType
+from app.services.llm_service import get_user_long_context_llm
+from app.utils.document_converters import (
+ create_document_chunks,
+ embed_text,
+ generate_content_hash,
+ generate_document_summary,
+ generate_unique_identifier_hash,
+)
+
+logger = logging.getLogger(__name__)
+
+
class ConfluenceKBSyncService:
    """Syncs Confluence page documents to the knowledge base after HITL actions."""

    def __init__(self, db_session: AsyncSession):
        # Session is owned by the caller; this service commits/rolls back on it.
        self.db_session = db_session

    async def sync_after_create(
        self,
        page_id: str,
        page_title: str,
        space_id: str,
        body_content: str | None,
        connector_id: int,
        search_space_id: int,
        user_id: str,
    ) -> dict:
        """Index a freshly created Confluence page into the knowledge base.

        Idempotent: if the page is already indexed (same unique identifier
        hash) it is left untouched. Returns ``{"status": "success"}`` or
        ``{"status": "error", "message": ...}``.
        """
        # Imported lazily to avoid a circular import with the indexer tasks.
        from app.tasks.connector_indexers.base import (
            check_document_by_unique_identifier,
            check_duplicate_document_by_hash,
            get_current_timestamp,
            safe_set_chunks,
        )

        try:
            unique_hash = generate_unique_identifier_hash(
                DocumentType.CONFLUENCE_CONNECTOR, page_id, search_space_id
            )

            existing = await check_document_by_unique_identifier(
                self.db_session, unique_hash
            )
            if existing:
                # Already indexed (e.g. by a concurrent sync); nothing to do.
                return {"status": "success"}

            indexable_content = (body_content or "").strip()
            if not indexable_content:
                # Empty pages still get a minimal searchable stub.
                indexable_content = f"Confluence Page: {page_title}"

            page_content = f"# {page_title}\n\n{indexable_content}"

            content_hash = generate_content_hash(page_content, search_space_id)

            with self.db_session.no_autoflush:
                dup = await check_duplicate_document_by_hash(
                    self.db_session, content_hash
                )
                if dup:
                    # Identical content already indexed elsewhere; fall back to
                    # the unique identifier hash to avoid a constraint clash.
                    content_hash = unique_hash

            user_llm = await get_user_long_context_llm(
                self.db_session,
                user_id,
                search_space_id,
                disable_streaming=True,
            )

            doc_metadata_for_summary = {
                "page_title": page_title,
                "space_id": space_id,
                "document_type": "Confluence Page",
                "connector_type": "Confluence",
            }

            if user_llm:
                summary_content, summary_embedding = await generate_document_summary(
                    page_content, user_llm, doc_metadata_for_summary
                )
            else:
                # No LLM configured: embed a plain concatenation instead.
                summary_content = f"Confluence Page: {page_title}\n\n{page_content}"
                summary_embedding = embed_text(summary_content)

            chunks = await create_document_chunks(page_content)
            now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            document = Document(
                title=page_title,
                document_type=DocumentType.CONFLUENCE_CONNECTOR,
                document_metadata={
                    "page_id": page_id,
                    "page_title": page_title,
                    "space_id": space_id,
                    "comment_count": 0,
                    "indexed_at": now_str,
                    "connector_id": connector_id,
                },
                content=summary_content,
                content_hash=content_hash,
                unique_identifier_hash=unique_hash,
                embedding=summary_embedding,
                search_space_id=search_space_id,
                connector_id=connector_id,
                updated_at=get_current_timestamp(),
                created_by_id=user_id,
            )

            self.db_session.add(document)
            await self.db_session.flush()
            await safe_set_chunks(self.db_session, document, chunks)
            await self.db_session.commit()

            logger.info(
                "KB sync after create succeeded: doc_id=%s, page=%s",
                document.id,
                page_title,
            )
            return {"status": "success"}

        except Exception as e:
            error_str = str(e).lower()
            # A concurrent sync may have inserted the same document; treat a
            # unique-constraint violation as a benign race.
            if (
                "duplicate key value violates unique constraint" in error_str
                or "uniqueviolationerror" in error_str
            ):
                await self.db_session.rollback()
                return {"status": "error", "message": "Duplicate document detected"}

            logger.error(
                "KB sync after create failed for page %s: %s",
                page_title,
                e,
                exc_info=True,
            )
            await self.db_session.rollback()
            return {"status": "error", "message": str(e)}

    async def sync_after_update(
        self,
        document_id: int,
        page_id: str,
        user_id: str,
        search_space_id: int,
    ) -> dict:
        """Re-index an already-indexed Confluence page after an update.

        Fetches the current page from Confluence, regenerates the summary,
        embedding and chunks, and updates the existing KB document in place.
        Returns ``{"status": "success"}``, ``{"status": "not_indexed"}`` or
        ``{"status": "error", "message": ...}``.
        """
        from app.tasks.connector_indexers.base import (
            get_current_timestamp,
            safe_set_chunks,
        )

        try:
            document = await self.db_session.get(Document, document_id)
            if not document:
                return {"status": "not_indexed"}

            connector_id = document.connector_id
            if not connector_id:
                return {"status": "error", "message": "Document has no connector_id"}

            client = ConfluenceHistoryConnector(
                session=self.db_session, connector_id=connector_id
            )
            try:
                page_data = await client.get_page(page_id)
            finally:
                # Release the HTTP client even when the fetch fails.
                try:
                    await client.close()
                except Exception:
                    logger.debug(
                        "Failed to close Confluence client for connector %s",
                        connector_id,
                        exc_info=True,
                    )

            page_title = page_data.get("title", "")
            body_obj = page_data.get("body", {})
            body_content = ""
            if isinstance(body_obj, dict):
                storage = body_obj.get("storage", {})
                if isinstance(storage, dict):
                    body_content = storage.get("value", "")

            # Guard before formatting: the "# " prefix would otherwise make
            # the formatted content non-empty even for a blank page.
            if not page_title.strip() and not body_content.strip():
                return {"status": "error", "message": "Page produced empty content"}

            page_content = f"# {page_title}\n\n{body_content}"

            space_id = (document.document_metadata or {}).get("space_id", "")

            user_llm = await get_user_long_context_llm(
                self.db_session, user_id, search_space_id, disable_streaming=True
            )

            if user_llm:
                doc_meta = {
                    "page_title": page_title,
                    "space_id": space_id,
                    "document_type": "Confluence Page",
                    "connector_type": "Confluence",
                }
                summary_content, summary_embedding = await generate_document_summary(
                    page_content, user_llm, doc_meta
                )
            else:
                # No LLM configured: embed a plain concatenation instead.
                summary_content = f"Confluence Page: {page_title}\n\n{page_content}"
                summary_embedding = embed_text(summary_content)

            chunks = await create_document_chunks(page_content)

            document.title = page_title
            document.content = summary_content
            document.content_hash = generate_content_hash(page_content, search_space_id)
            document.embedding = summary_embedding

            from sqlalchemy.orm.attributes import flag_modified

            document.document_metadata = {
                **(document.document_metadata or {}),
                "page_id": page_id,
                "page_title": page_title,
                "space_id": space_id,
                "indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "connector_id": connector_id,
            }
            # JSON column: mark dirty so SQLAlchemy persists the new dict.
            flag_modified(document, "document_metadata")
            await safe_set_chunks(self.db_session, document, chunks)
            document.updated_at = get_current_timestamp()

            await self.db_session.commit()

            logger.info(
                "KB sync successful for document %s (%s)",
                document_id,
                page_title,
            )
            return {"status": "success"}

        except Exception as e:
            logger.error(
                "KB sync failed for document %s: %s", document_id, e, exc_info=True
            )
            await self.db_session.rollback()
            return {"status": "error", "message": str(e)}
diff --git a/surfsense_backend/app/services/confluence/tool_metadata_service.py b/surfsense_backend/app/services/confluence/tool_metadata_service.py
new file mode 100644
index 000000000..31b6d68f1
--- /dev/null
+++ b/surfsense_backend/app/services/confluence/tool_metadata_service.py
@@ -0,0 +1,307 @@
+import logging
+from dataclasses import dataclass
+
+from sqlalchemy import and_, func, or_
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.confluence_history import ConfluenceHistoryConnector
+from app.db import (
+ Document,
+ DocumentType,
+ SearchSourceConnector,
+ SearchSourceConnectorType,
+)
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class ConfluenceWorkspace:
    """A Confluence connector presented as a workspace in tool context."""

    id: int
    name: str
    base_url: str

    @classmethod
    def from_connector(cls, connector: SearchSourceConnector) -> "ConfluenceWorkspace":
        """Build a workspace record from a connector row."""
        cfg = connector.config
        return cls(id=connector.id, name=connector.name, base_url=cfg.get("base_url", ""))

    def to_dict(self) -> dict:
        """Serialize to a plain dict for interrupt payloads."""
        return dict(id=self.id, name=self.name, base_url=self.base_url)
+
+
@dataclass
class ConfluencePage:
    """An indexed Confluence page resolved from the knowledge base."""

    page_id: str
    page_title: str
    space_id: str
    connector_id: int
    document_id: int
    indexed_at: str | None

    @classmethod
    def from_document(cls, document: Document) -> "ConfluencePage":
        """Hydrate a page record from a KB document and its metadata."""
        meta = document.document_metadata or {}
        title = meta.get("page_title", document.title)
        return cls(
            page_id=meta.get("page_id", ""),
            page_title=title,
            space_id=meta.get("space_id", ""),
            connector_id=document.connector_id,
            document_id=document.id,
            indexed_at=meta.get("indexed_at"),
        )

    def to_dict(self) -> dict:
        """Serialize to a plain dict for interrupt payloads."""
        return dict(
            page_id=self.page_id,
            page_title=self.page_title,
            space_id=self.space_id,
            connector_id=self.connector_id,
            document_id=self.document_id,
            indexed_at=self.indexed_at,
        )
+
+
class ConfluenceToolMetadataService:
    """Builds interrupt context for Confluence HITL tools.

    The context payloads (accounts, spaces, current page state) are surfaced
    to the user before a create/update/delete action is confirmed.
    """

    def __init__(self, db_session: AsyncSession):
        # Session is owned by the caller; used for KB lookups and auth flags.
        self._db_session = db_session

    async def _check_account_health(self, connector: SearchSourceConnector) -> bool:
        """Check if the Confluence connector auth is still valid.

        Returns True if auth is expired/invalid, False if healthy. On failure
        the ``auth_expired`` flag is persisted to the connector config (best
        effort) so the UI can prompt for re-authentication.
        """
        client = None
        try:
            client = ConfluenceHistoryConnector(
                session=self._db_session, connector_id=connector.id
            )
            await client._get_valid_token()
            return False
        except Exception as e:
            logger.warning(
                "Confluence connector %s health check failed: %s", connector.id, e
            )
            try:
                connector.config = {**connector.config, "auth_expired": True}
                flag_modified(connector, "config")
                await self._db_session.commit()
                await self._db_session.refresh(connector)
            except Exception:
                logger.warning(
                    "Failed to persist auth_expired for connector %s",
                    connector.id,
                    exc_info=True,
                )
            return True
        finally:
            # Release the HTTP client even when the token check fails.
            if client is not None:
                try:
                    await client.close()
                except Exception:
                    logger.debug(
                        "Failed to close Confluence client for connector %s",
                        connector.id,
                        exc_info=True,
                    )

    async def get_creation_context(self, search_space_id: int, user_id: str) -> dict:
        """Return context needed to create a new Confluence page.

        Fetches all connected accounts, and for the first healthy one fetches
        the available spaces.
        """
        connectors = await self._get_all_confluence_connectors(search_space_id, user_id)
        if not connectors:
            return {"error": "No Confluence account connected"}

        accounts = []
        spaces = []
        fetched_context = False

        for connector in connectors:
            auth_expired = await self._check_account_health(connector)
            workspace = ConfluenceWorkspace.from_connector(connector)
            accounts.append({
                **workspace.to_dict(),
                "auth_expired": auth_expired,
            })

            # Only the first healthy account supplies the space list; other
            # accounts are still reported with their health status.
            if not auth_expired and not fetched_context:
                client = None
                try:
                    client = ConfluenceHistoryConnector(
                        session=self._db_session, connector_id=connector.id
                    )
                    raw_spaces = await client.get_all_spaces()
                    spaces = [
                        {"id": s.get("id"), "key": s.get("key"), "name": s.get("name")}
                        for s in raw_spaces
                    ]
                    fetched_context = True
                except Exception as e:
                    # Best-effort: a failed space fetch must not hide accounts.
                    logger.warning(
                        "Failed to fetch Confluence spaces for connector %s: %s",
                        connector.id,
                        e,
                    )
                finally:
                    # Release the HTTP client even when the fetch fails.
                    if client is not None:
                        try:
                            await client.close()
                        except Exception:
                            logger.debug(
                                "Failed to close Confluence client for connector %s",
                                connector.id,
                                exc_info=True,
                            )

        return {
            "accounts": accounts,
            "spaces": spaces,
        }

    async def get_update_context(
        self, search_space_id: int, user_id: str, page_ref: str
    ) -> dict:
        """Return context needed to update an indexed Confluence page.

        Resolves the page from KB, then fetches current content and version
        from the Confluence API.
        """
        document = await self._resolve_page(search_space_id, user_id, page_ref)
        if not document:
            return {
                "error": f"Page '{page_ref}' not found in your synced Confluence pages. "
                "Please make sure the page is indexed in your knowledge base."
            }

        connector = await self._get_connector_for_document(document, user_id)
        if not connector:
            return {"error": "Connector not found or access denied"}

        auth_expired = await self._check_account_health(connector)
        if auth_expired:
            return {
                "error": "Confluence authentication has expired. Please re-authenticate.",
                "auth_expired": True,
                "connector_id": connector.id,
            }

        workspace = ConfluenceWorkspace.from_connector(connector)
        page = ConfluencePage.from_document(document)

        client = None
        try:
            client = ConfluenceHistoryConnector(
                session=self._db_session, connector_id=connector.id
            )
            page_data = await client.get_page(page.page_id)
        except Exception as e:
            error_str = str(e).lower()
            # Heuristic: auth-looking failures are surfaced as expired
            # credentials so the UI can offer re-authentication.
            if "401" in error_str or "403" in error_str or "authentication" in error_str:
                return {
                    "error": f"Failed to fetch Confluence page: {e!s}",
                    "auth_expired": True,
                    "connector_id": connector.id,
                }
            return {"error": f"Failed to fetch Confluence page: {e!s}"}
        finally:
            # Release the HTTP client on both success and failure paths.
            if client is not None:
                try:
                    await client.close()
                except Exception:
                    logger.debug(
                        "Failed to close Confluence client for connector %s",
                        connector.id,
                        exc_info=True,
                    )

        body_storage = ""
        body_obj = page_data.get("body", {})
        if isinstance(body_obj, dict):
            storage = body_obj.get("storage", {})
            if isinstance(storage, dict):
                body_storage = storage.get("value", "")

        version_obj = page_data.get("version", {})
        # NOTE(review): version presumably feeds Confluence's versioned update
        # API — confirm against the update tool that consumes this context.
        version_number = version_obj.get("number", 1) if isinstance(version_obj, dict) else 1

        return {
            "account": {**workspace.to_dict(), "auth_expired": False},
            "page": {
                "page_id": page.page_id,
                "page_title": page_data.get("title", page.page_title),
                "space_id": page.space_id,
                "body": body_storage,
                "version": version_number,
                "document_id": page.document_id,
                "indexed_at": page.indexed_at,
            },
        }

    async def get_deletion_context(
        self, search_space_id: int, user_id: str, page_ref: str
    ) -> dict:
        """Return context needed to delete a Confluence page (KB metadata only).

        Deliberately avoids a live API probe; uses the persisted
        ``auth_expired`` flag instead.
        """
        document = await self._resolve_page(search_space_id, user_id, page_ref)
        if not document:
            return {
                "error": f"Page '{page_ref}' not found in your synced Confluence pages. "
                "Please make sure the page is indexed in your knowledge base."
            }

        connector = await self._get_connector_for_document(document, user_id)
        if not connector:
            return {"error": "Connector not found or access denied"}

        auth_expired = connector.config.get("auth_expired", False)
        workspace = ConfluenceWorkspace.from_connector(connector)
        page = ConfluencePage.from_document(document)

        return {
            "account": {**workspace.to_dict(), "auth_expired": auth_expired},
            "page": page.to_dict(),
        }

    async def _resolve_page(
        self, search_space_id: int, user_id: str, page_ref: str
    ) -> Document | None:
        """Resolve a page by case-insensitive match on the metadata
        ``page_title`` or the document title; newest match wins."""
        ref_lower = page_ref.lower()

        result = await self._db_session.execute(
            select(Document)
            .join(
                SearchSourceConnector, Document.connector_id == SearchSourceConnector.id
            )
            .filter(
                and_(
                    Document.search_space_id == search_space_id,
                    Document.document_type == DocumentType.CONFLUENCE_CONNECTOR,
                    SearchSourceConnector.user_id == user_id,
                    or_(
                        func.lower(
                            Document.document_metadata.op("->>")("page_title")
                        )
                        == ref_lower,
                        func.lower(Document.title) == ref_lower,
                    ),
                )
            )
            .order_by(Document.updated_at.desc().nullslast())
            .limit(1)
        )
        return result.scalars().first()

    async def _get_all_confluence_connectors(
        self, search_space_id: int, user_id: str
    ) -> list[SearchSourceConnector]:
        """All Confluence connectors owned by the user in this search space."""
        result = await self._db_session.execute(
            select(SearchSourceConnector).filter(
                and_(
                    SearchSourceConnector.search_space_id == search_space_id,
                    SearchSourceConnector.user_id == user_id,
                    SearchSourceConnector.connector_type
                    == SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
                )
            )
        )
        return result.scalars().all()

    async def _get_connector_for_document(
        self, document: Document, user_id: str
    ) -> SearchSourceConnector | None:
        """Fetch the document's connector, enforcing ownership by user_id."""
        if not document.connector_id:
            return None
        result = await self._db_session.execute(
            select(SearchSourceConnector).filter(
                and_(
                    SearchSourceConnector.id == document.connector_id,
                    SearchSourceConnector.user_id == user_id,
                )
            )
        )
        return result.scalars().first()
diff --git a/surfsense_backend/app/services/jira/__init__.py b/surfsense_backend/app/services/jira/__init__.py
new file mode 100644
index 000000000..fad49b68d
--- /dev/null
+++ b/surfsense_backend/app/services/jira/__init__.py
@@ -0,0 +1,13 @@
+from app.services.jira.kb_sync_service import JiraKBSyncService
+from app.services.jira.tool_metadata_service import (
+ JiraIssue,
+ JiraToolMetadataService,
+ JiraWorkspace,
+)
+
+__all__ = [
+ "JiraIssue",
+ "JiraKBSyncService",
+ "JiraToolMetadataService",
+ "JiraWorkspace",
+]
diff --git a/surfsense_backend/app/services/jira/kb_sync_service.py b/surfsense_backend/app/services/jira/kb_sync_service.py
new file mode 100644
index 000000000..8d7fd6bb4
--- /dev/null
+++ b/surfsense_backend/app/services/jira/kb_sync_service.py
@@ -0,0 +1,229 @@
+import asyncio
+import logging
+from datetime import datetime
+
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.connectors.jira_history import JiraHistoryConnector
+from app.db import Document, DocumentType
+from app.services.llm_service import get_user_long_context_llm
+from app.utils.document_converters import (
+ create_document_chunks,
+ embed_text,
+ generate_content_hash,
+ generate_document_summary,
+ generate_unique_identifier_hash,
+)
+
+logger = logging.getLogger(__name__)
+
+
class JiraKBSyncService:
    """Syncs Jira issue documents to the knowledge base after HITL actions."""

    def __init__(self, db_session: AsyncSession):
        # Session is owned by the caller; this service commits/rolls back on it.
        self.db_session = db_session

    async def sync_after_create(
        self,
        issue_id: str,
        issue_identifier: str,
        issue_title: str,
        description: str | None,
        state: str | None,
        connector_id: int,
        search_space_id: int,
        user_id: str,
    ) -> dict:
        """Index a freshly created Jira issue into the knowledge base.

        Idempotent: if the issue is already indexed (same unique identifier
        hash) it is left untouched. Returns ``{"status": "success"}`` or
        ``{"status": "error", "message": ...}``.
        """
        # Imported lazily to avoid a circular import with the indexer tasks.
        from app.tasks.connector_indexers.base import (
            check_document_by_unique_identifier,
            check_duplicate_document_by_hash,
            get_current_timestamp,
            safe_set_chunks,
        )

        try:
            unique_hash = generate_unique_identifier_hash(
                DocumentType.JIRA_CONNECTOR, issue_id, search_space_id
            )

            existing = await check_document_by_unique_identifier(
                self.db_session, unique_hash
            )
            if existing:
                logger.info(
                    "Document for Jira issue %s already exists (doc_id=%s), skipping",
                    issue_identifier, existing.id,
                )
                return {"status": "success"}

            indexable_content = (description or "").strip()
            if not indexable_content:
                # Issues without a description still get a searchable stub.
                indexable_content = f"Jira Issue {issue_identifier}: {issue_title}"

            issue_content = f"# {issue_identifier}: {issue_title}\n\n{indexable_content}"

            content_hash = generate_content_hash(issue_content, search_space_id)

            with self.db_session.no_autoflush:
                dup = await check_duplicate_document_by_hash(
                    self.db_session, content_hash
                )
                if dup:
                    # Identical content already indexed elsewhere; fall back to
                    # the unique identifier hash to avoid a constraint clash.
                    content_hash = unique_hash

            user_llm = await get_user_long_context_llm(
                self.db_session, user_id, search_space_id, disable_streaming=True,
            )

            doc_metadata_for_summary = {
                "issue_id": issue_identifier,
                "issue_title": issue_title,
                "document_type": "Jira Issue",
                "connector_type": "Jira",
            }

            if user_llm:
                summary_content, summary_embedding = await generate_document_summary(
                    issue_content, user_llm, doc_metadata_for_summary
                )
            else:
                # No LLM configured: embed a plain concatenation instead.
                summary_content = f"Jira Issue {issue_identifier}: {issue_title}\n\n{issue_content}"
                summary_embedding = embed_text(summary_content)

            chunks = await create_document_chunks(issue_content)
            now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            document = Document(
                title=f"{issue_identifier}: {issue_title}",
                document_type=DocumentType.JIRA_CONNECTOR,
                document_metadata={
                    "issue_id": issue_id,
                    "issue_identifier": issue_identifier,
                    "issue_title": issue_title,
                    "state": state or "Unknown",
                    "indexed_at": now_str,
                    "connector_id": connector_id,
                },
                content=summary_content,
                content_hash=content_hash,
                unique_identifier_hash=unique_hash,
                embedding=summary_embedding,
                search_space_id=search_space_id,
                connector_id=connector_id,
                updated_at=get_current_timestamp(),
                created_by_id=user_id,
            )

            self.db_session.add(document)
            await self.db_session.flush()
            await safe_set_chunks(self.db_session, document, chunks)
            await self.db_session.commit()

            logger.info(
                "KB sync after create succeeded: doc_id=%s, issue=%s",
                document.id, issue_identifier,
            )
            return {"status": "success"}

        except Exception as e:
            error_str = str(e).lower()
            # A concurrent sync may have inserted the same document; treat a
            # unique-constraint violation as a benign race.
            if "duplicate key value violates unique constraint" in error_str or "uniqueviolationerror" in error_str:
                await self.db_session.rollback()
                return {"status": "error", "message": "Duplicate document detected"}

            logger.error("KB sync after create failed for issue %s: %s", issue_identifier, e, exc_info=True)
            await self.db_session.rollback()
            return {"status": "error", "message": str(e)}

    async def sync_after_update(
        self,
        document_id: int,
        issue_id: str,
        user_id: str,
        search_space_id: int,
    ) -> dict:
        """Re-index an already-indexed Jira issue after an update.

        Fetches the current issue from Jira, regenerates summary, embedding
        and chunks, and updates the existing KB document in place. Returns
        ``{"status": "success"}``, ``{"status": "not_indexed"}`` or
        ``{"status": "error", "message": ...}``.
        """
        from app.tasks.connector_indexers.base import (
            get_current_timestamp,
            safe_set_chunks,
        )

        try:
            document = await self.db_session.get(Document, document_id)
            if not document:
                return {"status": "not_indexed"}

            connector_id = document.connector_id
            if not connector_id:
                return {"status": "error", "message": "Document has no connector_id"}

            jira_history = JiraHistoryConnector(
                session=self.db_session, connector_id=connector_id
            )
            jira_client = await jira_history._get_jira_client()
            # The Jira client is synchronous; run it off the event loop.
            issue_raw = await asyncio.to_thread(jira_client.get_issue, issue_id)
            formatted = jira_client.format_issue(issue_raw)
            issue_content = jira_client.format_issue_to_markdown(formatted)

            if not issue_content:
                return {"status": "error", "message": "Issue produced empty content"}

            issue_identifier = formatted.get("key", "")
            issue_title = formatted.get("title", "")
            state = formatted.get("status", "Unknown")
            comment_count = len(formatted.get("comments", []))

            user_llm = await get_user_long_context_llm(
                self.db_session, user_id, search_space_id, disable_streaming=True
            )

            if user_llm:
                doc_meta = {
                    "issue_key": issue_identifier,
                    "issue_title": issue_title,
                    "status": state,
                    "document_type": "Jira Issue",
                    "connector_type": "Jira",
                }
                summary_content, summary_embedding = await generate_document_summary(
                    issue_content, user_llm, doc_meta
                )
            else:
                # No LLM configured: embed a plain concatenation instead.
                summary_content = f"Jira Issue {issue_identifier}: {issue_title}\n\n{issue_content}"
                summary_embedding = embed_text(summary_content)

            chunks = await create_document_chunks(issue_content)

            document.title = f"{issue_identifier}: {issue_title}"
            document.content = summary_content
            document.content_hash = generate_content_hash(issue_content, search_space_id)
            document.embedding = summary_embedding

            from sqlalchemy.orm.attributes import flag_modified

            document.document_metadata = {
                **(document.document_metadata or {}),
                "issue_id": issue_id,
                "issue_identifier": issue_identifier,
                "issue_title": issue_title,
                "state": state,
                "comment_count": comment_count,
                "indexed_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "connector_id": connector_id,
            }
            # JSON column: mark dirty so SQLAlchemy persists the new dict.
            flag_modified(document, "document_metadata")
            await safe_set_chunks(self.db_session, document, chunks)
            document.updated_at = get_current_timestamp()

            await self.db_session.commit()

            logger.info(
                "KB sync successful for document %s (%s: %s)",
                document_id, issue_identifier, issue_title,
            )
            return {"status": "success"}

        except Exception as e:
            logger.error("KB sync failed for document %s: %s", document_id, e, exc_info=True)
            await self.db_session.rollback()
            return {"status": "error", "message": str(e)}
diff --git a/surfsense_backend/app/services/jira/tool_metadata_service.py b/surfsense_backend/app/services/jira/tool_metadata_service.py
new file mode 100644
index 000000000..d8e7f2d3d
--- /dev/null
+++ b/surfsense_backend/app/services/jira/tool_metadata_service.py
@@ -0,0 +1,327 @@
+import asyncio
+import logging
+from dataclasses import dataclass
+
+from sqlalchemy import and_, func, or_
+from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.future import select
+from sqlalchemy.orm.attributes import flag_modified
+
+from app.connectors.jira_history import JiraHistoryConnector
+from app.db import (
+ Document,
+ DocumentType,
+ SearchSourceConnector,
+ SearchSourceConnectorType,
+)
+
+logger = logging.getLogger(__name__)
+
+
@dataclass
class JiraWorkspace:
    """A Jira connector presented as a workspace in tool context."""

    id: int
    name: str
    base_url: str

    @classmethod
    def from_connector(cls, connector: SearchSourceConnector) -> "JiraWorkspace":
        """Build a workspace record from a connector row."""
        cfg = connector.config
        return cls(id=connector.id, name=connector.name, base_url=cfg.get("base_url", ""))

    def to_dict(self) -> dict:
        """Serialize to a plain dict for interrupt payloads."""
        return dict(id=self.id, name=self.name, base_url=self.base_url)
+
+
@dataclass
class JiraIssue:
    """An indexed Jira issue resolved from the knowledge base."""

    issue_id: str
    issue_identifier: str
    issue_title: str
    state: str
    connector_id: int
    document_id: int
    indexed_at: str | None

    @classmethod
    def from_document(cls, document: Document) -> "JiraIssue":
        """Hydrate an issue record from a KB document and its metadata."""
        meta = document.document_metadata or {}
        title = meta.get("issue_title", document.title)
        return cls(
            issue_id=meta.get("issue_id", ""),
            issue_identifier=meta.get("issue_identifier", ""),
            issue_title=title,
            state=meta.get("state", ""),
            connector_id=document.connector_id,
            document_id=document.id,
            indexed_at=meta.get("indexed_at"),
        )

    def to_dict(self) -> dict:
        """Serialize to a plain dict for interrupt payloads."""
        return dict(
            issue_id=self.issue_id,
            issue_identifier=self.issue_identifier,
            issue_title=self.issue_title,
            state=self.state,
            connector_id=self.connector_id,
            document_id=self.document_id,
            indexed_at=self.indexed_at,
        )
+
+
+class JiraToolMetadataService:
+ """Builds interrupt context for Jira HITL tools."""
+
    def __init__(self, db_session: AsyncSession):
        # Session is owned by the caller; used for KB lookups and auth flags.
        self._db_session = db_session
+
    async def _check_account_health(self, connector: SearchSourceConnector) -> bool:
        """Check if the Jira connector auth is still valid.

        Returns True if auth is expired/invalid, False if healthy. A failed
        check persists ``auth_expired`` on the connector config (best effort)
        so the UI can prompt for re-authentication.
        """
        try:
            jira_history = JiraHistoryConnector(
                session=self._db_session, connector_id=connector.id
            )
            jira_client = await jira_history._get_jira_client()
            # The Jira client is synchronous; run the probe off the event loop.
            await asyncio.to_thread(jira_client.get_myself)
            return False
        except Exception as e:
            logger.warning(
                "Jira connector %s health check failed: %s", connector.id, e
            )
            try:
                connector.config = {**connector.config, "auth_expired": True}
                flag_modified(connector, "config")
                await self._db_session.commit()
                await self._db_session.refresh(connector)
            except Exception:
                # Persisting the flag is best-effort; the result still
                # reflects the failed check.
                logger.warning(
                    "Failed to persist auth_expired for connector %s",
                    connector.id,
                    exc_info=True,
                )
            return True
+
    async def get_creation_context(self, search_space_id: int, user_id: str) -> dict:
        """Return context needed to create a new Jira issue.

        Fetches all connected Jira accounts, and for the first healthy one
        fetches projects, issue types, and priorities.
        """
        connectors = await self._get_all_jira_connectors(search_space_id, user_id)
        if not connectors:
            return {"error": "No Jira account connected"}

        accounts = []
        projects = []
        issue_types = []
        priorities = []
        fetched_context = False

        for connector in connectors:
            auth_expired = await self._check_account_health(connector)
            workspace = JiraWorkspace.from_connector(connector)
            account_info = {
                **workspace.to_dict(),
                "auth_expired": auth_expired,
            }
            accounts.append(account_info)

            # Only the first healthy account supplies the option lists; later
            # accounts are still reported with their health status.
            if not auth_expired and not fetched_context:
                try:
                    jira_history = JiraHistoryConnector(
                        session=self._db_session, connector_id=connector.id
                    )
                    jira_client = await jira_history._get_jira_client()
                    # Synchronous client calls; run them off the event loop.
                    raw_projects = await asyncio.to_thread(jira_client.get_projects)
                    projects = [
                        {"id": p.get("id"), "key": p.get("key"), "name": p.get("name")}
                        for p in raw_projects
                    ]
                    raw_types = await asyncio.to_thread(jira_client.get_issue_types)
                    issue_types = [
                        {"id": t.get("id"), "name": t.get("name")}
                        for t in raw_types
                        if not t.get("subtask", False)  # sub-task types are filtered out
                    ]
                    raw_priorities = await asyncio.to_thread(jira_client.get_priorities)
                    priorities = [
                        {"id": p.get("id"), "name": p.get("name")}
                        for p in raw_priorities
                    ]
                    fetched_context = True
                except Exception as e:
                    # Best-effort: a failed fetch must not hide the accounts.
                    logger.warning(
                        "Failed to fetch Jira context for connector %s: %s",
                        connector.id, e,
                    )

        return {
            "accounts": accounts,
            "projects": projects,
            "issue_types": issue_types,
            "priorities": priorities,
        }
+
    async def get_update_context(
        self, search_space_id: int, user_id: str, issue_ref: str
    ) -> dict:
        """Return context needed to update an indexed Jira issue.

        Resolves the issue from the KB, then fetches current details from the
        Jira API.
        """
        document = await self._resolve_issue(search_space_id, user_id, issue_ref)
        if not document:
            return {
                "error": f"Issue '{issue_ref}' not found in your synced Jira issues. "
                "Please make sure the issue is indexed in your knowledge base."
            }

        connector = await self._get_connector_for_document(document, user_id)
        if not connector:
            return {"error": "Connector not found or access denied"}

        auth_expired = await self._check_account_health(connector)
        if auth_expired:
            return {
                "error": "Jira authentication has expired. Please re-authenticate.",
                "auth_expired": True,
                "connector_id": connector.id,
            }

        workspace = JiraWorkspace.from_connector(connector)
        issue = JiraIssue.from_document(document)

        try:
            jira_history = JiraHistoryConnector(
                session=self._db_session, connector_id=connector.id
            )
            jira_client = await jira_history._get_jira_client()
            # Synchronous client call; run it off the event loop.
            issue_data = await asyncio.to_thread(
                jira_client.get_issue, issue.issue_id
            )
            formatted = jira_client.format_issue(issue_data)
        except Exception as e:
            error_str = str(e).lower()
            # Heuristic: auth-looking failures are surfaced as expired
            # credentials so the UI can offer re-authentication.
            if "401" in error_str or "403" in error_str or "authentication" in error_str:
                return {
                    "error": f"Failed to fetch Jira issue: {e!s}",
                    "auth_expired": True,
                    "connector_id": connector.id,
                }
            return {"error": f"Failed to fetch Jira issue: {e!s}"}

        return {
            "account": {**workspace.to_dict(), "auth_expired": False},
            "issue": {
                # Prefer live API values; fall back to KB metadata.
                "issue_id": formatted.get("key", issue.issue_id),
                "issue_identifier": formatted.get("key", issue.issue_identifier),
                "issue_title": formatted.get("title", issue.issue_title),
                "state": formatted.get("status", "Unknown"),
                "priority": formatted.get("priority", "Unknown"),
                "issue_type": formatted.get("issue_type", "Unknown"),
                "assignee": formatted.get("assignee"),
                "description": formatted.get("description"),
                "project": formatted.get("project", ""),
                "document_id": issue.document_id,
                "indexed_at": issue.indexed_at,
            },
        }
+
+ async def get_deletion_context(
+ self, search_space_id: int, user_id: str, issue_ref: str
+ ) -> dict:
+ """Return context needed to delete a Jira issue (KB metadata only, no API call)."""
+ document = await self._resolve_issue(search_space_id, user_id, issue_ref)
+ if not document:
+ return {
+ "error": f"Issue '{issue_ref}' not found in your synced Jira issues. "
+ "Please make sure the issue is indexed in your knowledge base."
+ }
+
+ connector = await self._get_connector_for_document(document, user_id)
+ if not connector:
+ return {"error": "Connector not found or access denied"}
+
+ auth_expired = connector.config.get("auth_expired", False)
+ workspace = JiraWorkspace.from_connector(connector)
+ issue = JiraIssue.from_document(document)
+
+ return {
+ "account": {**workspace.to_dict(), "auth_expired": auth_expired},
+ "issue": issue.to_dict(),
+ }
+
+ async def _resolve_issue(
+ self, search_space_id: int, user_id: str, issue_ref: str
+ ) -> Document | None:
+ """Resolve an issue from KB: issue_identifier -> issue_title -> document.title."""
+ ref_lower = issue_ref.lower()
+
+ result = await self._db_session.execute(
+ select(Document)
+ .join(
+ SearchSourceConnector, Document.connector_id == SearchSourceConnector.id
+ )
+ .filter(
+ and_(
+ Document.search_space_id == search_space_id,
+ Document.document_type == DocumentType.JIRA_CONNECTOR,
+ SearchSourceConnector.user_id == user_id,
+ or_(
+ func.lower(
+ Document.document_metadata.op("->>")("issue_identifier")
+ )
+ == ref_lower,
+ func.lower(Document.document_metadata.op("->>")("issue_title"))
+ == ref_lower,
+ func.lower(Document.title) == ref_lower,
+ ),
+ )
+ )
+ .order_by(Document.updated_at.desc().nullslast())
+ .limit(1)
+ )
+ return result.scalars().first()
+
+ async def _get_all_jira_connectors(
+ self, search_space_id: int, user_id: str
+ ) -> list[SearchSourceConnector]:
+ result = await self._db_session.execute(
+ select(SearchSourceConnector).filter(
+ and_(
+ SearchSourceConnector.search_space_id == search_space_id,
+ SearchSourceConnector.user_id == user_id,
+ SearchSourceConnector.connector_type
+ == SearchSourceConnectorType.JIRA_CONNECTOR,
+ )
+ )
+ )
+ return result.scalars().all()
+
+ async def _get_connector_for_document(
+ self, document: Document, user_id: str
+ ) -> SearchSourceConnector | None:
+ if not document.connector_id:
+ return None
+ result = await self._db_session.execute(
+ select(SearchSourceConnector).filter(
+ and_(
+ SearchSourceConnector.id == document.connector_id,
+ SearchSourceConnector.user_id == user_id,
+ )
+ )
+ )
+ return result.scalars().first()
diff --git a/surfsense_web/app/dashboard/[search_space_id]/new-chat/[[...chat_id]]/page.tsx b/surfsense_web/app/dashboard/[search_space_id]/new-chat/[[...chat_id]]/page.tsx
index 90f4fb889..a5cadad4e 100644
--- a/surfsense_web/app/dashboard/[search_space_id]/new-chat/[[...chat_id]]/page.tsx
+++ b/surfsense_web/app/dashboard/[search_space_id]/new-chat/[[...chat_id]]/page.tsx
@@ -56,6 +56,16 @@ import {
CreateGoogleDriveFileToolUI,
DeleteGoogleDriveFileToolUI,
} from "@/components/tool-ui/google-drive";
+import {
+ CreateJiraIssueToolUI,
+ DeleteJiraIssueToolUI,
+ UpdateJiraIssueToolUI,
+} from "@/components/tool-ui/jira";
+import {
+ CreateConfluencePageToolUI,
+ DeleteConfluencePageToolUI,
+ UpdateConfluencePageToolUI,
+} from "@/components/tool-ui/confluence";
import {
CreateLinearIssueToolUI,
DeleteLinearIssueToolUI,
@@ -197,6 +207,12 @@ const TOOLS_WITH_UI = new Set([
"update_gmail_draft",
"send_gmail_email",
"trash_gmail_email",
+ "create_jira_issue",
+ "update_jira_issue",
+ "delete_jira_issue",
+ "create_confluence_page",
+ "update_confluence_page",
+ "delete_confluence_page",
"execute",
// "write_todos", // Disabled for now
]);
@@ -1723,6 +1739,12 @@ export default function NewChatPage() {
+ {phase === "rejected" + ? "Confluence Page Rejected" + : phase === "processing" || phase === "complete" + ? "Confluence Page Approved" + : "Create Confluence Page"} +
+ {phase === "processing" ? ( ++ {pendingEdits ? "Page created with your changes" : "Page created"} +
+ ) : phase === "rejected" ? ( ++ Page creation was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{interruptData.context.error}
+ ) : ( + <> + {accounts.length > 0 && ( ++ Confluence Account * +
+ ++ Space * +
+ +{pendingEdits?.title ?? args.title}
+ )} + {(pendingEdits?.content ?? args.content) != null && ( ++ All Confluence accounts expired +
+{result.message}
++ Additional Confluence permissions required +
+{result.message}
+Failed to create Confluence page
+{result.message}
++ {result.message || "Confluence page created successfully"} +
++ {phase === "rejected" + ? "Confluence Page Deletion Rejected" + : phase === "processing" || phase === "complete" + ? "Confluence Page Deletion Approved" + : "Delete Confluence Page"} +
+ {phase === "processing" ? ( +Page deleted
+ ) : phase === "rejected" ? ( ++ Page deletion was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{context.error}
+ ) : ( + <> + {context.account && ( +Confluence Account
+Page to Delete
++ Confluence authentication expired +
+{result.message}
++ Additional Confluence permissions required +
+{result.message}
+Failed to delete Confluence page
+{result.message}
++ Page not found +
+{result.message}
+Partial success
+{result.warning}
++ {result.message || "Confluence page deleted successfully"} +
++ {phase === "rejected" + ? "Confluence Page Update Rejected" + : phase === "processing" || phase === "complete" + ? "Confluence Page Update Approved" + : "Update Confluence Page"} +
+ {phase === "processing" ? ( ++ {hasPanelEdits ? "Page updated with your changes" : "Page updated"} +
+ ) : phase === "rejected" ? ( ++ Page update was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{context.error}
+ ) : ( + <> + {context?.account && ( +Confluence Account
+Current Page
++ {String(hasPanelEdits ? editedArgs.title : (actionArgs.new_title ?? args.new_title))} +
+ )} + {(hasPanelEdits ? editedArgs.content : (actionArgs.new_content ?? args.new_content)) && ( +No changes proposed
+ )} ++ Confluence authentication expired +
+{result.message}
++ Additional Confluence permissions required +
+{result.message}
+Failed to update Confluence page
+{result.message}
++ Page not found +
+{result.message}
++ {result.message || "Confluence page updated successfully"} +
++ {phase === "rejected" + ? "Jira Issue Rejected" + : phase === "processing" || phase === "complete" + ? "Jira Issue Approved" + : "Create Jira Issue"} +
+ {phase === "processing" ? ( ++ {pendingEdits ? "Issue created with your changes" : "Issue created"} +
+ ) : phase === "rejected" ? ( ++ Issue creation was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{interruptData.context.error}
+ ) : ( + <> + {accounts.length > 0 && ( ++ Jira Account * +
+ ++ Project * +
+ +Issue Type
+ +Priority
+ +{pendingEdits?.title ?? args.summary}
+ )} + {(pendingEdits?.description ?? args.description) != null && ( ++ All Jira accounts expired +
+{result.message}
++ Additional Jira permissions required +
+{result.message}
+Failed to create Jira issue
+{result.message}
++ {result.message || "Jira issue created successfully"} +
++ {phase === "rejected" + ? "Jira Issue Deletion Rejected" + : phase === "processing" || phase === "complete" + ? "Jira Issue Deletion Approved" + : "Delete Jira Issue"} +
+ {phase === "processing" ? ( +Issue deleted
+ ) : phase === "rejected" ? ( ++ Issue deletion was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{context.error}
+ ) : ( + <> + {account && ( +Jira Account
+Issue to Delete
++ Jira authentication expired +
+{result.message}
++ Additional Jira permissions required +
+{result.message}
+Failed to delete Jira issue
+{result.message}
++ Issue not found +
+{result.message}
+Partial success
+{result.warning}
++ {result.message || "Jira issue deleted successfully"} +
++ {phase === "rejected" + ? "Jira Issue Update Rejected" + : phase === "processing" || phase === "complete" + ? "Jira Issue Update Approved" + : "Update Jira Issue"} +
+ {phase === "processing" ? ( ++ {hasPanelEdits ? "Issue updated with your changes" : "Issue updated"} +
+ ) : phase === "rejected" ? ( ++ Issue update was cancelled +
+ ) : ( ++ Requires your approval to proceed +
+ )} +{context.error}
+ ) : ( + <> + {account && ( +Jira Account
+Current Issue
+Priority
+ ++ {String(hasPanelEdits ? editedArgs.summary : (actionArgs.new_summary ?? args.new_summary))} +
+ )} + {(hasPanelEdits ? editedArgs.description : (actionArgs.new_description ?? args.new_description)) && ( +No changes proposed
+ )} ++ Jira authentication expired +
+{result.message}
++ Additional Jira permissions required +
+{result.message}
+Failed to update Jira issue
+{result.message}
++ Issue not found +
+{result.message}
++ {result.message || "Jira issue updated successfully"} +
+