feat: added circleback connector

This commit is contained in:
DESKTOP-RTLN3BA\$punk 2025-12-30 09:00:59 -08:00
parent 23870042f3
commit c19d300c9d
27 changed files with 1153 additions and 97 deletions

View file

@ -57,18 +57,17 @@ def upgrade() -> None:
def downgrade() -> None: def downgrade() -> None:
"""Remove 'GOOGLE_DRIVE_CONNECTOR' from enum types. """Remove 'GOOGLE_DRIVE_CONNECTOR' from enum types.
Note: PostgreSQL doesn't support removing enum values directly. Note: PostgreSQL doesn't support removing enum values directly.
This would require recreating the enum type, which is complex and risky. This would require recreating the enum type, which is complex and risky.
For now, we'll leave the enum values in place. For now, we'll leave the enum values in place.
In a production environment with strict downgrade requirements, you would need to: In a production environment with strict downgrade requirements, you would need to:
1. Create new enum types without the value 1. Create new enum types without the value
2. Convert all columns to use the new type 2. Convert all columns to use the new type
3. Drop the old enum type 3. Drop the old enum type
4. Rename the new type to the old name 4. Rename the new type to the old name
This is left as pass to avoid accidental data loss. This is left as pass to avoid accidental data loss.
""" """
pass pass

View file

@ -19,9 +19,9 @@ depends_on: str | Sequence[str] | None = None
def upgrade() -> None: def upgrade() -> None:
from sqlalchemy import text from sqlalchemy import text
connection = op.get_bind() connection = op.get_bind()
connection.execute( connection.execute(
text( text(
""" """
@ -39,9 +39,9 @@ def upgrade() -> None:
""" """
) )
) )
connection.commit() connection.commit()
connection.execute( connection.execute(
text( text(
""" """
@ -51,15 +51,15 @@ def upgrade() -> None:
""" """
) )
) )
connection.commit() connection.commit()
def downgrade() -> None: def downgrade() -> None:
from sqlalchemy import text from sqlalchemy import text
connection = op.get_bind() connection = op.get_bind()
connection.execute( connection.execute(
text( text(
""" """
@ -69,6 +69,5 @@ def downgrade() -> None:
""" """
) )
) )
connection.commit()
connection.commit()

View file

@ -0,0 +1,73 @@
"""Add Circleback connector enums
Revision ID: 56
Revises: 55
Create Date: 2025-12-30 12:00:00.000000
"""
from collections.abc import Sequence
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "56"
down_revision: str | None = "55"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
    """Safely add 'CIRCLEBACK' to documenttype and 'CIRCLEBACK_CONNECTOR' to searchsourceconnectortype enums if missing."""
    # Both additions follow the identical guarded ALTER TYPE pattern, so the
    # SQL is generated by one helper instead of two copy-pasted DO blocks.
    _add_enum_value_if_missing("documenttype", "CIRCLEBACK")
    _add_enum_value_if_missing("searchsourceconnectortype", "CIRCLEBACK_CONNECTOR")


def _add_enum_value_if_missing(type_name: str, value: str) -> None:
    """Append *value* to the PostgreSQL enum *type_name* only if it is absent.

    The existence check against pg_enum makes the migration idempotent: the
    ALTER TYPE is skipped (instead of erroring) when the label already exists.
    Inputs are migration-authored constants, not user data, so f-string
    interpolation into the SQL is safe here.
    """
    op.execute(
        f"""
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM pg_type t
                JOIN pg_enum e ON t.oid = e.enumtypid
                WHERE t.typname = '{type_name}' AND e.enumlabel = '{value}'
            ) THEN
                ALTER TYPE {type_name} ADD VALUE '{value}';
            END IF;
        END
        $$;
        """
    )
def downgrade() -> None:
    """No-op downgrade: the added enum labels are intentionally left in place.

    PostgreSQL cannot drop a value from an existing enum type. Actually
    removing 'CIRCLEBACK' / 'CIRCLEBACK_CONNECTOR' would mean building a
    replacement enum without those labels, migrating every dependent column
    to it, dropping the old type, and renaming the new one back — a complex
    and risky sequence. To avoid accidental data loss, this downgrade
    deliberately does nothing.
    """

View file

@ -47,6 +47,7 @@ _ALL_CONNECTORS: list[str] = [
"NOTE", "NOTE",
"BOOKSTACK_CONNECTOR", "BOOKSTACK_CONNECTOR",
"CRAWLED_URL", "CRAWLED_URL",
"CIRCLEBACK",
] ]

View file

@ -8,13 +8,12 @@ from .folder_manager import get_files_in_folder, list_folder_contents
__all__ = [ __all__ = [
"GoogleDriveClient", "GoogleDriveClient",
"get_valid_credentials",
"validate_credentials",
"download_and_process_file",
"get_files_in_folder",
"list_folder_contents",
"get_start_page_token",
"fetch_all_changes",
"categorize_change", "categorize_change",
"download_and_process_file",
"fetch_all_changes",
"get_files_in_folder",
"get_start_page_token",
"get_valid_credentials",
"list_folder_contents",
"validate_credentials",
] ]

View file

@ -202,4 +202,3 @@ async def fetch_all_changes(
except Exception as e: except Exception as e:
logger.error(f"Error fetching all changes: {e!s}", exc_info=True) logger.error(f"Error fetching all changes: {e!s}", exc_info=True)
return all_changes, current_token, f"Error fetching all changes: {e!s}" return all_changes, current_token, f"Error fetching all changes: {e!s}"

View file

@ -2,7 +2,6 @@
from typing import Any from typing import Any
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build from googleapiclient.discovery import build
from googleapiclient.errors import HttpError from googleapiclient.errors import HttpError
from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.asyncio import AsyncSession
@ -107,16 +106,18 @@ class GoogleDriveClient:
""" """
try: try:
service = await self.get_service() service = await self.get_service()
file = service.files().get(fileId=file_id, fields=fields, supportsAllDrives=True).execute() file = (
service.files()
.get(fileId=file_id, fields=fields, supportsAllDrives=True)
.execute()
)
return file, None return file, None
except HttpError as e: except HttpError as e:
return None, f"HTTP error getting file metadata: {e.resp.status}" return None, f"HTTP error getting file metadata: {e.resp.status}"
except Exception as e: except Exception as e:
return None, f"Error getting file metadata: {e!s}" return None, f"Error getting file metadata: {e!s}"
async def download_file( async def download_file(self, file_id: str) -> tuple[bytes | None, str | None]:
self, file_id: str
) -> tuple[bytes | None, str | None]:
""" """
Download binary file content. Download binary file content.
@ -164,9 +165,7 @@ class GoogleDriveClient:
try: try:
service = await self.get_service() service = await self.get_service()
content = ( content = (
service.files() service.files().export(fileId=file_id, mimeType=mime_type).execute()
.export(fileId=file_id, mimeType=mime_type)
.execute()
) )
# Content is already bytes from the API # Content is already bytes from the API
@ -180,4 +179,3 @@ class GoogleDriveClient:
return None, f"HTTP error exporting file: {e.resp.status}" return None, f"HTTP error exporting file: {e.resp.status}"
except Exception as e: except Exception as e:
return None, f"Error exporting file: {e!s}" return None, f"Error exporting file: {e!s}"

View file

@ -78,10 +78,10 @@ async def download_and_process_file(
tmp_file.write(content_bytes) tmp_file.write(content_bytes)
temp_file_path = tmp_file.name temp_file_path = tmp_file.name
from app.db import DocumentType
from app.tasks.document_processors.file_processors import ( from app.tasks.document_processors.file_processors import (
process_file_in_background, process_file_in_background,
) )
from app.db import DocumentType
connector_info = { connector_info = {
"type": DocumentType.GOOGLE_DRIVE_FILE, "type": DocumentType.GOOGLE_DRIVE_FILE,
@ -92,7 +92,7 @@ async def download_and_process_file(
"source_connector": "google_drive", "source_connector": "google_drive",
}, },
} }
# Add additional Drive metadata if available # Add additional Drive metadata if available
if "modifiedTime" in file: if "modifiedTime" in file:
connector_info["metadata"]["modified_time"] = file["modifiedTime"] connector_info["metadata"]["modified_time"] = file["modifiedTime"]
@ -102,10 +102,12 @@ async def download_and_process_file(
connector_info["metadata"]["file_size"] = file["size"] connector_info["metadata"]["file_size"] = file["size"]
if "webViewLink" in file: if "webViewLink" in file:
connector_info["metadata"]["web_view_link"] = file["webViewLink"] connector_info["metadata"]["web_view_link"] = file["webViewLink"]
if is_google_workspace_file(mime_type): if is_google_workspace_file(mime_type):
connector_info["metadata"]["exported_as"] = "pdf" connector_info["metadata"]["exported_as"] = "pdf"
connector_info["metadata"]["original_workspace_type"] = mime_type.split(".")[-1] connector_info["metadata"]["original_workspace_type"] = mime_type.split(
"."
)[-1]
logger.info(f"Processing {file_name} with Surfsense's file processor") logger.info(f"Processing {file_name} with Surfsense's file processor")
await process_file_in_background( await process_file_in_background(
@ -132,5 +134,3 @@ async def download_and_process_file(
os.unlink(temp_file_path) os.unlink(temp_file_path)
except Exception as e: except Exception as e:
logger.debug(f"Could not delete temp file {temp_file_path}: {e}") logger.debug(f"Could not delete temp file {temp_file_path}: {e}")

View file

@ -9,7 +9,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select from sqlalchemy.future import select
from sqlalchemy.orm.attributes import flag_modified from sqlalchemy.orm.attributes import flag_modified
from app.db import SearchSourceConnector, SearchSourceConnectorType from app.db import SearchSourceConnector
async def get_valid_credentials( async def get_valid_credentials(
@ -31,9 +31,7 @@ async def get_valid_credentials(
Exception: If token refresh fails Exception: If token refresh fails
""" """
result = await session.execute( result = await session.execute(
select(SearchSourceConnector).filter( select(SearchSourceConnector).filter(SearchSourceConnector.id == connector_id)
SearchSourceConnector.id == connector_id
)
) )
connector = result.scalars().first() connector = result.scalars().first()
@ -95,4 +93,3 @@ def validate_credentials(credentials: Credentials) -> bool:
credentials.refresh_token, credentials.refresh_token,
] ]
) )

View file

@ -26,5 +26,3 @@ def should_skip_file(mime_type: str) -> bool:
def get_export_mime_type(mime_type: str) -> str | None: def get_export_mime_type(mime_type: str) -> str | None:
"""Get export MIME type for Google Workspace files.""" """Get export MIME type for Google Workspace files."""
return EXPORT_FORMATS.get(mime_type) return EXPORT_FORMATS.get(mime_type)

View file

@ -24,7 +24,10 @@ async def list_folders(
""" """
try: try:
# Build query to get only folders # Build query to get only folders
query_parts = ["mimeType = 'application/vnd.google-apps.folder'", "trashed = false"] query_parts = [
"mimeType = 'application/vnd.google-apps.folder'",
"trashed = false",
]
if parent_id: if parent_id:
query_parts.append(f"'{parent_id}' in parents") query_parts.append(f"'{parent_id}' in parents")
@ -68,8 +71,7 @@ async def get_folder_hierarchy(
# Traverse up to root # Traverse up to root
while current_id: while current_id:
file, error = await client.get_file_metadata( file, error = await client.get_file_metadata(
current_id, current_id, fields="id, name, parents, mimeType"
fields="id, name, parents, mimeType"
) )
if error: if error:
@ -189,7 +191,7 @@ async def list_folder_contents(
# Fetch all items with pagination (max 1000 per page) # Fetch all items with pagination (max 1000 per page)
all_items = [] all_items = []
page_token = None page_token = None
while True: while True:
items, next_token, error = await client.list_files( items, next_token, error = await client.list_files(
query=query, query=query,
@ -202,10 +204,10 @@ async def list_folder_contents(
return [], error return [], error
all_items.extend(items) all_items.extend(items)
if not next_token: if not next_token:
break break
page_token = next_token page_token = next_token
for item in all_items: for item in all_items:
@ -226,5 +228,3 @@ async def list_folder_contents(
except Exception as e: except Exception as e:
logger.error(f"Error listing folder contents: {e!s}", exc_info=True) logger.error(f"Error listing folder contents: {e!s}", exc_info=True)
return [], f"Error listing folder contents: {e!s}" return [], f"Error listing folder contents: {e!s}"

View file

@ -51,6 +51,7 @@ class DocumentType(str, Enum):
LUMA_CONNECTOR = "LUMA_CONNECTOR" LUMA_CONNECTOR = "LUMA_CONNECTOR"
ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR" ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR"
BOOKSTACK_CONNECTOR = "BOOKSTACK_CONNECTOR" BOOKSTACK_CONNECTOR = "BOOKSTACK_CONNECTOR"
CIRCLEBACK = "CIRCLEBACK"
NOTE = "NOTE" NOTE = "NOTE"
@ -76,6 +77,7 @@ class SearchSourceConnectorType(str, Enum):
ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR" ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR"
WEBCRAWLER_CONNECTOR = "WEBCRAWLER_CONNECTOR" WEBCRAWLER_CONNECTOR = "WEBCRAWLER_CONNECTOR"
BOOKSTACK_CONNECTOR = "BOOKSTACK_CONNECTOR" BOOKSTACK_CONNECTOR = "BOOKSTACK_CONNECTOR"
CIRCLEBACK_CONNECTOR = "CIRCLEBACK_CONNECTOR"
class LiteLLMProvider(str, Enum): class LiteLLMProvider(str, Enum):

View file

@ -3,17 +3,18 @@ from fastapi import APIRouter
from .airtable_add_connector_route import ( from .airtable_add_connector_route import (
router as airtable_add_connector_router, router as airtable_add_connector_router,
) )
from .circleback_webhook_route import router as circleback_webhook_router
from .documents_routes import router as documents_router from .documents_routes import router as documents_router
from .editor_routes import router as editor_router from .editor_routes import router as editor_router
from .google_calendar_add_connector_route import ( from .google_calendar_add_connector_route import (
router as google_calendar_add_connector_router, router as google_calendar_add_connector_router,
) )
from .google_gmail_add_connector_route import (
router as google_gmail_add_connector_router,
)
from .google_drive_add_connector_route import ( from .google_drive_add_connector_route import (
router as google_drive_add_connector_router, router as google_drive_add_connector_router,
) )
from .google_gmail_add_connector_route import (
router as google_gmail_add_connector_router,
)
from .logs_routes import router as logs_router from .logs_routes import router as logs_router
from .luma_add_connector_route import router as luma_add_connector_router from .luma_add_connector_route import router as luma_add_connector_router
from .new_chat_routes import router as new_chat_router from .new_chat_routes import router as new_chat_router
@ -41,3 +42,4 @@ router.include_router(airtable_add_connector_router)
router.include_router(luma_add_connector_router) router.include_router(luma_add_connector_router)
router.include_router(new_llm_config_router) # LLM configs with prompt configuration router.include_router(new_llm_config_router) # LLM configs with prompt configuration
router.include_router(logs_router) router.include_router(logs_router)
router.include_router(circleback_webhook_router) # Circleback meeting webhooks

View file

@ -0,0 +1,317 @@
"""
Circleback Webhook Route
This module provides a webhook endpoint for receiving meeting data from Circleback.
It processes the incoming webhook payload and saves it as a document in the specified search space.
"""
import logging
from datetime import datetime
from typing import Any
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
logger = logging.getLogger(__name__)
router = APIRouter()
# Pydantic models for Circleback webhook payload
class CirclebackAttendee(BaseModel):
    """Attendee model for Circleback meeting."""

    # Both fields are optional — assumes Circleback may omit either for
    # external or unidentified guests; TODO confirm against Circleback docs.
    name: str | None = None
    email: str | None = None
class CirclebackActionItemAssignee(BaseModel):
    """Assignee model for action items."""

    # Optional: an action item may be unassigned or have a partial identity.
    name: str | None = None
    email: str | None = None
class CirclebackActionItem(BaseModel):
    """Action item model for Circleback meeting."""

    id: int
    title: str
    description: str = ""
    # None when the item has no assignee.
    assignee: CirclebackActionItemAssignee | None = None
    # Markdown rendering only distinguishes "DONE" from any other value.
    status: str = "PENDING"
class CirclebackTranscriptSegment(BaseModel):
    """Transcript segment model for Circleback meeting."""

    speaker: str
    text: str
    # Offset into the meeting in seconds; rendered as [MM:SS] downstream.
    timestamp: float
class CirclebackInsightItem(BaseModel):
    """Individual insight item."""

    # Either free text or a structured key/value mapping; the Markdown
    # renderer branches on which form it receives.
    insight: str | dict[str, Any]
    speaker: str | None = None
    timestamp: float | None = None
class CirclebackWebhookPayload(BaseModel):
    """
    Circleback webhook payload model.

    This model represents the data sent by Circleback when a meeting is processed.
    """

    # Accept both the snake_case field names and their camelCase aliases
    # (Circleback sends camelCase keys such as "createdAt").
    model_config = {"populate_by_name": True}

    id: int = Field(..., description="Circleback meeting ID")
    name: str = Field(..., description="Meeting name")
    created_at: str = Field(
        ..., alias="createdAt", description="Meeting creation date in ISO format"
    )
    duration: float = Field(..., description="Meeting duration in seconds")
    url: str | None = Field(None, description="URL of the virtual meeting")
    recording_url: str | None = Field(
        None,
        alias="recordingUrl",
        description="URL of the meeting recording (valid for 24 hours)",
    )
    tags: list[str] = Field(default_factory=list, description="Meeting tags")
    ical_uid: str | None = Field(
        None, alias="icalUid", description="Unique identifier of the calendar event"
    )
    attendees: list[CirclebackAttendee] = Field(
        default_factory=list, description="Meeting attendees"
    )
    notes: str = Field("", description="Meeting notes in Markdown format")
    action_items: list[CirclebackActionItem] = Field(
        default_factory=list,
        alias="actionItems",
        description="Action items from the meeting",
    )
    transcript: list[CirclebackTranscriptSegment] = Field(
        default_factory=list, description="Meeting transcript segments"
    )
    insights: dict[str, list[CirclebackInsightItem]] = Field(
        default_factory=dict, description="Custom insights from the meeting"
    )
def format_circleback_meeting_to_markdown(payload: "CirclebackWebhookPayload") -> str:
    """
    Convert Circleback webhook payload to a well-formatted Markdown document.

    Sections are emitted in a fixed order: title, meeting details, attendees,
    notes, action items, insights, transcript. Optional sections are skipped
    when their source data is empty.

    Args:
        payload: The Circleback webhook payload

    Returns:
        Markdown string representation of the meeting
    """
    lines = []

    # Title
    lines.append(f"# {payload.name}")
    lines.append("")

    # Meeting metadata
    lines.append("## Meeting Details")
    lines.append("")

    # Parse and format date; fall back to the raw value if it is not ISO-8601.
    try:
        created_dt = datetime.fromisoformat(payload.created_at.replace("Z", "+00:00"))
        formatted_date = created_dt.strftime("%Y-%m-%d %H:%M:%S UTC")
    except (ValueError, AttributeError):
        formatted_date = payload.created_at

    lines.append(f"- **Date:** {formatted_date}")
    lines.append(f"- **Duration:** {int(payload.duration // 60)} minutes")
    if payload.url:
        lines.append(f"- **Meeting URL:** {payload.url}")
    if payload.tags:
        lines.append(f"- **Tags:** {', '.join(payload.tags)}")
    lines.append(
        f"- **Circleback Link:** [View on Circleback](https://app.circleback.ai/meetings/{payload.id})"
    )
    lines.append("")

    # Attendees
    if payload.attendees:
        lines.append("## Attendees")
        lines.append("")
        for attendee in payload.attendees:
            name = attendee.name or "Unknown"
            if attendee.email:
                lines.append(f"- **{name}** ({attendee.email})")
            else:
                lines.append(f"- **{name}**")
        lines.append("")

    # Notes (if provided)
    if payload.notes:
        lines.append("## Meeting Notes")
        lines.append("")
        lines.append(payload.notes)
        lines.append("")

    # Action Items
    if payload.action_items:
        lines.append("## Action Items")
        lines.append("")
        for item in payload.action_items:
            # BUG FIX: both branches previously produced the empty string
            # (the emoji characters were lost), so completion status never
            # rendered. Use explicit checkbox markers instead.
            status_marker = "[x]" if item.status == "DONE" else "[ ]"
            assignee_text = ""
            if item.assignee and item.assignee.name:
                assignee_text = f" (Assigned to: {item.assignee.name})"
            lines.append(f"{status_marker} **{item.title}**{assignee_text}")
            if item.description:
                lines.append(f"  {item.description}")
        lines.append("")

    # Insights
    if payload.insights:
        lines.append("## Insights")
        lines.append("")
        for insight_name, insight_items in payload.insights.items():
            lines.append(f"### {insight_name}")
            lines.append("")
            for insight_item in insight_items:
                if isinstance(insight_item.insight, dict):
                    # Structured insight: render each key/value pair.
                    for key, value in insight_item.insight.items():
                        lines.append(f"- **{key}:** {value}")
                else:
                    speaker_info = (
                        f" _{insight_item.speaker}_" if insight_item.speaker else ""
                    )
                    lines.append(f"- {insight_item.insight}{speaker_info}")
            lines.append("")

    # Transcript
    if payload.transcript:
        lines.append("## Transcript")
        lines.append("")
        for segment in payload.transcript:
            # Format timestamp as MM:SS
            minutes = int(segment.timestamp // 60)
            seconds = int(segment.timestamp % 60)
            timestamp_str = f"[{minutes:02d}:{seconds:02d}]"
            lines.append(f"**{segment.speaker}** {timestamp_str}: {segment.text}")
        lines.append("")

    return "\n".join(lines)
@router.post("/webhooks/circleback/{search_space_id}")
async def receive_circleback_webhook(
    search_space_id: int,
    payload: CirclebackWebhookPayload,
):
    """
    Receive and process a Circleback webhook.

    Accepts meeting data pushed by Circleback, renders it to Markdown, and
    queues an asynchronous Celery task that stores it as a document in the
    given search space. The HTTP response returns as soon as the task is
    queued; processing happens in the background.

    Args:
        search_space_id: The ID of the search space to save the document to
        payload: The Circleback webhook payload containing meeting data

    Returns:
        Success message with document details

    Note:
        This endpoint does not require authentication as it's designed to receive
        webhooks from Circleback. Signature verification can be added later for security.
    """
    try:
        logger.info(
            f"Received Circleback webhook for meeting {payload.id} in search space {search_space_id}"
        )

        # Render the whole meeting into a single Markdown document.
        document_md = format_circleback_meeting_to_markdown(payload)

        # Imported lazily to keep route import time independent of Celery.
        from app.tasks.celery_tasks.document_tasks import (
            process_circleback_meeting_task,
        )

        # Lightweight summary stats carried alongside the document.
        task_metadata = {
            "circleback_meeting_id": payload.id,
            "meeting_name": payload.name,
            "meeting_date": payload.created_at,
            "duration_seconds": payload.duration,
            "meeting_url": payload.url,
            "tags": payload.tags,
            "attendees_count": len(payload.attendees),
            "action_items_count": len(payload.action_items),
            "has_transcript": len(payload.transcript) > 0,
        }

        # Hand off to Celery — the response does not wait for processing.
        process_circleback_meeting_task.delay(
            meeting_id=payload.id,
            meeting_name=payload.name,
            markdown_content=document_md,
            metadata=task_metadata,
            search_space_id=search_space_id,
        )

        logger.info(
            f"Queued Circleback meeting {payload.id} for processing in search space {search_space_id}"
        )

        return {
            "status": "accepted",
            "message": f"Meeting '{payload.name}' queued for processing",
            "meeting_id": payload.id,
            "search_space_id": search_space_id,
        }
    except Exception as e:
        logger.error(f"Error processing Circleback webhook: {e!s}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Failed to process Circleback webhook: {e!s}",
        ) from e
@router.get("/webhooks/circleback/{search_space_id}/info")
async def get_circleback_webhook_info(
    search_space_id: int,
):
    """
    Get information about the Circleback webhook endpoint.

    Returns the POST URL that Circleback's automation should target for this
    search space, together with usage notes for configuring the integration.

    Args:
        search_space_id: The ID of the search space

    Returns:
        Webhook configuration information
    """
    from app.config import config

    # Fall back to a local dev URL when API_BASE_URL is not configured.
    api_root = getattr(config, "API_BASE_URL", "http://localhost:8000")

    return {
        "webhook_url": f"{api_root}/api/v1/webhooks/circleback/{search_space_id}",
        "search_space_id": search_space_id,
        "method": "POST",
        "content_type": "application/json",
        "description": "Use this URL in your Circleback automation to send meeting data to SurfSense",
        "note": "Configure this URL in Circleback Settings → Automations → Create automation → Send webhook request",
    }

View file

@ -28,10 +28,8 @@ from app.config import config
from app.connectors.google_drive import ( from app.connectors.google_drive import (
GoogleDriveClient, GoogleDriveClient,
get_start_page_token, get_start_page_token,
get_valid_credentials,
list_folder_contents, list_folder_contents,
) )
from app.connectors.google_drive.folder_manager import list_folders
from app.db import ( from app.db import (
SearchSourceConnector, SearchSourceConnector,
SearchSourceConnectorType, SearchSourceConnectorType,
@ -111,7 +109,9 @@ async def connect_drive(space_id: int, user: User = Depends(current_active_user)
state=state_encoded, state=state_encoded,
) )
logger.info(f"Initiating Google Drive OAuth for user {user.id}, space {space_id}") logger.info(
f"Initiating Google Drive OAuth for user {user.id}, space {space_id}"
)
return {"auth_url": auth_url} return {"auth_url": auth_url}
except Exception as e: except Exception as e:
@ -146,7 +146,9 @@ async def drive_callback(
user_id = UUID(data["user_id"]) user_id = UUID(data["user_id"])
space_id = data["space_id"] space_id = data["space_id"]
logger.info(f"Processing Google Drive callback for user {user_id}, space {space_id}") logger.info(
f"Processing Google Drive callback for user {user_id}, space {space_id}"
)
# Exchange authorization code for tokens # Exchange authorization code for tokens
flow = get_google_flow() flow = get_google_flow()
@ -200,7 +202,9 @@ async def drive_callback(
flag_modified(db_connector, "config") flag_modified(db_connector, "config")
await session.commit() await session.commit()
logger.info(f"Set initial start page token for connector {db_connector.id}") logger.info(
f"Set initial start page token for connector {db_connector.id}"
)
except Exception as e: except Exception as e:
logger.warning(f"Failed to get initial start page token: {e!s}") logger.warning(f"Failed to get initial start page token: {e!s}")
@ -246,7 +250,7 @@ async def list_google_drive_folders(
): ):
""" """
List folders AND files in user's Google Drive with hierarchical support. List folders AND files in user's Google Drive with hierarchical support.
This is called at index time from the manage connector page to display This is called at index time from the manage connector page to display
the complete file system (folders and files). Only folders are selectable. the complete file system (folders and files). Only folders are selectable.
@ -299,7 +303,7 @@ async def list_google_drive_folders(
f"✅ Listed {len(items)} total items ({folder_count} folders, {file_count} files) for connector {connector_id}" f"✅ Listed {len(items)} total items ({folder_count} folders, {file_count} files) for connector {connector_id}"
+ (f" in folder {parent_id}" if parent_id else " in ROOT") + (f" in folder {parent_id}" if parent_id else " in ROOT")
) )
# Log first few items for debugging # Log first few items for debugging
if items: if items:
logger.info(f"First 3 items: {[item.get('name') for item in items[:3]]}") logger.info(f"First 3 items: {[item.get('name') for item in items[:3]]}")

View file

@ -45,7 +45,6 @@ from app.tasks.connector_indexers import (
index_github_repos, index_github_repos,
index_google_calendar_events, index_google_calendar_events,
index_google_gmail_messages, index_google_gmail_messages,
index_google_drive_files,
index_jira_issues, index_jira_issues,
index_linear_issues, index_linear_issues,
index_luma_events, index_luma_events,
@ -1572,7 +1571,9 @@ async def run_google_drive_indexing(
errors = [] errors = []
# Index each folder # Index each folder
for folder_id, folder_name in zip(folder_id_list, folder_name_list): for folder_id, folder_name in zip(
folder_id_list, folder_name_list, strict=False
):
try: try:
indexed_count, error_message = await index_google_drive_files( indexed_count, error_message = await index_google_drive_files(
session, session,
@ -1589,7 +1590,7 @@ async def run_google_drive_indexing(
else: else:
total_indexed += indexed_count total_indexed += indexed_count
except Exception as e: except Exception as e:
errors.append(f"{folder_name}: {str(e)}") errors.append(f"{folder_name}: {e!s}")
logger.error( logger.error(
f"Error indexing folder {folder_name} ({folder_id}): {e}", f"Error indexing folder {folder_name} ({folder_id}): {e}",
exc_info=True, exc_info=True,

View file

@ -268,3 +268,105 @@ async def _process_file_upload(
) )
logger.error(error_message) logger.error(error_message)
raise raise
@celery_app.task(name="process_circleback_meeting", bind=True)
def process_circleback_meeting_task(
    self,
    meeting_id: int,
    meeting_name: str,
    markdown_content: str,
    metadata: dict,
    search_space_id: int,
):
    """
    Celery task to process Circleback meeting webhook data.

    Bridges the synchronous Celery worker into the async processing
    coroutine via asyncio.run.

    Args:
        meeting_id: Circleback meeting ID
        meeting_name: Name of the meeting
        markdown_content: Meeting content formatted as markdown
        metadata: Meeting metadata dictionary
        search_space_id: ID of the search space
    """
    import asyncio

    # asyncio.run() creates, runs, and reliably closes a fresh event loop
    # (including async-generator shutdown and resetting the current loop),
    # replacing the manual new_event_loop/set_event_loop/close sequence.
    asyncio.run(
        _process_circleback_meeting(
            meeting_id,
            meeting_name,
            markdown_content,
            metadata,
            search_space_id,
        )
    )
async def _process_circleback_meeting(
    meeting_id: int,
    meeting_name: str,
    markdown_content: str,
    metadata: dict,
    search_space_id: int,
):
    """Process Circleback meeting with new session.

    Opens a fresh async DB session, records task start/success/failure via
    TaskLoggingService, and delegates the actual document creation to
    add_circleback_meeting_document.

    Args:
        meeting_id: Circleback meeting ID.
        meeting_name: Human-readable meeting name (used in log messages).
        markdown_content: Meeting content already rendered as Markdown.
        metadata: Extra meeting metadata merged into the task-start log.
        search_space_id: Target search space for the document.

    Raises:
        Exception: re-raises whatever add_circleback_meeting_document raised,
            after recording the failure in the task log.
    """
    # Imported inside the function — presumably to avoid a circular import
    # at module load time; TODO confirm.
    from app.tasks.document_processors.circleback_processor import (
        add_circleback_meeting_document,
    )

    async with get_celery_session_maker()() as session:
        task_logger = TaskLoggingService(session, search_space_id)
        log_entry = await task_logger.log_task_start(
            task_name="process_circleback_meeting",
            source="circleback_webhook",
            message=f"Starting Circleback meeting processing: {meeting_name}",
            metadata={
                "document_type": "CIRCLEBACK",
                "meeting_id": meeting_id,
                "meeting_name": meeting_name,
                **metadata,
            },
        )

        try:
            result = await add_circleback_meeting_document(
                session=session,
                meeting_id=meeting_id,
                meeting_name=meeting_name,
                markdown_content=markdown_content,
                metadata=metadata,
                search_space_id=search_space_id,
            )

            # A truthy result is the newly created document; a falsy result
            # means the processor detected a duplicate and created nothing.
            if result:
                await task_logger.log_task_success(
                    log_entry,
                    f"Successfully processed Circleback meeting: {meeting_name}",
                    {
                        "document_id": result.id,
                        "meeting_id": meeting_id,
                        "content_hash": result.content_hash,
                    },
                )
            else:
                await task_logger.log_task_success(
                    log_entry,
                    f"Circleback meeting document already exists (duplicate): {meeting_name}",
                    {"duplicate_detected": True, "meeting_id": meeting_id},
                )
        except Exception as e:
            await task_logger.log_task_failure(
                log_entry,
                f"Failed to process Circleback meeting: {meeting_name}",
                str(e),
                {"error_type": type(e).__name__, "meeting_id": meeting_id},
            )
            logger.error(f"Error processing Circleback meeting: {e!s}")
            raise

View file

@ -34,8 +34,8 @@ from .discord_indexer import index_discord_messages
from .elasticsearch_indexer import index_elasticsearch_documents from .elasticsearch_indexer import index_elasticsearch_documents
from .github_indexer import index_github_repos from .github_indexer import index_github_repos
from .google_calendar_indexer import index_google_calendar_events from .google_calendar_indexer import index_google_calendar_events
from .google_gmail_indexer import index_google_gmail_messages
from .google_drive_indexer import index_google_drive_files from .google_drive_indexer import index_google_drive_files
from .google_gmail_indexer import index_google_gmail_messages
from .jira_indexer import index_jira_issues from .jira_indexer import index_jira_issues
# Issue tracking and project management # Issue tracking and project management

View file

@ -1,7 +1,6 @@
"""Google Drive indexer using Surfsense file processors.""" """Google Drive indexer using Surfsense file processors."""
import logging import logging
from datetime import datetime
from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.ext.asyncio import AsyncSession
@ -99,11 +98,15 @@ async def index_google_drive_files(
target_folder_id = folder_id target_folder_id = folder_id
target_folder_name = folder_name or "Selected Folder" target_folder_name = folder_name or "Selected Folder"
logger.info(f"Indexing Google Drive folder: {target_folder_name} ({target_folder_id})") logger.info(
f"Indexing Google Drive folder: {target_folder_name} ({target_folder_id})"
)
folder_tokens = connector.config.get("folder_tokens", {}) folder_tokens = connector.config.get("folder_tokens", {})
start_page_token = folder_tokens.get(target_folder_id) start_page_token = folder_tokens.get(target_folder_id)
can_use_delta_sync = use_delta_sync and start_page_token and connector.last_indexed_at can_use_delta_sync = (
use_delta_sync and start_page_token and connector.last_indexed_at
)
if can_use_delta_sync: if can_use_delta_sync:
logger.info(f"Using delta sync for connector {connector_id}") logger.info(f"Using delta sync for connector {connector_id}")
@ -151,9 +154,7 @@ async def index_google_drive_files(
await update_connector_last_indexed(session, connector, update_last_indexed) await update_connector_last_indexed(session, connector, update_last_indexed)
await session.commit() await session.commit()
logger.info( logger.info("Successfully committed Google Drive indexing changes to database")
f"Successfully committed Google Drive indexing changes to database"
)
await task_logger.log_task_success( await task_logger.log_task_success(
log_entry, log_entry,
@ -252,7 +253,9 @@ async def _index_full_scan(
if documents_indexed % 10 == 0 and documents_indexed > 0: if documents_indexed % 10 == 0 and documents_indexed > 0:
await session.commit() await session.commit()
logger.info(f"Committed batch: {documents_indexed} files indexed so far") logger.info(
f"Committed batch: {documents_indexed} files indexed so far"
)
page_token = next_token page_token = next_token
if not page_token: if not page_token:
@ -391,9 +394,7 @@ async def _process_single_file(
return 0, 1 return 0, 1
async def _remove_document( async def _remove_document(session: AsyncSession, file_id: str, search_space_id: int):
session: AsyncSession, file_id: str, search_space_id: int
):
"""Remove a document that was deleted in Drive.""" """Remove a document that was deleted in Drive."""
unique_identifier_hash = generate_unique_identifier_hash( unique_identifier_hash = generate_unique_identifier_hash(
DocumentType.GOOGLE_DRIVE_FILE, file_id, search_space_id DocumentType.GOOGLE_DRIVE_FILE, file_id, search_space_id
@ -406,5 +407,3 @@ async def _remove_document(
if existing_document: if existing_document:
await session.delete(existing_document) await session.delete(existing_document)
logger.info(f"Removed deleted file document: {file_id}") logger.info(f"Removed deleted file document: {file_id}")

View file

@ -0,0 +1,183 @@
"""
Circleback meeting document processor.
This module processes meeting data received from Circleback webhooks
and stores it as searchable documents in the database.
"""
import logging
from typing import Any
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import Document, DocumentType
from app.services.llm_service import get_document_summary_llm
from app.utils.document_converters import (
create_document_chunks,
generate_content_hash,
generate_document_summary,
generate_unique_identifier_hash,
)
from .base import (
check_document_by_unique_identifier,
get_current_timestamp,
)
logger = logging.getLogger(__name__)
async def add_circleback_meeting_document(
    session: AsyncSession,
    meeting_id: int,
    meeting_name: str,
    markdown_content: str,
    metadata: dict[str, Any],
    search_space_id: int,
) -> Document | None:
    """Process and store a Circleback meeting as a searchable document.

    Deduplicates on a unique identifier derived from the Circleback meeting
    ID: an unchanged meeting is returned as-is, a changed one updates the
    existing document in place, and a new one creates a fresh document.

    Args:
        session: Async database session.
        meeting_id: Circleback meeting ID (stable across webhook deliveries).
        meeting_name: Name of the meeting; used as the document title.
        markdown_content: Meeting content formatted as markdown.
        metadata: Meeting metadata dictionary (merged into document metadata).
        search_space_id: ID of the search space the document belongs to.

    Returns:
        The created or updated Document, or the unchanged existing Document.

    Raises:
        SQLAlchemyError: On database errors (after rolling back).
        RuntimeError: On any other processing failure (after rolling back).
    """
    try:
        # Stable identity for this meeting within the search space.
        unique_identifier = f"circleback_{meeting_id}"
        unique_identifier_hash = generate_unique_identifier_hash(
            DocumentType.CIRCLEBACK, unique_identifier, search_space_id
        )

        # Content hash detects whether a re-delivered meeting actually changed.
        content_hash = generate_content_hash(markdown_content, search_space_id)

        # Check if a document with this unique identifier already exists.
        existing_document = await check_document_by_unique_identifier(
            session, unique_identifier_hash
        )
        if existing_document:
            if existing_document.content_hash == content_hash:
                logger.info(f"Circleback meeting {meeting_id} unchanged. Skipping.")
                return existing_document
            # Content has changed - fall through and update the existing document.
            logger.info(
                f"Content changed for Circleback meeting {meeting_id}. Updating document."
            )

        # Summarize the meeting: prefer the configured LLM; fall back to a
        # plain truncation (with no embedding) when none is configured.
        llm = await get_document_summary_llm(session, search_space_id)
        if not llm:
            logger.warning(
                f"No LLM configured for search space {search_space_id}. Using content as summary."
            )
            # Use first 1000 chars as summary if no LLM available.
            summary_content = (
                markdown_content[:1000] + "..."
                if len(markdown_content) > 1000
                else markdown_content
            )
            summary_embedding = None
        else:
            # Only simple scalar metadata values are forwarded to the summarizer.
            summary_metadata = {
                "meeting_name": meeting_name,
                "meeting_id": meeting_id,
                "document_type": "Circleback Meeting",
                **{
                    k: v
                    for k, v in metadata.items()
                    if isinstance(v, str | int | float | bool)
                },
            }
            summary_content, summary_embedding = await generate_document_summary(
                markdown_content, llm, summary_metadata
            )

        # Split the full markdown into retrieval chunks.
        chunks = await create_document_chunks(markdown_content)

        # Convert to BlockNote JSON for editing capability.
        # Imported lazily; presumably to avoid a circular import — TODO confirm.
        from app.utils.blocknote_converter import convert_markdown_to_blocknote

        blocknote_json = await convert_markdown_to_blocknote(markdown_content)
        if not blocknote_json:
            logger.warning(
                f"Failed to convert Circleback meeting {meeting_id} to BlockNote JSON, document will not be editable"
            )

        # Metadata stored on the document itself (distinct from the summary
        # metadata above, which only feeds the LLM).
        document_metadata = {
            "CIRCLEBACK_MEETING_ID": meeting_id,
            "MEETING_NAME": meeting_name,
            "SOURCE": "CIRCLEBACK_WEBHOOK",
            **metadata,
        }

        if existing_document:
            # Update the existing document in place.
            existing_document.title = meeting_name
            existing_document.content = summary_content
            existing_document.content_hash = content_hash
            if summary_embedding is not None:
                # Keep the old embedding when no LLM produced a new one.
                existing_document.embedding = summary_embedding
            existing_document.document_metadata = document_metadata
            existing_document.chunks = chunks
            existing_document.blocknote_document = blocknote_json
            existing_document.content_needs_reindexing = False
            existing_document.updated_at = get_current_timestamp()
            await session.commit()
            await session.refresh(existing_document)
            document = existing_document
            logger.info(
                f"Updated Circleback meeting document {meeting_id} in search space {search_space_id}"
            )
        else:
            # Create a new document.
            document = Document(
                search_space_id=search_space_id,
                title=meeting_name,
                document_type=DocumentType.CIRCLEBACK,
                document_metadata=document_metadata,
                content=summary_content,
                embedding=summary_embedding,
                chunks=chunks,
                content_hash=content_hash,
                unique_identifier_hash=unique_identifier_hash,
                blocknote_document=blocknote_json,
                content_needs_reindexing=False,
                updated_at=get_current_timestamp(),
            )
            session.add(document)
            await session.commit()
            await session.refresh(document)
            logger.info(
                f"Created new Circleback meeting document {meeting_id} in search space {search_space_id}"
            )

        return document

    except SQLAlchemyError as db_error:
        await session.rollback()
        # logger.exception records the traceback, which logger.error dropped.
        logger.exception(
            f"Database error processing Circleback meeting {meeting_id}: {db_error}"
        )
        # Bare raise preserves the original exception and traceback.
        raise
    except Exception as e:
        await session.rollback()
        logger.exception(f"Failed to process Circleback meeting {meeting_id}: {e!s}")
        raise RuntimeError(f"Failed to process Circleback meeting: {e!s}") from e

View file

@ -473,7 +473,8 @@ async def process_file_in_background(
session: AsyncSession, session: AsyncSession,
task_logger: TaskLoggingService, task_logger: TaskLoggingService,
log_entry: Log, log_entry: Log,
connector: dict | None = None, # Optional: {"type": "GOOGLE_DRIVE_FILE", "metadata": {...}} connector: dict
| None = None, # Optional: {"type": "GOOGLE_DRIVE_FILE", "metadata": {...}}
): ):
try: try:
# Check if the file is a markdown or text file # Check if the file is a markdown or text file
@ -926,7 +927,9 @@ async def process_file_in_background(
) )
if connector: if connector:
await _update_document_from_connector(last_created_doc, connector, session) await _update_document_from_connector(
last_created_doc, connector, session
)
await task_logger.log_task_success( await task_logger.log_task_success(
log_entry, log_entry,
@ -1053,7 +1056,9 @@ async def process_file_in_background(
) )
if connector: if connector:
await _update_document_from_connector(doc_result, connector, session) await _update_document_from_connector(
doc_result, connector, session
)
await task_logger.log_task_success( await task_logger.log_task_success(
log_entry, log_entry,

View file

@ -0,0 +1,363 @@
"use client";
import { zodResolver } from "@hookform/resolvers/zod";
import { useAtomValue } from "jotai";
import { ArrowLeft, Check, Copy, ExternalLink, Loader2, Webhook } from "lucide-react";
import { motion } from "motion/react";
import Link from "next/link";
import { useParams, useRouter } from "next/navigation";
import { useEffect, useState } from "react";
import { useForm } from "react-hook-form";
import { toast } from "sonner";
import * as z from "zod";
import { createConnectorMutationAtom } from "@/atoms/connectors/connector-mutation.atoms";
import { connectorsAtom } from "@/atoms/connectors/connector-query.atoms";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
import { Button } from "@/components/ui/button";
import {
Card,
CardContent,
CardDescription,
CardFooter,
CardHeader,
CardTitle,
} from "@/components/ui/card";
import {
Form,
FormControl,
FormDescription,
FormField,
FormItem,
FormLabel,
FormMessage,
} from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { EnumConnectorName } from "@/contracts/enums/connector";
import { getConnectorIcon } from "@/contracts/enums/connectorIcons";
import type { SearchSourceConnector } from "@/contracts/types/connector.types";
// Validation schema for the Circleback connector creation form.
const circlebackConnectorFormSchema = z.object({
	name: z
		.string()
		.min(3, { message: "Connector name must be at least 3 characters." }),
});

// Shape of the form values, derived from the schema above.
type CirclebackConnectorFormValues = z.infer<typeof circlebackConnectorFormSchema>;
/**
 * Connector setup page for Circleback meetings.
 *
 * Circleback pushes meeting data to SurfSense via webhook, so there is no
 * OAuth flow here: the page shows the search-space-scoped webhook URL,
 * registers a non-indexable connector record, and walks the user through
 * configuring the webhook inside Circleback.
 */
export default function CirclebackConnectorPage() {
	const router = useRouter();
	const params = useParams();
	const searchSpaceId = params.search_space_id as string;
	const [isSubmitting, setIsSubmitting] = useState(false);
	const [doesConnectorExist, setDoesConnectorExist] = useState(false);
	const [copied, setCopied] = useState(false);

	// NOTE(review): `connectors` is never read here (the effect below shadows
	// it) — presumably kept for the atom subscription; confirm.
	const { data: connectors } = useAtomValue(connectorsAtom);
	const { mutateAsync: createConnector } = useAtomValue(createConnectorMutationAtom);

	// Construct the webhook URL (scoped to the current search space).
	const apiBaseUrl = process.env.NEXT_PUBLIC_BACKEND_URL || "http://localhost:8000";
	const webhookUrl = `${apiBaseUrl}/api/v1/webhooks/circleback/${searchSpaceId}`;

	// Initialize the form
	const form = useForm<CirclebackConnectorFormValues>({
		resolver: zodResolver(circlebackConnectorFormSchema),
		defaultValues: {
			name: "Circleback Meetings",
		},
	});

	const { refetch: fetchConnectors } = useAtomValue(connectorsAtom);

	// On mount, check whether a Circleback connector already exists so the
	// page can show the "already active" view instead of the creation form.
	// Empty dependency array: intentionally run once.
	useEffect(() => {
		fetchConnectors().then((data) => {
			const connectors = data.data || [];
			const connector = connectors.find(
				(c: SearchSourceConnector) => c.connector_type === EnumConnectorName.CIRCLEBACK_CONNECTOR
			);
			if (connector) {
				setDoesConnectorExist(true);
			}
		});
	}, []);

	// Copy webhook URL to clipboard
	const copyToClipboard = async () => {
		try {
			await navigator.clipboard.writeText(webhookUrl);
			setCopied(true);
			toast.success("Webhook URL copied to clipboard!");
			// Reset the "copied" indicator after a short delay.
			setTimeout(() => setCopied(false), 2000);
		} catch {
			toast.error("Failed to copy to clipboard");
		}
	};

	// Handle form submission: register the connector record for this search
	// space, then return to the connectors list.
	const onSubmit = async (values: CirclebackConnectorFormValues) => {
		setIsSubmitting(true);
		try {
			await createConnector({
				data: {
					name: values.name,
					connector_type: EnumConnectorName.CIRCLEBACK_CONNECTOR,
					config: {
						webhook_url: webhookUrl,
					},
					is_indexable: false, // Webhooks push data, not indexed
					last_indexed_at: null,
					periodic_indexing_enabled: false,
					indexing_frequency_minutes: null,
					next_scheduled_at: null,
				},
				queryParams: {
					search_space_id: searchSpaceId,
				},
			});
			toast.success("Circleback connector created successfully!");
			// Navigate back to connectors page
			router.push(`/dashboard/${searchSpaceId}/connectors`);
		} catch (error) {
			console.error("Error creating connector:", error);
			toast.error(error instanceof Error ? error.message : "Failed to create connector");
		} finally {
			setIsSubmitting(false);
		}
	};

	return (
		<div className="container mx-auto py-8 max-w-2xl">
			<motion.div
				initial={{ opacity: 0, y: 20 }}
				animate={{ opacity: 1, y: 0 }}
				transition={{ duration: 0.5 }}
			>
				{/* Header */}
				<div className="mb-8">
					<Link
						href={`/dashboard/${searchSpaceId}/connectors/add`}
						className="inline-flex items-center text-sm text-muted-foreground hover:text-foreground mb-4"
					>
						<ArrowLeft className="mr-2 h-4 w-4" />
						Back to connectors
					</Link>
					<div className="flex items-center gap-4">
						<div className="flex h-12 w-12 items-center justify-center rounded-lg">
							{getConnectorIcon(EnumConnectorName.CIRCLEBACK_CONNECTOR, "h-6 w-6")}
						</div>
						<div>
							<h1 className="text-3xl font-bold tracking-tight">Connect Circleback</h1>
							<p className="text-muted-foreground">
								Receive meeting notes and transcripts via webhook.
							</p>
						</div>
					</div>
				</div>
				{/* Connection Card — creation flow when no connector exists yet */}
				{!doesConnectorExist ? (
					<>
						<Card className="mb-6">
							<CardHeader>
								<CardTitle>Webhook Configuration</CardTitle>
								<CardDescription>
									Use this webhook URL in your Circleback automation to send meeting data to
									SurfSense.
								</CardDescription>
							</CardHeader>
							<CardContent className="space-y-4">
								<div className="space-y-2">
									<label className="text-sm font-medium">Webhook URL</label>
									<div className="flex gap-2">
										<Input value={webhookUrl} readOnly className="font-mono text-sm" />
										<Button
											type="button"
											variant="outline"
											size="icon"
											onClick={copyToClipboard}
											className="shrink-0"
										>
											{copied ? (
												<Check className="h-4 w-4 text-green-500" />
											) : (
												<Copy className="h-4 w-4" />
											)}
										</Button>
									</div>
									<p className="text-xs text-muted-foreground">
										Copy this URL and paste it in your Circleback automation settings.
									</p>
								</div>
								<Alert>
									<Webhook className="h-4 w-4" />
									<AlertTitle>How it works</AlertTitle>
									<AlertDescription>
										When you configure this webhook in Circleback, it will automatically send
										meeting notes, transcripts, and action items to SurfSense after each meeting.
									</AlertDescription>
								</Alert>
							</CardContent>
						</Card>
						<Card>
							<CardHeader>
								<CardTitle>Create Connector</CardTitle>
								<CardDescription>
									Register the Circleback connector to track incoming meeting data.
								</CardDescription>
							</CardHeader>
							<Form {...form}>
								<form onSubmit={form.handleSubmit(onSubmit)}>
									<CardContent className="space-y-4">
										<FormField
											control={form.control}
											name="name"
											render={({ field }) => (
												<FormItem>
													<FormLabel>Connector Name</FormLabel>
													<FormControl>
														<Input placeholder="My Circleback Meetings" {...field} />
													</FormControl>
													<FormDescription>
														A friendly name to identify this connector.
													</FormDescription>
													<FormMessage />
												</FormItem>
											)}
										/>
										<div className="space-y-2 pt-2">
											<div className="flex items-center space-x-2 text-sm text-muted-foreground">
												<Check className="h-4 w-4 text-green-500" />
												<span>Automatic meeting notes import</span>
											</div>
											<div className="flex items-center space-x-2 text-sm text-muted-foreground">
												<Check className="h-4 w-4 text-green-500" />
												<span>Full transcripts with speaker identification</span>
											</div>
											<div className="flex items-center space-x-2 text-sm text-muted-foreground">
												<Check className="h-4 w-4 text-green-500" />
												<span>Action items and insights extraction</span>
											</div>
										</div>
									</CardContent>
									<CardFooter className="flex justify-between">
										<Button
											type="button"
											variant="outline"
											onClick={() => router.push(`/dashboard/${searchSpaceId}/connectors/add`)}
										>
											Cancel
										</Button>
										<Button type="submit" disabled={isSubmitting}>
											{isSubmitting ? (
												<>
													<Loader2 className="mr-2 h-4 w-4 animate-spin" />
													Creating...
												</>
											) : (
												<>
													<Webhook className="mr-2 h-4 w-4" />
													Create Connector
												</>
											)}
										</Button>
									</CardFooter>
								</form>
							</Form>
						</Card>
					</>
				) : (
					/* Success Card — connector already registered */
					<Card>
						<CardHeader>
							<CardTitle> Circleback connector is active!</CardTitle>
							<CardDescription>
								Your Circleback meetings will be automatically imported to this search space.
							</CardDescription>
						</CardHeader>
						<CardContent className="space-y-4">
							<div className="space-y-2">
								<label className="text-sm font-medium">Webhook URL</label>
								<div className="flex gap-2">
									<Input value={webhookUrl} readOnly className="font-mono text-sm" />
									<Button
										type="button"
										variant="outline"
										size="icon"
										onClick={copyToClipboard}
										className="shrink-0"
									>
										{copied ? (
											<Check className="h-4 w-4 text-green-500" />
										) : (
											<Copy className="h-4 w-4" />
										)}
									</Button>
								</div>
							</div>
						</CardContent>
					</Card>
				)}
				{/* Help Section — step-by-step Circleback configuration guide */}
				<Card className="mt-6">
					<CardHeader>
						<CardTitle className="text-lg">Setup Instructions</CardTitle>
					</CardHeader>
					<CardContent className="space-y-4">
						<div>
							<h4 className="font-medium mb-2">1. Copy the Webhook URL</h4>
							<p className="text-sm text-muted-foreground">
								Copy the webhook URL shown above. You'll need this for the next step.
							</p>
						</div>
						<div>
							<h4 className="font-medium mb-2">2. Open Circleback Automations</h4>
							<p className="text-sm text-muted-foreground">
								Go to{" "}
								<a
									href="https://app.circleback.ai/automations"
									target="_blank"
									rel="noopener noreferrer"
									className="text-primary hover:underline inline-flex items-center gap-1"
								>
									Circleback Automations
									<ExternalLink className="h-3 w-3" />
								</a>{" "}
								and click "Create automation".
							</p>
						</div>
						<div>
							<h4 className="font-medium mb-2">3. Configure the Webhook</h4>
							<p className="text-sm text-muted-foreground">
								Set your automation conditions, then select "Send webhook request" and paste the
								webhook URL.
							</p>
						</div>
						<div>
							<h4 className="font-medium mb-2">4. Select Meeting Outcomes</h4>
							<p className="text-sm text-muted-foreground">
								Choose which meeting data to include: notes, transcript, action items, and
								insights.
							</p>
						</div>
						<div>
							<h4 className="font-medium mb-2">5. Create & Test</h4>
							<p className="text-sm text-muted-foreground">
								Give your automation a name and create it. You can send a test request to verify
								the integration works.
							</p>
						</div>
					</CardContent>
				</Card>
			</motion.div>
		</div>
	);
}

View file

@ -190,20 +190,27 @@ export const connectorCategories: ConnectorCategory[] = [
icon: getConnectorIcon(EnumConnectorName.GOOGLE_DRIVE_CONNECTOR, "h-6 w-6"), icon: getConnectorIcon(EnumConnectorName.GOOGLE_DRIVE_CONNECTOR, "h-6 w-6"),
status: "available", status: "available",
}, },
{ {
id: "luma-connector", id: "luma-connector",
title: "Luma", title: "Luma",
description: "luma_desc", description: "luma_desc",
icon: getConnectorIcon(EnumConnectorName.LUMA_CONNECTOR, "h-6 w-6"), icon: getConnectorIcon(EnumConnectorName.LUMA_CONNECTOR, "h-6 w-6"),
status: "available", status: "available",
}, },
{ {
id: "zoom", id: "circleback-connector",
title: "Zoom", title: "Circleback",
description: "zoom_desc", description: "circleback_desc",
icon: <IconBrandZoom className="h-6 w-6" />, icon: getConnectorIcon(EnumConnectorName.CIRCLEBACK_CONNECTOR, "h-6 w-6"),
status: "coming-soon", status: "available",
}, },
], {
}, id: "zoom",
title: "Zoom",
description: "zoom_desc",
icon: <IconBrandZoom className="h-6 w-6" />,
status: "coming-soon",
},
],
},
]; ];

View file

@ -19,4 +19,5 @@ export enum EnumConnectorName {
LUMA_CONNECTOR = "LUMA_CONNECTOR", LUMA_CONNECTOR = "LUMA_CONNECTOR",
ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR", ELASTICSEARCH_CONNECTOR = "ELASTICSEARCH_CONNECTOR",
WEBCRAWLER_CONNECTOR = "WEBCRAWLER_CONNECTOR", WEBCRAWLER_CONNECTOR = "WEBCRAWLER_CONNECTOR",
CIRCLEBACK_CONNECTOR = "CIRCLEBACK_CONNECTOR",
} }

View file

@ -15,6 +15,7 @@ import {
IconSparkles, IconSparkles,
IconTable, IconTable,
IconTicket, IconTicket,
IconUsersGroup,
IconWorldWww, IconWorldWww,
} from "@tabler/icons-react"; } from "@tabler/icons-react";
import { import {
@ -74,7 +75,11 @@ export const getConnectorIcon = (connectorType: EnumConnectorName | string, clas
return <IconBrandElastic {...iconProps} />; return <IconBrandElastic {...iconProps} />;
case EnumConnectorName.WEBCRAWLER_CONNECTOR: case EnumConnectorName.WEBCRAWLER_CONNECTOR:
return <Globe {...iconProps} />; return <Globe {...iconProps} />;
case EnumConnectorName.CIRCLEBACK_CONNECTOR:
return <IconUsersGroup {...iconProps} />;
// Additional cases for non-enum connector types // Additional cases for non-enum connector types
case "CIRCLEBACK":
return <IconUsersGroup {...iconProps} />;
case "CRAWLED_URL": case "CRAWLED_URL":
return <Globe {...iconProps} />; return <Globe {...iconProps} />;
case "YOUTUBE_VIDEO": case "YOUTUBE_VIDEO":

View file

@ -305,6 +305,7 @@
"bookstack_desc": "Connect to BookStack to search wiki pages and documentation.", "bookstack_desc": "Connect to BookStack to search wiki pages and documentation.",
"airtable_desc": "Connect to Airtable to search records, tables and database content.", "airtable_desc": "Connect to Airtable to search records, tables and database content.",
"luma_desc": "Connect to Luma to search events, meetups and gatherings.", "luma_desc": "Connect to Luma to search events, meetups and gatherings.",
"circleback_desc": "Receive meeting notes, transcripts and action items from Circleback via webhook.",
"calendar_desc": "Connect to Google Calendar to search events, meetings and schedules.", "calendar_desc": "Connect to Google Calendar to search events, meetings and schedules.",
"gmail_desc": "Connect to your Gmail account to search through your emails.", "gmail_desc": "Connect to your Gmail account to search through your emails.",
"google_drive_desc": "Connect to Google Drive to search and index your files and documents.", "google_drive_desc": "Connect to Google Drive to search and index your files and documents.",

View file

@ -305,6 +305,7 @@
"bookstack_desc": "连接到 BookStack 以搜索 Wiki 页面和文档。", "bookstack_desc": "连接到 BookStack 以搜索 Wiki 页面和文档。",
"airtable_desc": "连接到 Airtable 以搜索记录、表格和数据库内容。", "airtable_desc": "连接到 Airtable 以搜索记录、表格和数据库内容。",
"luma_desc": "连接到 Luma 以搜索活动、聚会和集会。", "luma_desc": "连接到 Luma 以搜索活动、聚会和集会。",
"circleback_desc": "通过 Webhook 从 Circleback 接收会议记录、转录和行动项目。",
"calendar_desc": "连接到 Google 日历以搜索活动、会议和日程。", "calendar_desc": "连接到 Google 日历以搜索活动、会议和日程。",
"gmail_desc": "连接到您的 Gmail 账户以搜索您的电子邮件。", "gmail_desc": "连接到您的 Gmail 账户以搜索您的电子邮件。",
"google_drive_desc": "连接到 Google 云端硬盘以搜索和索引您的文件和文档。", "google_drive_desc": "连接到 Google 云端硬盘以搜索和索引您的文件和文档。",