mirror of
https://github.com/MODSetter/SurfSense.git
synced 2026-05-15 18:25:18 +02:00
feat: added attachment support
This commit is contained in:
parent
bb971460fc
commit
c2dcb2045d
62 changed files with 1166 additions and 9012 deletions
|
|
@ -16,6 +16,7 @@ from .logs_routes import router as logs_router
|
|||
from .luma_add_connector_route import router as luma_add_connector_router
|
||||
from .new_chat_routes import router as new_chat_router
|
||||
from .notes_routes import router as notes_router
|
||||
from .podcasts_routes import router as podcasts_router
|
||||
from .rbac_routes import router as rbac_router
|
||||
from .search_source_connectors_routes import router as search_source_connectors_router
|
||||
from .search_spaces_routes import router as search_spaces_router
|
||||
|
|
@ -28,6 +29,7 @@ router.include_router(editor_router)
|
|||
router.include_router(documents_router)
|
||||
router.include_router(notes_router)
|
||||
router.include_router(new_chat_router) # Chat with assistant-ui persistence
|
||||
router.include_router(podcasts_router) # Podcast task status and audio
|
||||
router.include_router(search_source_connectors_router)
|
||||
router.include_router(google_calendar_add_connector_router)
|
||||
router.include_router(google_gmail_add_connector_router)
|
||||
|
|
|
|||
|
|
@ -8,11 +8,16 @@ These endpoints support the ThreadHistoryAdapter pattern from assistant-ui:
|
|||
- PUT /threads/{thread_id} - Update thread (rename, archive)
|
||||
- DELETE /threads/{thread_id} - Delete thread
|
||||
- POST /threads/{thread_id}/messages - Append message
|
||||
- POST /attachments/process - Process attachments for chat context
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import tempfile
|
||||
import uuid
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from fastapi import APIRouter, Depends, File, HTTPException, Request, UploadFile
|
||||
from fastapi.responses import StreamingResponse
|
||||
from sqlalchemy.exc import IntegrityError, OperationalError
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
|
@ -650,10 +655,10 @@ async def handle_new_chat(
|
|||
):
|
||||
"""
|
||||
Stream chat responses from the deep agent.
|
||||
|
||||
|
||||
This endpoint handles the new chat functionality with streaming responses
|
||||
using Server-Sent Events (SSE) format compatible with Vercel AI SDK.
|
||||
|
||||
|
||||
Requires CHATS_CREATE permission.
|
||||
"""
|
||||
try:
|
||||
|
|
@ -695,6 +700,7 @@ async def handle_new_chat(
|
|||
session=session,
|
||||
llm_config_id=llm_config_id,
|
||||
messages=request.messages,
|
||||
attachments=request.attachments,
|
||||
),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
|
|
@ -711,3 +717,185 @@ async def handle_new_chat(
|
|||
status_code=500,
|
||||
detail=f"An unexpected error occurred: {e!s}",
|
||||
) from None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Attachment Processing Endpoint
|
||||
# =============================================================================
|
||||
|
||||
|
||||
@router.post("/attachments/process")
async def process_attachment(
    file: UploadFile = File(...),
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Process an attachment file and extract its content as markdown.

    This endpoint uses the configured ETL service to parse files and return
    the extracted content that can be used as context in chat messages.

    Supported file types depend on the configured ETL_SERVICE:
    - Markdown/Text files: .md, .markdown, .txt (always supported)
    - Audio files: .mp3, .mp4, .mpeg, .mpga, .m4a, .wav, .webm (if STT configured)
    - Documents: .pdf, .docx, .doc, .pptx, .xlsx (depends on ETL service)

    Returns:
        JSON with attachment id, name, type, and extracted content

    Raises:
        HTTPException 400: no filename on the upload.
        HTTPException 422: STT/ETL not configured, unsupported file type, or
            no content could be extracted.
        HTTPException 500: unexpected processing failure.
    """
    from app.config import config as app_config

    if not file.filename:
        raise HTTPException(status_code=400, detail="No filename provided")

    filename = file.filename
    attachment_id = str(uuid.uuid4())
    # Initialized up front so the `finally` cleanup is safe even if the
    # temp file was never created.
    temp_path = None

    try:
        # Save file to a temporary location; keep the original extension so
        # downstream parsers can detect the format.
        file_ext = os.path.splitext(filename)[1].lower()
        with tempfile.NamedTemporaryFile(delete=False, suffix=file_ext) as temp_file:
            temp_path = temp_file.name
            content = await file.read()
            temp_file.write(content)

        extracted_content = ""

        # Process based on file type
        if file_ext in (".md", ".markdown", ".txt"):
            # For text/markdown files, read content directly
            with open(temp_path, encoding="utf-8") as f:
                extracted_content = f.read()

        elif file_ext in (".mp3", ".mp4", ".mpeg", ".mpga", ".m4a", ".wav", ".webm"):
            # Audio files - transcribe if STT service is configured
            if not app_config.STT_SERVICE:
                raise HTTPException(
                    status_code=422,
                    detail="Audio transcription is not configured. Please set STT_SERVICE.",
                )

            stt_service_type = (
                "local" if app_config.STT_SERVICE.startswith("local/") else "external"
            )

            if stt_service_type == "local":
                from app.services.stt_service import stt_service

                result = stt_service.transcribe_file(temp_path)
                extracted_content = result.get("text", "")
            else:
                from litellm import atranscription

                with open(temp_path, "rb") as audio_file:
                    transcription_kwargs = {
                        "model": app_config.STT_SERVICE,
                        "file": audio_file,
                        "api_key": app_config.STT_SERVICE_API_KEY,
                    }
                    if app_config.STT_SERVICE_API_BASE:
                        transcription_kwargs["api_base"] = (
                            app_config.STT_SERVICE_API_BASE
                        )

                    transcription_response = await atranscription(
                        **transcription_kwargs
                    )
                    extracted_content = transcription_response.get("text", "")

            if extracted_content:
                # BUGFIX: header previously contained literal placeholder text
                # instead of interpolating the uploaded file's name.
                extracted_content = (
                    f"# Transcription of {filename}\n\n{extracted_content}"
                )

        else:
            # Document files - use configured ETL service
            if app_config.ETL_SERVICE == "UNSTRUCTURED":
                from langchain_unstructured import UnstructuredLoader

                from app.utils.document_converters import convert_document_to_markdown

                loader = UnstructuredLoader(
                    temp_path,
                    mode="elements",
                    post_processors=[],
                    languages=["eng"],
                    include_orig_elements=False,
                    include_metadata=False,
                    strategy="auto",
                )
                docs = await loader.aload()
                extracted_content = await convert_document_to_markdown(docs)

            elif app_config.ETL_SERVICE == "LLAMACLOUD":
                from llama_cloud_services import LlamaParse
                from llama_cloud_services.parse.utils import ResultType

                parser = LlamaParse(
                    api_key=app_config.LLAMA_CLOUD_API_KEY,
                    num_workers=1,
                    verbose=False,
                    language="en",
                    result_type=ResultType.MD,
                )
                result = await parser.aparse(temp_path)
                markdown_documents = await result.aget_markdown_documents(
                    split_by_page=False
                )

                if markdown_documents:
                    extracted_content = "\n\n".join(
                        doc.text for doc in markdown_documents
                    )

            elif app_config.ETL_SERVICE == "DOCLING":
                from app.services.docling_service import create_docling_service

                docling_service = create_docling_service()
                result = await docling_service.process_document(temp_path, filename)
                extracted_content = result.get("content", "")

            else:
                raise HTTPException(
                    status_code=422,
                    detail=f"ETL service not configured or unsupported file type: {file_ext}",
                )

        if not extracted_content:
            # BUGFIX: detail was an f-string with no placeholder ("(unknown)")
            raise HTTPException(
                status_code=422,
                detail=f"Could not extract content from file: {filename}",
            )

        # Determine attachment type (must be one of: "image", "document", "file")
        # assistant-ui only supports these three types
        if file_ext in (".png", ".jpg", ".jpeg", ".gif", ".webp"):
            attachment_type = "image"
        else:
            # All other files (including audio, documents, text) are treated as "document"
            attachment_type = "document"

        return {
            "id": attachment_id,
            "name": filename,
            "type": attachment_type,
            "content": extracted_content,
            "contentLength": len(extracted_content),
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to process attachment: {e!s}",
        ) from e
    finally:
        # BUGFIX: cleanup moved to `finally` so HTTPException paths (STT not
        # configured, unsupported ETL, empty extraction) no longer leak the
        # temp file; previously only the success and generic-error paths
        # unlinked it.
        if temp_path is not None:
            with contextlib.suppress(OSError):
                os.unlink(temp_path)
|
||||
|
|
|
|||
277
surfsense_backend/app/routes/podcasts_routes.py
Normal file
277
surfsense_backend/app/routes/podcasts_routes.py
Normal file
|
|
@ -0,0 +1,277 @@
|
|||
"""
|
||||
Podcast routes for task status polling and audio retrieval.
|
||||
|
||||
These routes support the podcast generation feature in new-chat.
|
||||
Note: The old Chat-based podcast generation has been removed.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
from celery.result import AsyncResult
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.exc import SQLAlchemyError
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.celery_app import celery_app
|
||||
from app.db import (
|
||||
Permission,
|
||||
Podcast,
|
||||
SearchSpace,
|
||||
SearchSpaceMembership,
|
||||
User,
|
||||
get_async_session,
|
||||
)
|
||||
from app.schemas import PodcastRead
|
||||
from app.users import current_active_user
|
||||
from app.utils.rbac import check_permission
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/podcasts", response_model=list[PodcastRead])
async def read_podcasts(
    skip: int = 0,
    limit: int = 100,
    search_space_id: int | None = None,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    List podcasts the user has access to.
    Requires PODCASTS_READ permission for the search space(s).
    """
    if skip < 0 or limit < 1:
        raise HTTPException(status_code=400, detail="Invalid pagination parameters")
    try:
        if search_space_id is None:
            # No filter given: scope to every search space the user belongs to.
            query = (
                select(Podcast)
                .join(SearchSpace)
                .join(SearchSpaceMembership)
                .filter(SearchSpaceMembership.user_id == user.id)
            )
        else:
            # Explicit search space: verify read permission before querying.
            await check_permission(
                session,
                user,
                search_space_id,
                Permission.PODCASTS_READ.value,
                "You don't have permission to read podcasts in this search space",
            )
            query = select(Podcast).filter(Podcast.search_space_id == search_space_id)

        rows = await session.execute(query.offset(skip).limit(limit))
        return rows.scalars().all()
    except HTTPException:
        raise
    except SQLAlchemyError:
        raise HTTPException(
            status_code=500, detail="Database error occurred while fetching podcasts"
        ) from None
|
||||
|
||||
|
||||
@router.get("/podcasts/{podcast_id}", response_model=PodcastRead)
async def read_podcast(
    podcast_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Get a specific podcast by ID.
    Requires PODCASTS_READ permission for the search space.

    Raises:
        HTTPException 404: podcast does not exist.
        HTTPException 500: database error while fetching.
    """
    try:
        result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
        podcast = result.scalars().first()

        if not podcast:
            raise HTTPException(
                status_code=404,
                detail="Podcast not found",
            )

        # Check permission for the search space
        await check_permission(
            session,
            user,
            podcast.search_space_id,
            Permission.PODCASTS_READ.value,
            "You don't have permission to read podcasts in this search space",
        )

        return podcast
    except HTTPException:
        # Bare re-raise (was `raise he`) preserves the original traceback and
        # matches the error-handling style used elsewhere in this module.
        raise
    except SQLAlchemyError:
        raise HTTPException(
            status_code=500, detail="Database error occurred while fetching podcast"
        ) from None
|
||||
|
||||
|
||||
@router.delete("/podcasts/{podcast_id}", response_model=dict)
async def delete_podcast(
    podcast_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Delete a podcast.
    Requires PODCASTS_DELETE permission for the search space.

    Raises:
        HTTPException 404: podcast does not exist.
        HTTPException 500: database error while deleting (session is rolled back).
    """
    try:
        result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
        db_podcast = result.scalars().first()

        if not db_podcast:
            raise HTTPException(status_code=404, detail="Podcast not found")

        # Check permission for the search space
        await check_permission(
            session,
            user,
            db_podcast.search_space_id,
            Permission.PODCASTS_DELETE.value,
            "You don't have permission to delete podcasts in this search space",
        )

        await session.delete(db_podcast)
        await session.commit()
        return {"message": "Podcast deleted successfully"}
    except HTTPException:
        # Bare re-raise (was `raise he`) preserves the original traceback and
        # matches the error-handling style used elsewhere in this module.
        raise
    except SQLAlchemyError:
        # Roll back the partial delete before reporting the failure.
        await session.rollback()
        raise HTTPException(
            status_code=500, detail="Database error occurred while deleting podcast"
        ) from None
|
||||
|
||||
|
||||
@router.get("/podcasts/{podcast_id}/stream")
@router.get("/podcasts/{podcast_id}/audio")
async def stream_podcast(
    podcast_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """
    Stream a podcast audio file.
    Requires PODCASTS_READ permission for the search space.

    Note: Both /stream and /audio endpoints are supported for compatibility.

    Raises:
        HTTPException 404: podcast or its audio file does not exist.
        HTTPException 500: unexpected error while streaming.
    """
    try:
        result = await session.execute(select(Podcast).filter(Podcast.id == podcast_id))
        podcast = result.scalars().first()

        if not podcast:
            raise HTTPException(
                status_code=404,
                detail="Podcast not found",
            )

        # Check permission for the search space
        await check_permission(
            session,
            user,
            podcast.search_space_id,
            Permission.PODCASTS_READ.value,
            "You don't have permission to access podcasts in this search space",
        )

        # Get the file path
        file_path = podcast.file_location

        # Check if the file exists
        if not file_path or not os.path.isfile(file_path):
            raise HTTPException(status_code=404, detail="Podcast audio file not found")

        # Stream the file in fixed-size chunks. The previous implementation
        # used `yield from file_like`, which in binary mode splits on b"\n"
        # bytes — arbitrary and potentially huge "lines" for audio data.
        def iterfile():
            with open(file_path, mode="rb") as file_like:
                while chunk := file_like.read(64 * 1024):
                    yield chunk

        # Return a streaming response with appropriate headers
        return StreamingResponse(
            iterfile(),
            media_type="audio/mpeg",
            headers={
                "Accept-Ranges": "bytes",
                "Content-Disposition": f"inline; filename={Path(file_path).name}",
            },
        )

    except HTTPException:
        # Bare re-raise (was `raise he`) preserves the original traceback and
        # matches the error-handling style used elsewhere in this module.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Error streaming podcast: {e!s}"
        ) from e
|
||||
|
||||
|
||||
@router.get("/podcasts/task/{task_id}/status")
async def get_podcast_task_status(
    task_id: str,
    user: User = Depends(current_active_user),
):
    """
    Get the status of a podcast generation task.
    Used by new-chat frontend to poll for completion.

    Returns:
        - status: "processing" | "success" | "error"
        - podcast_id: (only if status == "success")
        - title: (only if status == "success")
        - error: (only if status == "error")
    """
    try:
        task = AsyncResult(task_id, app=celery_app)

        # Guard clauses, most common case first: still running.
        if not task.ready():
            return {
                "status": "processing",
                "state": task.state,
            }

        if not task.successful():
            # Celery recorded a failure for this task.
            return {
                "status": "error",
                "error": str(task.result) if task.result else "Task failed",
            }

        outcome = task.result
        if not isinstance(outcome, dict):
            return {
                "status": "error",
                "error": "Unexpected task result format",
            }

        if outcome.get("status") != "success":
            return {
                "status": "error",
                "error": outcome.get("error", "Unknown error"),
            }

        return {
            "status": "success",
            "podcast_id": outcome.get("podcast_id"),
            "title": outcome.get("title"),
            "transcript_entries": outcome.get("transcript_entries"),
        }

    except Exception as e:
        raise HTTPException(
            status_code=500, detail=f"Error checking task status: {e!s}"
        ) from e
|
||||
Loading…
Add table
Add a link
Reference in a new issue