mirror of https://github.com/MODSetter/SurfSense.git, synced 2026-04-25 08:46:22 +02:00
chore: ran linting
This commit is contained in:
parent 00a617ef17
commit aa66928154
44 changed files with 2025 additions and 1658 deletions
@@ -17,29 +17,30 @@ md = MarkdownifyTransformer()
 def safe_set_chunks(document: Document, chunks: list) -> None:
     """
     Safely assign chunks to a document without triggering lazy loading.

     ALWAYS use this instead of `document.chunks = chunks` to avoid
     SQLAlchemy async errors (MissingGreenlet / greenlet_spawn).

     Why this is needed:
     - Direct assignment `document.chunks = chunks` triggers SQLAlchemy to
       load the OLD chunks first (for comparison/orphan detection)
     - This lazy loading fails in async context with asyncpg driver
     - set_committed_value bypasses this by setting the value directly

     This function is safe regardless of how the document was loaded
     (with or without selectinload).

     Args:
         document: The Document object to update
         chunks: List of Chunk objects to assign

     Example:
         # Instead of: document.chunks = chunks (DANGEROUS!)
         safe_set_chunks(document, chunks)  # Always safe
     """
     from sqlalchemy.orm.attributes import set_committed_value
-    set_committed_value(document, 'chunks', chunks)
+
+    set_committed_value(document, "chunks", chunks)


 def get_current_timestamp() -> datetime:
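
For context: the failure mode this docstring describes, as a minimal self-contained sketch. The session/model wiring is assumed and not part of this diff.

    from sqlalchemy.orm.attributes import set_committed_value

    async def update_chunks(session, document, new_chunks):
        # document.chunks = new_chunks  -> SQLAlchemy lazy-loads the OLD
        # chunks for orphan detection, which raises MissingGreenlet under
        # asyncpg. set_committed_value records the value as already loaded:
        # no attribute history, no lazy load, no IO outside a greenlet.
        set_committed_value(document, "chunks", new_chunks)
        for chunk in new_chunks:
            session.add(chunk)  # new rows still have to be staged explicitly
        await session.commit()
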
@@ -91,7 +91,9 @@ async def add_circleback_meeting_document(
         # Document exists - check if content has changed
         if existing_document.content_hash == content_hash:
             # Ensure status is ready (might have been stuck in processing/pending)
-            if not DocumentStatus.is_state(existing_document.status, DocumentStatus.READY):
+            if not DocumentStatus.is_state(
+                existing_document.status, DocumentStatus.READY
+            ):
                 existing_document.status = DocumentStatus.ready()
                 await session.commit()
             logger.info(f"Circleback meeting {meeting_id} unchanged. Skipping.")
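
The DocumentStatus helpers used here are defined elsewhere in the repo. A hypothetical stand-in that is consistent with the three calls visible in this diff (is_state, ready, failed) would be:

    # Hypothetical stand-in only; the real class is not part of this diff.
    class DocumentStatus:
        READY = "ready"
        PENDING = "pending"
        PROCESSING = "processing"
        FAILED = "failed"

        @staticmethod
        def ready() -> str:
            return DocumentStatus.READY

        @staticmethod
        def failed(reason: str) -> str:
            # Callers truncate the reason (e.g. str(e)[:150]) before storing it.
            return f"{DocumentStatus.FAILED}: {reason}"

        @staticmethod
        def is_state(status, state) -> bool:
            return bool(status) and str(status).startswith(state)
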
@@ -110,7 +112,7 @@ async def add_circleback_meeting_document(
     # PHASE 1: Create document with PENDING status
     # This makes the document visible in the UI immediately
     # =======================================================================

     # Fetch the user who set up the Circleback connector (preferred)
     # or fall back to search space owner if no connector found
     created_by_user_id = None
@@ -173,7 +175,7 @@ async def add_circleback_meeting_document(
     # =======================================================================
     # PHASE 3: Process the document content
     # =======================================================================

     # Get LLM for generating summary
     llm = await get_document_summary_llm(session, search_space_id)
     if not llm:
@@ -243,7 +245,7 @@ async def add_circleback_meeting_document(

     await session.commit()
     await session.refresh(document)

     if existing_document:
         logger.info(
             f"Updated Circleback meeting document {meeting_id} in search space {search_space_id}"
@@ -267,7 +269,9 @@ async def add_circleback_meeting_document(
                 document.updated_at = get_current_timestamp()
                 await session.commit()
             except Exception as status_error:
-                logger.error(f"Failed to update document status to failed: {status_error}")
+                logger.error(
+                    f"Failed to update document status to failed: {status_error}"
+                )
         raise db_error
     except Exception as e:
         await session.rollback()
@@ -279,5 +283,7 @@ async def add_circleback_meeting_document(
                 document.updated_at = get_current_timestamp()
                 await session.commit()
             except Exception as status_error:
-                logger.error(f"Failed to update document status to failed: {status_error}")
+                logger.error(
+                    f"Failed to update document status to failed: {status_error}"
+                )
         raise RuntimeError(f"Failed to process Circleback meeting: {e!s}") from e
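
Both of these hunks rewrap the same defensive pattern: on failure, best-effort mark the document failed, but never let that bookkeeping step mask the original exception. Schematically (names taken from the surrounding diff):

    try:
        await process(document)  # the main work
    except Exception as e:
        await session.rollback()
        try:
            document.status = DocumentStatus.failed(str(e)[:150])
            document.updated_at = get_current_timestamp()
            await session.commit()
        except Exception as status_error:
            # Log only; the caller must still see the original error.
            logger.error(
                f"Failed to update document status to failed: {status_error}"
            )
        raise RuntimeError(f"Failed to process Circleback meeting: {e!s}") from e
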
@@ -1629,16 +1629,16 @@ async def process_file_in_background_with_document(
 ) -> Document | None:
     """
     Process file and update existing pending document (2-phase pattern).

     This function is Phase 2 of the real-time document status updates:
     - Phase 1 (API): Created document with pending status
     - Phase 2 (this): Process file and update document to ready/failed

     The document already exists with pending status. This function:
     1. Parses the file content (markdown, audio, or ETL services)
     2. Updates the document with content, embeddings, and chunks
     3. Sets status to 'ready' on success

     Args:
         document: Existing document with pending status
         file_path: Path to the uploaded file
@@ -1650,7 +1650,7 @@ async def process_file_in_background_with_document(
         log_entry: Log entry for this task
         connector: Optional connector info for Google Drive files
         notification: Optional notification for progress updates

     Returns:
         Updated Document object if successful, None if duplicate content detected
     """
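
The two-phase flow the docstring describes, reduced to a schematic. The endpoint shape, DocumentStatus.pending(), and schedule_background_task are placeholders, not the repo's actual API:

    # Phase 1 (API handler): create a visible placeholder immediately.
    async def upload_endpoint(session, file, search_space_id):
        document = Document(
            title=file.filename,
            status=DocumentStatus.pending(),  # assumed helper; visible in UI at once
            search_space_id=search_space_id,
        )
        session.add(document)
        await session.commit()
        # Phase 2 runs out of band and flips the status to ready/failed.
        schedule_background_task(process_file_in_background_with_document, document)
        return document
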
@@ -1665,13 +1665,18 @@ async def process_file_in_background_with_document(
     etl_service = None

     # ===== STEP 1: Parse file content based on type =====

     # Check if the file is a markdown or text file
     if filename.lower().endswith((".md", ".markdown", ".txt")):
         # Update notification: parsing stage
         if notification:
-            await NotificationService.document_processing.notify_processing_progress(
-                session, notification, stage="parsing", stage_message="Reading file"
-            )
+            await (
+                NotificationService.document_processing.notify_processing_progress(
+                    session,
+                    notification,
+                    stage="parsing",
+                    stage_message="Reading file",
+                )
+            )

         await task_logger.log_task_progress(
@@ -1695,8 +1700,13 @@ async def process_file_in_background_with_document(
     ):
         # Update notification: parsing stage (transcription)
         if notification:
-            await NotificationService.document_processing.notify_processing_progress(
-                session, notification, stage="parsing", stage_message="Transcribing audio"
-            )
+            await (
+                NotificationService.document_processing.notify_processing_progress(
+                    session,
+                    notification,
+                    stage="parsing",
+                    stage_message="Transcribing audio",
+                )
+            )

         await task_logger.log_task_progress(
@@ -1708,7 +1718,8 @@ async def process_file_in_background_with_document(
         # Transcribe audio
         stt_service_type = (
             "local"
-            if app_config.STT_SERVICE and app_config.STT_SERVICE.startswith("local/")
+            if app_config.STT_SERVICE
+            and app_config.STT_SERVICE.startswith("local/")
             else "external"
         )

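
The rewrapped conditional keeps the routing rule intact: an STT_SERVICE value of the form "local/<model>" selects on-box transcription, anything else (including unset) goes external. In isolation (model names illustrative):

    def resolve_stt_service_type(stt_service):
        # "local/<model>" -> local pipeline; anything else -> hosted API.
        if stt_service and stt_service.startswith("local/"):
            return "local"
        return "external"

    assert resolve_stt_service_type("local/whisper-base") == "local"
    assert resolve_stt_service_type("openai/whisper-1") == "external"
    assert resolve_stt_service_type(None) == "external"
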
@@ -1719,7 +1730,9 @@ async def process_file_in_background_with_document(
             transcribed_text = result.get("text", "")
             if not transcribed_text:
                 raise ValueError("Transcription returned empty text")
-            markdown_content = f"# Transcription of {filename}\n\n{transcribed_text}"
+            markdown_content = (
+                f"# Transcription of {filename}\n\n{transcribed_text}"
+            )
         else:
             with open(file_path, "rb") as audio_file:
                 transcription_kwargs = {
@@ -1728,12 +1741,18 @@ async def process_file_in_background_with_document(
                     "api_key": app_config.STT_SERVICE_API_KEY,
                 }
                 if app_config.STT_SERVICE_API_BASE:
-                    transcription_kwargs["api_base"] = app_config.STT_SERVICE_API_BASE
-                transcription_response = await atranscription(**transcription_kwargs)
+                    transcription_kwargs["api_base"] = (
+                        app_config.STT_SERVICE_API_BASE
+                    )
+                transcription_response = await atranscription(
+                    **transcription_kwargs
+                )
                 transcribed_text = transcription_response.get("text", "")
                 if not transcribed_text:
                     raise ValueError("Transcription returned empty text")
-            markdown_content = f"# Transcription of {filename}\n\n{transcribed_text}"
+            markdown_content = (
+                f"# Transcription of {filename}\n\n{transcribed_text}"
+            )

         etl_service = "AUDIO_TRANSCRIPTION"
         # Clean up temp file
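
For the external path, the kwargs dict feeds litellm's async transcription call. A trimmed sketch of the call as this diff wires it; the "model" key sits just above the visible hunk and is assumed here:

    from litellm import atranscription

    async def transcribe_external(file_path, app_config):
        with open(file_path, "rb") as audio_file:
            transcription_kwargs = {
                "model": app_config.STT_SERVICE,  # assumed, e.g. "openai/whisper-1"
                "file": audio_file,
                "api_key": app_config.STT_SERVICE_API_KEY,
            }
            if app_config.STT_SERVICE_API_BASE:  # optional endpoint override
                transcription_kwargs["api_base"] = app_config.STT_SERVICE_API_BASE
            response = await atranscription(**transcription_kwargs)
        transcribed_text = response.get("text", "")
        if not transcribed_text:
            raise ValueError("Transcription returned empty text")
        return transcribed_text
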
@@ -1742,13 +1761,18 @@ async def process_file_in_background_with_document(

     else:
         # Document files - use ETL service
-        from app.services.page_limit_service import PageLimitExceededError, PageLimitService
+        from app.services.page_limit_service import (
+            PageLimitExceededError,
+            PageLimitService,
+        )

         page_limit_service = PageLimitService(session)

         # Estimate page count
         try:
-            estimated_pages = page_limit_service.estimate_pages_before_processing(file_path)
+            estimated_pages = page_limit_service.estimate_pages_before_processing(
+                file_path
+            )
         except Exception:
             file_size = os.path.getsize(file_path)
             estimated_pages = max(1, file_size // (80 * 1024))
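
The fallback estimator assumes roughly 80 KB per page with a floor of one page: a 1 MB file works out to 1_048_576 // 81_920 = 12 pages, and anything under 80 KB still counts as one. Standalone:

    import os

    def estimate_pages_fallback(file_path):
        # Heuristic for when the real estimator raises: ~80 KB per page.
        file_size = os.path.getsize(file_path)
        return max(1, file_size // (80 * 1024))
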
@@ -1759,14 +1783,22 @@ async def process_file_in_background_with_document(
         if app_config.ETL_SERVICE == "UNSTRUCTURED":
             if notification:
                 await NotificationService.document_processing.notify_processing_progress(
-                    session, notification, stage="parsing", stage_message="Extracting content"
+                    session,
+                    notification,
+                    stage="parsing",
+                    stage_message="Extracting content",
                 )

             from langchain_unstructured import UnstructuredLoader

             loader = UnstructuredLoader(
-                file_path, mode="elements", post_processors=[], languages=["eng"],
-                include_orig_elements=False, include_metadata=False, strategy="auto"
+                file_path,
+                mode="elements",
+                post_processors=[],
+                languages=["eng"],
+                include_orig_elements=False,
+                include_metadata=False,
+                strategy="auto",
             )
             docs = await loader.aload()
             markdown_content = await convert_document_to_markdown(docs)
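
convert_document_to_markdown is not part of this diff; a naive async stand-in consistent with how it is called (list of loaded elements in, markdown string out) could be:

    # Naive stand-in; the repo's helper presumably handles element types
    # (titles, lists, tables) far more carefully.
    async def convert_document_to_markdown(docs):
        return "\n\n".join(doc.page_content for doc in docs if doc.page_content)
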
@@ -1775,37 +1807,55 @@ async def process_file_in_background_with_document(
             etl_service = "UNSTRUCTURED"

             # Update page usage
-            await page_limit_service.update_page_usage(user_id, final_page_count, allow_exceed=True)
+            await page_limit_service.update_page_usage(
+                user_id, final_page_count, allow_exceed=True
+            )

         elif app_config.ETL_SERVICE == "LLAMACLOUD":
             if notification:
                 await NotificationService.document_processing.notify_processing_progress(
-                    session, notification, stage="parsing", stage_message="Extracting content"
+                    session,
+                    notification,
+                    stage="parsing",
+                    stage_message="Extracting content",
                 )

             result = await parse_with_llamacloud_retry(
-                file_path=file_path, estimated_pages=estimated_pages,
-                task_logger=task_logger, log_entry=log_entry
+                file_path=file_path,
+                estimated_pages=estimated_pages,
+                task_logger=task_logger,
+                log_entry=log_entry,
             )
-            markdown_documents = await result.aget_markdown_documents(split_by_page=False)
+            markdown_documents = await result.aget_markdown_documents(
+                split_by_page=False
+            )
             if not markdown_documents:
-                raise RuntimeError(f"LlamaCloud parsing returned no documents: {filename}")
+                raise RuntimeError(
+                    f"LlamaCloud parsing returned no documents: {filename}"
+                )
             markdown_content = markdown_documents[0].text
             etl_service = "LLAMACLOUD"

             # Update page usage
-            await page_limit_service.update_page_usage(user_id, estimated_pages, allow_exceed=True)
+            await page_limit_service.update_page_usage(
+                user_id, estimated_pages, allow_exceed=True
+            )

         elif app_config.ETL_SERVICE == "DOCLING":
             if notification:
                 await NotificationService.document_processing.notify_processing_progress(
-                    session, notification, stage="parsing", stage_message="Extracting content"
+                    session,
+                    notification,
+                    stage="parsing",
+                    stage_message="Extracting content",
                 )

             # Suppress logging during Docling import
             getLogger("docling.pipeline.base_pipeline").setLevel(ERROR)
             getLogger("docling.document_converter").setLevel(ERROR)
-            getLogger("docling_core.transforms.chunker.hierarchical_chunker").setLevel(ERROR)
+            getLogger(
+                "docling_core.transforms.chunker.hierarchical_chunker"
+            ).setLevel(ERROR)

             from docling.document_converter import DocumentConverter

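
Taken together, these hunks are a plain dispatch on app_config.ETL_SERVICE with an explicit failure for unknown values; condensed, with loader details elided:

    async def parse_document(file_path, app_config):
        if app_config.ETL_SERVICE == "UNSTRUCTURED":
            ...  # UnstructuredLoader -> convert_document_to_markdown
        elif app_config.ETL_SERVICE == "LLAMACLOUD":
            ...  # parse_with_llamacloud_retry -> aget_markdown_documents
        elif app_config.ETL_SERVICE == "DOCLING":
            ...  # DocumentConverter, noisy loggers set to ERROR first
        else:
            raise RuntimeError(f"Unknown ETL_SERVICE: {app_config.ETL_SERVICE}")
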
@@ -1815,7 +1865,9 @@ async def process_file_in_background_with_document(
             etl_service = "DOCLING"

             # Update page usage
-            await page_limit_service.update_page_usage(user_id, estimated_pages, allow_exceed=True)
+            await page_limit_service.update_page_usage(
+                user_id, estimated_pages, allow_exceed=True
+            )

         else:
             raise RuntimeError(f"Unknown ETL_SERVICE: {app_config.ETL_SERVICE}")
@@ -1829,7 +1881,7 @@ async def process_file_in_background_with_document(

     # ===== STEP 2: Check for duplicate content =====
     content_hash = generate_content_hash(markdown_content, search_space_id)

     existing_by_content = await check_duplicate_document(session, content_hash)
     if existing_by_content and existing_by_content.id != document.id:
         # Duplicate content found - mark this document as failed
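
Hashing the content together with search_space_id means identical files in two different search spaces are not flagged as duplicates of each other. generate_content_hash is not in this diff; one plausible shape (digest construction assumed):

    import hashlib

    def generate_content_hash(content, search_space_id):
        # Scope the digest per search space so cross-space copies don't collide.
        payload = f"{search_space_id}:{content}".encode("utf-8")
        return hashlib.sha256(payload).hexdigest()
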
@@ -1846,7 +1898,7 @@ async def process_file_in_background_with_document(
     )

     user_llm = await get_user_long_context_llm(session, user_id, search_space_id)

     if user_llm:
         document_metadata = {
             "file_name": filename,
@@ -1881,10 +1933,10 @@ async def process_file_in_background_with_document(
         **(document.document_metadata or {}),
     }
     flag_modified(document, "document_metadata")

     # Use safe_set_chunks to avoid async issues
     safe_set_chunks(document, chunks)

     document.blocknote_document = blocknote_json
     document.content_needs_reindexing = False
     document.updated_at = get_current_timestamp()
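
flag_modified here is stock SQLAlchemy (sqlalchemy.orm.attributes): in-place edits to a JSON column are invisible to change tracking, so the column is flagged dirty by hand. In isolation:

    from sqlalchemy.orm.attributes import flag_modified

    document.document_metadata["file_name"] = "report.pdf"  # in-place: NOT tracked
    flag_modified(document, "document_metadata")            # mark column dirty
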
@@ -1922,7 +1974,11 @@ async def process_file_in_background_with_document(
             log_entry,
             error_message,
             str(e),
-            {"error_type": type(e).__name__, "filename": filename, "document_id": document.id},
+            {
+                "error_type": type(e).__name__,
+                "filename": filename,
+                "document_id": document.id,
+            },
         )
         logging.error(f"Error processing file with document: {error_message}")
         raise
@@ -136,11 +136,19 @@ async def add_youtube_video_document(
             document = existing_document
             is_new_document = False
             # Check if already being processed
-            if DocumentStatus.is_state(existing_document.status, DocumentStatus.PENDING):
-                logging.info(f"YouTube video {video_id} already pending. Returning existing.")
+            if DocumentStatus.is_state(
+                existing_document.status, DocumentStatus.PENDING
+            ):
+                logging.info(
+                    f"YouTube video {video_id} already pending. Returning existing."
+                )
                 return existing_document
-            if DocumentStatus.is_state(existing_document.status, DocumentStatus.PROCESSING):
-                logging.info(f"YouTube video {video_id} already processing. Returning existing.")
+            if DocumentStatus.is_state(
+                existing_document.status, DocumentStatus.PROCESSING
+            ):
+                logging.info(
+                    f"YouTube video {video_id} already processing. Returning existing."
+                )
                 return existing_document
         else:
             # Create new document with PENDING status (visible in UI immediately)
@@ -300,7 +308,9 @@ async def add_youtube_video_document(
                     "video_id": video_id,
                 },
             )
-            logging.info(f"Document for YouTube video {video_id} unchanged. Marking as ready.")
+            logging.info(
+                f"Document for YouTube video {video_id} unchanged. Marking as ready."
+            )
             document.status = DocumentStatus.ready()
             await session.commit()
             return document
@@ -408,7 +418,9 @@ async def add_youtube_video_document(
         # Mark document as failed if it exists
         if document:
             try:
-                document.status = DocumentStatus.failed(f"Database error: {str(db_error)[:150]}")
+                document.status = DocumentStatus.failed(
+                    f"Database error: {str(db_error)[:150]}"
+                )
                 document.updated_at = get_current_timestamp()
                 await session.commit()
             except Exception: