Mirror of https://github.com/MODSetter/SurfSense.git, synced 2026-05-15 18:25:18 +02:00
fix: Resolve merge conflict in documents_routes.py
- Integrated Docling ETL service with new task logging system
- Maintained consistent logging pattern across all ETL services
- Added progress and success/failure logging for Docling processing
This commit is contained in commit f117d94ef7.
34 changed files with 4160 additions and 520 deletions
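The commit message above describes one logging pattern reused by every ETL branch: open a log entry when the task starts, report progress at each stage, then record either success (including duplicate detection) or failure. Below is a minimal sketch of that pattern for a Docling-style branch. The TaskLoggingService methods and add_received_file_document_using_docling are taken from the diff that follows; the function signature and the parse_file_with_docling helper are assumptions for illustration only, not the project's actual code.

async def docling_branch(session, task_logger, log_entry, filename, file_path, search_space_id, user_id):
    # Sketch of the shared logging pattern; only the logging calls and the
    # document task below mirror the diff, the rest is assumed.
    try:
        await task_logger.log_task_progress(
            log_entry,
            f"Processing file with Docling ETL: {filename}",
            {"file_type": "document", "etl_service": "DOCLING", "processing_stage": "parsing"},
        )
        markdown = await parse_file_with_docling(file_path)  # hypothetical helper returning markdown text
        doc_result = await add_received_file_document_using_docling(
            session,
            filename,
            docling_markdown_document=markdown,
            search_space_id=search_space_id,
            user_id=user_id,
        )
        if doc_result:
            await task_logger.log_task_success(
                log_entry,
                f"Successfully processed file with Docling: {filename}",
                {"document_id": doc_result.id, "etl_service": "DOCLING"},
            )
        else:
            await task_logger.log_task_success(
                log_entry,
                f"Document already exists (duplicate): {filename}",
                {"duplicate_detected": True, "etl_service": "DOCLING"},
            )
    except Exception as e:
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to process file: {filename}",
            str(e),
            {"error_type": type(e).__name__, "filename": filename},
        )
        raise  # let the calling wrapper handle cleanup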
@@ -5,6 +5,7 @@ from .podcasts_routes import router as podcasts_router
from .chats_routes import router as chats_router
from .search_source_connectors_routes import router as search_source_connectors_router
from .llm_config_routes import router as llm_config_router
from .logs_routes import router as logs_router

router = APIRouter()
@@ -14,3 +15,4 @@ router.include_router(podcasts_router)
router.include_router(chats_router)
router.include_router(search_source_connectors_router)
router.include_router(llm_config_router)
router.include_router(logs_router)
@@ -54,32 +54,23 @@ async def handle_chat_data(
if message['role'] == "user":
langchain_chat_history.append(HumanMessage(content=message['content']))
elif message['role'] == "assistant":
# Find the last "ANSWER" annotation specifically
answer_annotation = None
for annotation in reversed(message['annotations']):
if annotation['type'] == "ANSWER":
answer_annotation = annotation
break

if answer_annotation:
answer_text = answer_annotation['content']
# If content is a list, join it into a single string
if isinstance(answer_text, list):
answer_text = "\n".join(answer_text)
langchain_chat_history.append(AIMessage(content=answer_text))
langchain_chat_history.append(AIMessage(content=message['content']))

response = StreamingResponse(stream_connector_search_results(
user_query,
user.id,
search_space_id, # Already converted to int in lines 32-37
session,
research_mode,
selected_connectors,
langchain_chat_history,
search_mode_str,
document_ids_to_add_in_context
))
response.headers['x-vercel-ai-data-stream'] = 'v1'
response = StreamingResponse(
stream_connector_search_results(
user_query,
user.id,
search_space_id,
session,
research_mode,
selected_connectors,
langchain_chat_history,
search_mode_str,
document_ids_to_add_in_context,
)
)

response.headers["x-vercel-ai-data-stream"] = "v1"
return response
@@ -135,11 +135,19 @@ async def process_file_in_background(
filename: str,
search_space_id: int,
user_id: str,
session: AsyncSession
session: AsyncSession,
task_logger: 'TaskLoggingService',
log_entry: 'Log'
):
try:
# Check if the file is a markdown or text file
if filename.lower().endswith(('.md', '.markdown', '.txt')):
await task_logger.log_task_progress(
log_entry,
f"Processing markdown/text file: {filename}",
{"file_type": "markdown", "processing_stage": "reading_file"}
)

# For markdown files, read the content directly
with open(file_path, 'r', encoding='utf-8') as f:
markdown_content = f.read()
@@ -151,16 +159,42 @@ async def process_file_in_background(
except:
pass

await task_logger.log_task_progress(
log_entry,
f"Creating document from markdown content: {filename}",
{"processing_stage": "creating_document", "content_length": len(markdown_content)}
)

# Process markdown directly through specialized function
await add_received_markdown_file_document(
result = await add_received_markdown_file_document(
session,
filename,
markdown_content,
search_space_id,
user_id
)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed markdown file: {filename}",
{"document_id": result.id, "content_hash": result.content_hash, "file_type": "markdown"}
)
else:
await task_logger.log_task_success(
log_entry,
f"Markdown file already exists (duplicate): {filename}",
{"duplicate_detected": True, "file_type": "markdown"}
)

# Check if the file is an audio file
elif filename.lower().endswith(('.mp3', '.mp4', '.mpeg', '.mpga', '.m4a', '.wav', '.webm')):
await task_logger.log_task_progress(
log_entry,
f"Processing audio file for transcription: {filename}",
{"file_type": "audio", "processing_stage": "starting_transcription"}
)

# Open the audio file for transcription
with open(file_path, "rb") as audio_file:
# Use LiteLLM for audio transcription
@@ -184,6 +218,12 @@ async def process_file_in_background(
# Add metadata about the transcription
transcribed_text = f"# Transcription of {filename}\n\n{transcribed_text}"

await task_logger.log_task_progress(
log_entry,
f"Transcription completed, creating document: {filename}",
{"processing_stage": "transcription_complete", "transcript_length": len(transcribed_text)}
)

# Clean up the temp file
try:
os.unlink(file_path)
@@ -191,15 +231,35 @@ async def process_file_in_background(
pass

# Process transcription as markdown document
await add_received_markdown_file_document(
result = await add_received_markdown_file_document(
session,
filename,
transcribed_text,
search_space_id,
user_id
)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully transcribed and processed audio file: {filename}",
{"document_id": result.id, "content_hash": result.content_hash, "file_type": "audio", "transcript_length": len(transcribed_text)}
)
else:
await task_logger.log_task_success(
log_entry,
f"Audio file transcript already exists (duplicate): {filename}",
{"duplicate_detected": True, "file_type": "audio"}
)

else:
if app_config.ETL_SERVICE == "UNSTRUCTURED":
await task_logger.log_task_progress(
log_entry,
f"Processing file with Unstructured ETL: {filename}",
{"file_type": "document", "etl_service": "UNSTRUCTURED", "processing_stage": "loading"}
)

from langchain_unstructured import UnstructuredLoader

# Process the file
@@ -215,6 +275,12 @@ async def process_file_in_background(

docs = await loader.aload()

await task_logger.log_task_progress(
log_entry,
f"Unstructured ETL completed, creating document: {filename}",
{"processing_stage": "etl_complete", "elements_count": len(docs)}
)

# Clean up the temp file
import os
try:
@@ -223,14 +289,34 @@ async def process_file_in_background(
pass

# Pass the documents to the existing background task
await add_received_file_document_using_unstructured(
result = await add_received_file_document_using_unstructured(
session,
filename,
docs,
search_space_id,
user_id
)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed file with Unstructured: {filename}",
{"document_id": result.id, "content_hash": result.content_hash, "file_type": "document", "etl_service": "UNSTRUCTURED"}
)
else:
await task_logger.log_task_success(
log_entry,
f"Document already exists (duplicate): {filename}",
{"duplicate_detected": True, "file_type": "document", "etl_service": "UNSTRUCTURED"}
)

elif app_config.ETL_SERVICE == "LLAMACLOUD":
await task_logger.log_task_progress(
log_entry,
f"Processing file with LlamaCloud ETL: {filename}",
{"file_type": "document", "etl_service": "LLAMACLOUD", "processing_stage": "parsing"}
)

from llama_cloud_services import LlamaParse
from llama_cloud_services.parse.utils import ResultType
@@ -257,19 +343,45 @@ async def process_file_in_background(
# Get markdown documents from the result
markdown_documents = await result.aget_markdown_documents(split_by_page=False)

await task_logger.log_task_progress(
log_entry,
f"LlamaCloud parsing completed, creating documents: {filename}",
{"processing_stage": "parsing_complete", "documents_count": len(markdown_documents)}
)

for doc in markdown_documents:
# Extract text content from the markdown documents
markdown_content = doc.text

# Process the documents using our LlamaCloud background task
await add_received_file_document_using_llamacloud(
doc_result = await add_received_file_document_using_llamacloud(
session,
filename,
llamacloud_markdown_document=markdown_content,
search_space_id=search_space_id,
user_id=user_id
)

if doc_result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed file with LlamaCloud: {filename}",
{"document_id": doc_result.id, "content_hash": doc_result.content_hash, "file_type": "document", "etl_service": "LLAMACLOUD"}
)
else:
await task_logger.log_task_success(
log_entry,
f"Document already exists (duplicate): {filename}",
{"duplicate_detected": True, "file_type": "document", "etl_service": "LLAMACLOUD"}
)

elif app_config.ETL_SERVICE == "DOCLING":
await task_logger.log_task_progress(
log_entry,
f"Processing file with Docling ETL: {filename}",
{"file_type": "document", "etl_service": "DOCLING", "processing_stage": "parsing"}
)

# Use Docling service for document processing
from app.services.document_processing.docling_service import create_docling_service
@@ -286,17 +398,43 @@ async def process_file_in_background(
except:
pass

await task_logger.log_task_progress(
log_entry,
f"Docling parsing completed, creating document: {filename}",
{"processing_stage": "parsing_complete", "content_length": len(result['content'])}
)

# Process the document using our Docling background task
await add_received_file_document_using_docling(
doc_result = await add_received_file_document_using_docling(
session,
filename,
docling_markdown_document=result['content'],
search_space_id=search_space_id,
user_id=user_id
)

if doc_result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed file with Docling: {filename}",
{"document_id": doc_result.id, "content_hash": doc_result.content_hash, "file_type": "document", "etl_service": "DOCLING"}
)
else:
await task_logger.log_task_success(
log_entry,
f"Document already exists (duplicate): {filename}",
{"duplicate_detected": True, "file_type": "document", "etl_service": "DOCLING"}
)
except Exception as e:
await task_logger.log_task_failure(
log_entry,
f"Failed to process file: {filename}",
str(e),
{"error_type": type(e).__name__, "filename": filename}
)
import logging
logging.error(f"Error processing file in background: {str(e)}")
raise # Re-raise so the wrapper can also handle it


@router.get("/documents/", response_model=List[DocumentRead])
@@ -467,11 +605,47 @@ async def process_extension_document_with_new_session(
):
"""Create a new session and process extension document."""
from app.db import async_session_maker
from app.services.task_logging_service import TaskLoggingService

async with async_session_maker() as session:
# Initialize task logging service
task_logger = TaskLoggingService(session, search_space_id)

# Log task start
log_entry = await task_logger.log_task_start(
task_name="process_extension_document",
source="document_processor",
message=f"Starting processing of extension document from {individual_document.metadata.VisitedWebPageTitle}",
metadata={
"document_type": "EXTENSION",
"url": individual_document.metadata.VisitedWebPageURL,
"title": individual_document.metadata.VisitedWebPageTitle,
"user_id": user_id
}
)

try:
await add_extension_received_document(session, individual_document, search_space_id, user_id)
result = await add_extension_received_document(session, individual_document, search_space_id, user_id)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed extension document: {individual_document.metadata.VisitedWebPageTitle}",
{"document_id": result.id, "content_hash": result.content_hash}
)
else:
await task_logger.log_task_success(
log_entry,
f"Extension document already exists (duplicate): {individual_document.metadata.VisitedWebPageTitle}",
{"duplicate_detected": True}
)
except Exception as e:
await task_logger.log_task_failure(
log_entry,
f"Failed to process extension document: {individual_document.metadata.VisitedWebPageTitle}",
str(e),
{"error_type": type(e).__name__}
)
import logging
logging.error(f"Error processing extension document: {str(e)}")
@@ -483,11 +657,46 @@ async def process_crawled_url_with_new_session(
):
"""Create a new session and process crawled URL."""
from app.db import async_session_maker
from app.services.task_logging_service import TaskLoggingService

async with async_session_maker() as session:
# Initialize task logging service
task_logger = TaskLoggingService(session, search_space_id)

# Log task start
log_entry = await task_logger.log_task_start(
task_name="process_crawled_url",
source="document_processor",
message=f"Starting URL crawling and processing for: {url}",
metadata={
"document_type": "CRAWLED_URL",
"url": url,
"user_id": user_id
}
)

try:
await add_crawled_url_document(session, url, search_space_id, user_id)
result = await add_crawled_url_document(session, url, search_space_id, user_id)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully crawled and processed URL: {url}",
{"document_id": result.id, "title": result.title, "content_hash": result.content_hash}
)
else:
await task_logger.log_task_success(
log_entry,
f"URL document already exists (duplicate): {url}",
{"duplicate_detected": True}
)
except Exception as e:
await task_logger.log_task_failure(
log_entry,
f"Failed to crawl URL: {url}",
str(e),
{"error_type": type(e).__name__}
)
import logging
logging.error(f"Error processing crawled URL: {str(e)}")
@@ -500,9 +709,38 @@ async def process_file_in_background_with_new_session(
):
"""Create a new session and process file."""
from app.db import async_session_maker
from app.services.task_logging_service import TaskLoggingService

async with async_session_maker() as session:
await process_file_in_background(file_path, filename, search_space_id, user_id, session)
# Initialize task logging service
task_logger = TaskLoggingService(session, search_space_id)

# Log task start
log_entry = await task_logger.log_task_start(
task_name="process_file_upload",
source="document_processor",
message=f"Starting file processing for: {filename}",
metadata={
"document_type": "FILE",
"filename": filename,
"file_path": file_path,
"user_id": user_id
}
)

try:
await process_file_in_background(file_path, filename, search_space_id, user_id, session, task_logger, log_entry)

# Note: success/failure logging is handled within process_file_in_background
except Exception as e:
await task_logger.log_task_failure(
log_entry,
f"Failed to process file: {filename}",
str(e),
{"error_type": type(e).__name__}
)
import logging
logging.error(f"Error processing file: {str(e)}")


async def process_youtube_video_with_new_session(
@@ -512,11 +750,46 @@ async def process_youtube_video_with_new_session(
):
"""Create a new session and process YouTube video."""
from app.db import async_session_maker
from app.services.task_logging_service import TaskLoggingService

async with async_session_maker() as session:
# Initialize task logging service
task_logger = TaskLoggingService(session, search_space_id)

# Log task start
log_entry = await task_logger.log_task_start(
task_name="process_youtube_video",
source="document_processor",
message=f"Starting YouTube video processing for: {url}",
metadata={
"document_type": "YOUTUBE_VIDEO",
"url": url,
"user_id": user_id
}
)

try:
await add_youtube_video_document(session, url, search_space_id, user_id)
result = await add_youtube_video_document(session, url, search_space_id, user_id)

if result:
await task_logger.log_task_success(
log_entry,
f"Successfully processed YouTube video: {result.title}",
{"document_id": result.id, "video_id": result.document_metadata.get("video_id"), "content_hash": result.content_hash}
)
else:
await task_logger.log_task_success(
log_entry,
f"YouTube video document already exists (duplicate): {url}",
{"duplicate_detected": True}
)
except Exception as e:
await task_logger.log_task_failure(
log_entry,
f"Failed to process YouTube video: {url}",
str(e),
{"error_type": type(e).__name__}
)
import logging
logging.error(f"Error processing YouTube video: {str(e)}")
surfsense_backend/app/routes/logs_routes.py (new file, 280 lines added)
@@ -0,0 +1,280 @@
from fastapi import APIRouter, Depends, HTTPException, Query
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy import and_, desc
from typing import List, Optional
from datetime import datetime, timedelta

from app.db import get_async_session, User, SearchSpace, Log, LogLevel, LogStatus
from app.schemas import LogCreate, LogUpdate, LogRead, LogFilter
from app.users import current_active_user
from app.utils.check_ownership import check_ownership

router = APIRouter()

@router.post("/logs/", response_model=LogRead)
|
||||
async def create_log(
|
||||
log: LogCreate,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Create a new log entry."""
|
||||
try:
|
||||
# Check if the user owns the search space
|
||||
await check_ownership(session, SearchSpace, log.search_space_id, user)
|
||||
|
||||
db_log = Log(**log.model_dump())
|
||||
session.add(db_log)
|
||||
await session.commit()
|
||||
await session.refresh(db_log)
|
||||
return db_log
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
await session.rollback()
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to create log: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/logs/", response_model=List[LogRead])
|
||||
async def read_logs(
|
||||
skip: int = 0,
|
||||
limit: int = 100,
|
||||
search_space_id: Optional[int] = None,
|
||||
level: Optional[LogLevel] = None,
|
||||
status: Optional[LogStatus] = None,
|
||||
source: Optional[str] = None,
|
||||
start_date: Optional[datetime] = None,
|
||||
end_date: Optional[datetime] = None,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Get logs with optional filtering."""
|
||||
try:
|
||||
# Build base query - only logs from user's search spaces
|
||||
query = (
|
||||
select(Log)
|
||||
.join(SearchSpace)
|
||||
.filter(SearchSpace.user_id == user.id)
|
||||
.order_by(desc(Log.created_at)) # Most recent first
|
||||
)
|
||||
|
||||
# Apply filters
|
||||
filters = []
|
||||
|
||||
if search_space_id is not None:
|
||||
await check_ownership(session, SearchSpace, search_space_id, user)
|
||||
filters.append(Log.search_space_id == search_space_id)
|
||||
|
||||
if level is not None:
|
||||
filters.append(Log.level == level)
|
||||
|
||||
if status is not None:
|
||||
filters.append(Log.status == status)
|
||||
|
||||
if source is not None:
|
||||
filters.append(Log.source.ilike(f"%{source}%"))
|
||||
|
||||
if start_date is not None:
|
||||
filters.append(Log.created_at >= start_date)
|
||||
|
||||
if end_date is not None:
|
||||
filters.append(Log.created_at <= end_date)
|
||||
|
||||
if filters:
|
||||
query = query.filter(and_(*filters))
|
||||
|
||||
# Apply pagination
|
||||
result = await session.execute(query.offset(skip).limit(limit))
|
||||
return result.scalars().all()
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch logs: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/logs/{log_id}", response_model=LogRead)
|
||||
async def read_log(
|
||||
log_id: int,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Get a specific log by ID."""
|
||||
try:
|
||||
# Get log and verify user owns the search space
|
||||
result = await session.execute(
|
||||
select(Log)
|
||||
.join(SearchSpace)
|
||||
.filter(Log.id == log_id, SearchSpace.user_id == user.id)
|
||||
)
|
||||
log = result.scalars().first()
|
||||
|
||||
if not log:
|
||||
raise HTTPException(status_code=404, detail="Log not found")
|
||||
|
||||
return log
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to fetch log: {str(e)}"
|
||||
)
|
||||
|
||||
@router.put("/logs/{log_id}", response_model=LogRead)
|
||||
async def update_log(
|
||||
log_id: int,
|
||||
log_update: LogUpdate,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Update a log entry."""
|
||||
try:
|
||||
# Get log and verify user owns the search space
|
||||
result = await session.execute(
|
||||
select(Log)
|
||||
.join(SearchSpace)
|
||||
.filter(Log.id == log_id, SearchSpace.user_id == user.id)
|
||||
)
|
||||
db_log = result.scalars().first()
|
||||
|
||||
if not db_log:
|
||||
raise HTTPException(status_code=404, detail="Log not found")
|
||||
|
||||
# Update only provided fields
|
||||
update_data = log_update.model_dump(exclude_unset=True)
|
||||
for field, value in update_data.items():
|
||||
setattr(db_log, field, value)
|
||||
|
||||
await session.commit()
|
||||
await session.refresh(db_log)
|
||||
return db_log
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
await session.rollback()
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to update log: {str(e)}"
|
||||
)
|
||||
|
||||
@router.delete("/logs/{log_id}")
|
||||
async def delete_log(
|
||||
log_id: int,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Delete a log entry."""
|
||||
try:
|
||||
# Get log and verify user owns the search space
|
||||
result = await session.execute(
|
||||
select(Log)
|
||||
.join(SearchSpace)
|
||||
.filter(Log.id == log_id, SearchSpace.user_id == user.id)
|
||||
)
|
||||
db_log = result.scalars().first()
|
||||
|
||||
if not db_log:
|
||||
raise HTTPException(status_code=404, detail="Log not found")
|
||||
|
||||
await session.delete(db_log)
|
||||
await session.commit()
|
||||
return {"message": "Log deleted successfully"}
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
await session.rollback()
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to delete log: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/logs/search-space/{search_space_id}/summary")
|
||||
async def get_logs_summary(
|
||||
search_space_id: int,
|
||||
hours: int = 24,
|
||||
session: AsyncSession = Depends(get_async_session),
|
||||
user: User = Depends(current_active_user)
|
||||
):
|
||||
"""Get a summary of logs for a search space in the last X hours."""
|
||||
try:
|
||||
# Check ownership
|
||||
await check_ownership(session, SearchSpace, search_space_id, user)
|
||||
|
||||
# Calculate time window
|
||||
since = datetime.utcnow().replace(microsecond=0) - timedelta(hours=hours)
|
||||
|
||||
# Get logs from the time window
|
||||
result = await session.execute(
|
||||
select(Log)
|
||||
.filter(
|
||||
and_(
|
||||
Log.search_space_id == search_space_id,
|
||||
Log.created_at >= since
|
||||
)
|
||||
)
|
||||
.order_by(desc(Log.created_at))
|
||||
)
|
||||
logs = result.scalars().all()
|
||||
|
||||
# Create summary
|
||||
summary = {
|
||||
"total_logs": len(logs),
|
||||
"time_window_hours": hours,
|
||||
"by_status": {},
|
||||
"by_level": {},
|
||||
"by_source": {},
|
||||
"active_tasks": [],
|
||||
"recent_failures": []
|
||||
}
|
||||
|
||||
# Count by status and level
|
||||
for log in logs:
|
||||
# Status counts
|
||||
status_str = log.status.value
|
||||
summary["by_status"][status_str] = summary["by_status"].get(status_str, 0) + 1
|
||||
|
||||
# Level counts
|
||||
level_str = log.level.value
|
||||
summary["by_level"][level_str] = summary["by_level"].get(level_str, 0) + 1
|
||||
|
||||
# Source counts
|
||||
if log.source:
|
||||
summary["by_source"][log.source] = summary["by_source"].get(log.source, 0) + 1
|
||||
|
||||
# Active tasks (IN_PROGRESS)
|
||||
if log.status == LogStatus.IN_PROGRESS:
|
||||
task_name = log.log_metadata.get("task_name", "Unknown") if log.log_metadata else "Unknown"
|
||||
summary["active_tasks"].append({
|
||||
"id": log.id,
|
||||
"task_name": task_name,
|
||||
"message": log.message,
|
||||
"started_at": log.created_at,
|
||||
"source": log.source
|
||||
})
|
||||
|
||||
# Recent failures
|
||||
if log.status == LogStatus.FAILED and len(summary["recent_failures"]) < 10:
|
||||
task_name = log.log_metadata.get("task_name", "Unknown") if log.log_metadata else "Unknown"
|
||||
summary["recent_failures"].append({
|
||||
"id": log.id,
|
||||
"task_name": task_name,
|
||||
"message": log.message,
|
||||
"failed_at": log.created_at,
|
||||
"source": log.source,
|
||||
"error_details": log.log_metadata.get("error_details") if log.log_metadata else None
|
||||
})
|
||||
|
||||
return summary
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate logs summary: {str(e)}"
|
||||
)
|
||||